diff --git a/src/.vuepress/sidebar/V2.0.x/en-Table.ts b/src/.vuepress/sidebar/V2.0.x/en-Table.ts
index fb6d4455a..471100060 100644
--- a/src/.vuepress/sidebar/V2.0.x/en-Table.ts
+++ b/src/.vuepress/sidebar/V2.0.x/en-Table.ts
@@ -62,6 +62,7 @@ export const enSidebar = {
},
{ text: 'Cluster Deployment', link: 'Cluster-Deployment_apache' },
{ text: 'Docker Deployment', link: 'Docker-Deployment_apache' },
+ { text: 'AINode Deployment', link: 'AINode_Deployment_apache' },
{
text: 'Monitoring Panel Deployment',
link: 'Monitoring-panel-deployment',
@@ -111,6 +112,15 @@ export const enSidebar = {
},
],
},
+ {
+ text: 'AI capability',
+ collapsible: true,
+ prefix: 'AI-capability/',
+ children: [
+ { text: 'AINode', link: 'AINode_apache' },
+ { text: 'TimeSeries Large Model', link: 'TimeSeries-Large-Model' },
+ ],
+ },
{
text: 'Tools System',
collapsible: true,
diff --git a/src/.vuepress/sidebar/V2.0.x/zh-Table.ts b/src/.vuepress/sidebar/V2.0.x/zh-Table.ts
index 6361b1d33..a4479b002 100644
--- a/src/.vuepress/sidebar/V2.0.x/zh-Table.ts
+++ b/src/.vuepress/sidebar/V2.0.x/zh-Table.ts
@@ -59,6 +59,7 @@ export const zhSidebar = {
{ text: '单机版部署指导', link: 'Stand-Alone-Deployment_apache' },
{ text: '集群版部署指导', link: 'Cluster-Deployment_apache' },
{ text: 'Docker部署指导', link: 'Docker-Deployment_apache' },
+ { text: 'AINode部署', link: 'AINode_Deployment_apache' },
],
},
{
@@ -102,6 +103,15 @@ export const zhSidebar = {
},
],
},
+ {
+ text: 'AI 能力',
+ collapsible: true,
+ prefix: 'AI-capability/',
+ children: [
+ { text: 'AINode', link: 'AINode_apache' },
+ { text: '时序大模型', link: 'TimeSeries-Large-Model' },
+ ],
+ },
{
text: '工具体系',
collapsible: true,
diff --git a/src/.vuepress/sidebar_timecho/V2.0.x/en-Table.ts b/src/.vuepress/sidebar_timecho/V2.0.x/en-Table.ts
index 2620c9f23..aa9ea1b7b 100644
--- a/src/.vuepress/sidebar_timecho/V2.0.x/en-Table.ts
+++ b/src/.vuepress/sidebar_timecho/V2.0.x/en-Table.ts
@@ -66,6 +66,7 @@ export const enSidebar = {
link: 'Dual-Active-Deployment_timecho',
},
{ text: 'Docker Deployment', link: 'Docker-Deployment_timecho' },
+ { text: 'AINode Deployment', link: 'AINode_Deployment_timecho' },
{
text: 'Monitoring Panel Deployment',
link: 'Monitoring-panel-deployment',
@@ -116,6 +117,15 @@ export const enSidebar = {
},
],
},
+ {
+ text: 'AI capability',
+ collapsible: true,
+ prefix: 'AI-capability/',
+ children: [
+ { text: 'AINode', link: 'AINode_timecho' },
+ { text: 'TimeSeries Large Model', link: 'TimeSeries-Large-Model' },
+ ],
+ },
{
text: 'Tools System',
collapsible: true,
diff --git a/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts b/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts
index 135414f0a..ca26bae2c 100644
--- a/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts
+++ b/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts
@@ -60,6 +60,7 @@ export const zhSidebar = {
{ text: '集群版部署指导', link: 'Cluster-Deployment_timecho' },
{ text: '双活版部署指导', link: 'Dual-Active-Deployment_timecho' },
{ text: 'Docker部署指导', link: 'Docker-Deployment_timecho' },
+ { text: 'AINode部署', link: 'AINode_Deployment_timecho' },
{ text: '监控面板部署', link: 'Monitoring-panel-deployment' },
],
},
@@ -105,6 +106,15 @@ export const zhSidebar = {
},
],
},
+ {
+ text: 'AI 能力',
+ collapsible: true,
+ prefix: 'AI-capability/',
+ children: [
+ { text: 'AINode', link: 'AINode_timecho' },
+ { text: '时序大模型', link: 'TimeSeries-Large-Model' },
+ ],
+ },
{
text: '工具体系',
collapsible: true,
diff --git a/src/UserGuide/Master/Table/AI-capability/AINode_apache.md b/src/UserGuide/Master/Table/AI-capability/AINode_apache.md
new file mode 100644
index 000000000..fc66e3dc0
--- /dev/null
+++ b/src/UserGuide/Master/Table/AI-capability/AINode_apache.md
@@ -0,0 +1,174 @@
+
+
+# AINode
+
+AINode is an IoTDB native node designed to support the registration, management, and invocation of large-scale time series models. It comes with industry-leading proprietary time series models such as Timer and Sundial. These models can be invoked through standard SQL statements, enabling real-time inference of time series data at the millisecond level, and supporting application scenarios such as trend forecasting, missing value imputation, and anomaly detection for time series data.
+
+The system architecture is shown below:
+::: center
+
+:::
+The responsibilities of the three nodes are as follows:
+
+- **ConfigNode**: responsible for storing and managing the meta-information of the model; responsible for distributed node management.
+- **DataNode**: responsible for receiving and parsing SQL requests from users; responsible for storing time-series data; responsible for preprocessing computation of data.
+- **AINode**: responsible for importing and creating model files and for model inference.
+
+## 1. Advantages
+
+Compared with building a standalone machine learning service, AINode offers the following advantages:
+
+- **Simple and easy to use**: no Python or Java programming is required; the complete workflow of machine learning model management and inference can be completed with SQL statements. For example, a model can be created with the CREATE MODEL statement and used for inference with the CALL INFERENCE(...) statement, making the feature simpler and more convenient to use.
+
+- **Avoid Data Migration**: With IoTDB native machine learning, data stored in IoTDB can be directly applied to the inference of machine learning models without having to move the data to a separate machine learning service platform, which accelerates data processing, improves security, and reduces costs.
+
+
+
+- **Built-in Advanced Algorithms**: supports industry-leading machine learning analysis algorithms covering typical time series analysis tasks, giving the time series database native data analysis capabilities. Examples include:
+ - **Time Series Forecasting**: learns change patterns from past time series and, given observations up to a certain point in time, outputs the most likely prediction of the future series.
+ - **Anomaly Detection for Time Series**: detects and identifies outliers in a given time series, helping to discover anomalous behaviour in the series.
+ - **Time Series Annotation**: adds extra information or markers, such as event occurrences, outliers, and trend changes, to each data point or specific time period to better understand and analyse the data.
+
+
+
+## 2. Basic Concepts
+
+- **Model**: a machine learning model that takes time series data as input and outputs the result or decision of an analysis task. The model is the basic management unit of AINode, which supports adding (registering), deleting, viewing, and using (inference with) models.
+- **Create**: load an externally designed or trained model file or algorithm into AINode so that it is managed and used by IoTDB in a unified way.
+- **Inference**: the process of using a created model to perform, on specified time series data, the time series analysis task the model is suited for.
+- **Built-in capabilities**: AINode ships with machine learning algorithms and self-developed models for common time series analysis scenarios (e.g., forecasting and anomaly detection).
+
+::: center
+
+:::
+
+## 3. Installation and Deployment
+
+The deployment of AINode can be found in the document [Deployment Guidelines](../Deployment-and-Maintenance/AINode_Deployment_apache.md#ainode-deployment) .
+
+
+## 4. Usage Guidelines
+
+AINode provides a creation and deletion workflow for deep learning models related to time series data. Built-in models do not need to be created or deleted and can be used directly; the built-in model instances created for inference are destroyed automatically once inference is complete.
+
+### 4.1 Registering Models
+
+A trained deep learning model can be registered by specifying the vector dimensions of its inputs and outputs, after which it can be used for model inference.
+
+Models that meet the following criteria can be registered in AINode:
+1. AINode supports models trained with PyTorch 2.1.0 and 2.2.0; avoid using features introduced after version 2.2.0.
+2. AINode supports models saved with PyTorch JIT; the model file needs to contain both the parameters and the structure of the model.
+3. The input sequence of the model can contain one or more columns; if there are multiple columns, they need to match the model's capability and the model configuration file.
+4. The input and output dimensions of the model must be clearly defined in the `config.yaml` configuration file. When using the model, the input and output dimensions defined in `config.yaml` must be followed strictly; if the number of input or output columns does not match the configuration file, errors will occur.
+
+The following is the SQL syntax definition for model registration.
+
+```SQL
+create model <model_name> using uri <uri>
+```
+
+The specific meanings of the parameters in the SQL are as follows:
+
+- model_name: a globally unique identifier for the model; names cannot be repeated. The model name has the following constraints:
+
+ - Only the characters [ 0-9 a-z A-Z _ ] (letters, digits, underscore) are allowed.
+ - The length is limited to 2-64 characters.
+ - It is case sensitive.
+
+- uri: resource path of the model registration files; the path should contain the **model weight file model.pt and the model's metadata description file config.yaml**.
+
+ - Model weight file: the weight file obtained after training of the deep learning model is complete; currently .pt files produced by PyTorch training are supported.
+
+ - yaml metadata description file: parameters related to the model structure that must be provided at registration, which must include the input and output dimensions of the model used for inference:
+
+ - | **Parameter name** | **Parameter description** | **Example** |
+ | ------------ | ---------------------------- | -------- |
+ | input_shape | Rows and columns of model inputs for model inference | [96,2] |
+ | output_shape | rows and columns of model outputs, for model inference | [48,2] |
+
+ - In addition to the shapes used for model inference, the data types of the model input and output can also be specified:
+
+ - | **Parameter name** | **Parameter description** | **Example** |
+ | ----------- | ------------------ | --------------------- |
+ | input_type | model input data type | ['float32','float32'] |
+ | output_type | data type of the model output | ['float32','float32'] |
+
+ - In addition, extra notes can be specified for display during model management:
+
+ - | **Parameter name** | **Parameter description** | **Examples** |
+ | ---------- | ---------------------------------------------- | ------------------------------------------- |
+ | attributes | optional, user-defined model notes for model display | 'model_type': 'dlinear','kernel_size': '25' |
+
+
+In addition to registering local model files, a model can also be registered by specifying a remote resource path via the URI, for example from an open-source model repository such as HuggingFace.
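+
+As an illustrative sketch (the model name `example_dlinear` and both URIs below are hypothetical, and the exact path form depends on your deployment), a registration statement could look like this:
+
+```SQL
+-- register a model from a local directory that contains model.pt and config.yaml
+-- (model name and path are illustrative)
+create model example_dlinear using uri '/data/ainode/models/example_dlinear'
+
+-- register a model from a remote repository such as HuggingFace (URL is illustrative)
+create model example_dlinear using uri 'https://huggingface.co/example-org/example_dlinear'
+```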
+
+
+### 4.2 Viewing Models
+
+Information about successfully registered models can be queried with the show models command. The SQL definitions are as follows:
+
+```SQL
+show models
+
+show models <model_name>
+```
+
+Besides listing all models directly, you can specify a model id to view the information of a specific model. The displayed result contains the following information:
+
+| **ModelId** | **State** | **Configs** | **Attributes** |
+| ------------ | ------------------------------------- | ---------------------------------------------- | -------------- |
+| Model Unique Identifier | Model Registration State (LOADING, ACTIVE, DROPPING, UNAVAILABLE) | inputShape, outputShape, inputTypes, outputTypes | Model Notes |
+
+State shows the current state of the model registration process, which includes the following stages:
+
+- **LOADING**: the corresponding model meta information has been added to the ConfigNode, and the model file is being transferred to the AINode.
+- **ACTIVE**: the model has been set up and is available for use.
+- **DROPPING**: model deletion is in progress; model-related information is being deleted from the ConfigNode and the AINode.
+- **UNAVAILABLE**: model creation failed; the failed model_name can be removed with drop model.
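+
+For example, the illustrative model registered above could be inspected individually with:
+
+```SQL
+-- show information for one model (name is illustrative)
+show models example_dlinear
+```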
+
+
+### 4.3 Delete Model
+
+A successfully registered model can be deleted via SQL. Besides removing the meta information on the ConfigNode, this operation also deletes all related model files on the AINode. The SQL is as follows:
+
+```SQL
+drop model <model_name>
+```
+
+You need to specify the model_name of a successfully registered model to delete it. Since model deletion involves deleting data on multiple nodes, the operation does not complete immediately; during this time the model state is DROPPING, and a model in this state cannot be used for inference.
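+
+For example, the illustrative model registered above could be removed with:
+
+```SQL
+-- delete the previously registered (illustrative) model
+drop model example_dlinear
+```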
+
+### 4.4 Inference with Built-in Models
+
+Coming Soon
+
+## 5. Privilege Management
+
+AINode-related features use IoTDB's own authentication for permission management: a user can use the model management functions only with the USE_MODEL privilege, and when using the inference function the user must also have permission to access the source series referenced by the SQL that is fed to the model.
+
+| Privilege Name | Privilege Scope | Administrator User (default ROOT) | Normal User | Path Related |
+| --------- | --------------------------------- | ---------------------- | -------- | -------- |
+| USE_MODEL | create model / show models / drop model | √ | √ | x |
+| READ_DATA | call inference | √ | √ | √ |
+
+## 6. Practical Examples
+
+Coming Soon
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/AI-capability/AINode_timecho.md b/src/UserGuide/Master/Table/AI-capability/AINode_timecho.md
new file mode 100644
index 000000000..2ecbe7361
--- /dev/null
+++ b/src/UserGuide/Master/Table/AI-capability/AINode_timecho.md
@@ -0,0 +1,175 @@
+
+
+# AINode
+
+AINode is an IoTDB native node designed to support the registration, management, and invocation of large-scale time series models. It comes with industry-leading proprietary time series models such as Timer and Sundial. These models can be invoked through standard SQL statements, enabling real-time inference of time series data at the millisecond level, and supporting application scenarios such as trend forecasting, missing value imputation, and anomaly detection for time series data.
+
+
+The system architecture is shown below:
+::: center
+
+:::
+The responsibilities of the three nodes are as follows:
+
+- **ConfigNode**: responsible for storing and managing the meta-information of the model; responsible for distributed node management.
+- **DataNode**: responsible for receiving and parsing SQL requests from users; responsible for storing time-series data; responsible for preprocessing computation of data.
+- **AINode**: responsible for importing and creating model files and for model inference.
+
+## 1. Advantages
+
+Compared with building a standalone machine learning service, AINode offers the following advantages:
+
+- **Simple and easy to use**: no Python or Java programming is required; the complete workflow of machine learning model management and inference can be completed with SQL statements. For example, a model can be created with the CREATE MODEL statement and used for inference with the CALL INFERENCE(...) statement, making the feature simpler and more convenient to use.
+
+- **Avoid Data Migration**: With IoTDB native machine learning, data stored in IoTDB can be directly applied to the inference of machine learning models without having to move the data to a separate machine learning service platform, which accelerates data processing, improves security, and reduces costs.
+
+
+
+- **Built-in Advanced Algorithms**: supports industry-leading machine learning analysis algorithms covering typical time series analysis tasks, giving the time series database native data analysis capabilities. Examples include:
+ - **Time Series Forecasting**: learns change patterns from past time series and, given observations up to a certain point in time, outputs the most likely prediction of the future series.
+ - **Anomaly Detection for Time Series**: detects and identifies outliers in a given time series, helping to discover anomalous behaviour in the series.
+ - **Time Series Annotation**: adds extra information or markers, such as event occurrences, outliers, and trend changes, to each data point or specific time period to better understand and analyse the data.
+
+
+
+## 2. Basic Concepts
+
+- **Model**: a machine learning model that takes time series data as input and outputs the result or decision of an analysis task. The model is the basic management unit of AINode, which supports adding (registering), deleting, viewing, and using (inference with) models.
+- **Create**: load an externally designed or trained model file or algorithm into AINode so that it is managed and used by IoTDB in a unified way.
+- **Inference**: the process of using a created model to perform, on specified time series data, the time series analysis task the model is suited for.
+- **Built-in capabilities**: AINode ships with machine learning algorithms and self-developed models for common time series analysis scenarios (e.g., forecasting and anomaly detection).
+
+::: center
+
+:::
+
+## 3. Installation and Deployment
+
+The deployment of AINode can be found in the document [Deployment Guidelines](../Deployment-and-Maintenance/AINode_Deployment_timecho.md) .
+
+
+## 4. Usage Guidelines
+
+AINode provides a creation and deletion workflow for deep learning models related to time series data. Built-in models do not need to be created or deleted and can be used directly; the built-in model instances created for inference are destroyed automatically once inference is complete.
+
+### 4.1 Registering Models
+
+A trained deep learning model can be registered by specifying the vector dimensions of its inputs and outputs, after which it can be used for model inference.
+
+Models that meet the following criteria can be registered in AINode:
+1. AINode supports models trained with PyTorch 2.1.0 and 2.2.0; avoid using features introduced after version 2.2.0.
+2. AINode supports models saved with PyTorch JIT; the model file needs to contain both the parameters and the structure of the model.
+3. The input sequence of the model can contain one or more columns; if there are multiple columns, they need to match the model's capability and the model configuration file.
+4. The input and output dimensions of the model must be clearly defined in the `config.yaml` configuration file. When using the model, the input and output dimensions defined in `config.yaml` must be followed strictly; if the number of input or output columns does not match the configuration file, errors will occur.
+
+The following is the SQL syntax definition for model registration.
+
+```SQL
+create model <model_name> using uri <uri>
+```
+
+The specific meanings of the parameters in the SQL are as follows:
+
+- model_name: a globally unique identifier for the model; names cannot be repeated. The model name has the following constraints:
+
+ - Only the characters [ 0-9 a-z A-Z _ ] (letters, digits, underscore) are allowed.
+ - The length is limited to 2-64 characters.
+ - It is case sensitive.
+
+- uri: resource path of the model registration files; the path should contain the **model weight file model.pt and the model's metadata description file config.yaml**.
+
+ - Model weight file: the weight file obtained after training of the deep learning model is complete; currently .pt files produced by PyTorch training are supported.
+
+ - yaml metadata description file: parameters related to the model structure that must be provided at registration, which must include the input and output dimensions of the model used for inference:
+
+ - | **Parameter name** | **Parameter description** | **Example** |
+ | ------------ | ---------------------------- | -------- |
+ | input_shape | Rows and columns of model inputs for model inference | [96,2] |
+ | output_shape | rows and columns of model outputs, for model inference | [48,2] |
+
+ - In addition to the shapes used for model inference, the data types of the model input and output can also be specified:
+
+ - | **Parameter name** | **Parameter description** | **Example** |
+ | ----------- | ------------------ | --------------------- |
+ | input_type | model input data type | ['float32','float32'] |
+ | output_type | data type of the model output | ['float32','float32'] |
+
+ - In addition, extra notes can be specified for display during model management:
+
+ - | **Parameter name** | **Parameter description** | **Examples** |
+ | ---------- | ---------------------------------------------- | ------------------------------------------- |
+ | attributes | optional, user-defined model notes for model display | 'model_type': 'dlinear','kernel_size': '25' |
+
+
+In addition to registering local model files, a model can also be registered by specifying a remote resource path via the URI, for example from an open-source model repository such as HuggingFace.
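+
+As an illustrative sketch (the model name `example_dlinear` and both URIs below are hypothetical, and the exact path form depends on your deployment), a registration statement could look like this:
+
+```SQL
+-- register a model from a local directory that contains model.pt and config.yaml
+-- (model name and path are illustrative)
+create model example_dlinear using uri '/data/ainode/models/example_dlinear'
+
+-- register a model from a remote repository such as HuggingFace (URL is illustrative)
+create model example_dlinear using uri 'https://huggingface.co/example-org/example_dlinear'
+```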
+
+
+### 4.2 Viewing Models
+
+Information about successfully registered models can be queried with the show models command. The SQL definitions are as follows:
+
+```SQL
+show models
+
+show models <model_name>
+```
+
+Besides listing all models directly, you can specify a model id to view the information of a specific model. The displayed result contains the following information:
+
+| **ModelId** | **State** | **Configs** | **Attributes** |
+| ------------ | ------------------------------------- | ---------------------------------------------- | -------------- |
+| Model Unique Identifier | Model Registration State (LOADING, ACTIVE, DROPPING, UNAVAILABLE) | inputShape, outputShape, inputTypes, outputTypes | Model Notes |
+
+State shows the current state of the model registration process, which includes the following stages:
+
+- **LOADING**: the corresponding model meta information has been added to the ConfigNode, and the model file is being transferred to the AINode.
+- **ACTIVE**: the model has been set up and is available for use.
+- **DROPPING**: model deletion is in progress; model-related information is being deleted from the ConfigNode and the AINode.
+- **UNAVAILABLE**: model creation failed; the failed model_name can be removed with drop model.
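+
+For example, the illustrative model registered above could be inspected individually with:
+
+```SQL
+-- show information for one model (name is illustrative)
+show models example_dlinear
+```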
+
+
+### 4.3 Delete Model
+
+A successfully registered model can be deleted via SQL. Besides removing the meta information on the ConfigNode, this operation also deletes all related model files on the AINode. The SQL is as follows:
+
+```SQL
+drop model <model_name>
+```
+
+You need to specify the model_name of a successfully registered model to delete it. Since model deletion involves deleting data on multiple nodes, the operation does not complete immediately; during this time the model state is DROPPING, and a model in this state cannot be used for inference.
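+
+For example, the illustrative model registered above could be removed with:
+
+```SQL
+-- delete the previously registered (illustrative) model
+drop model example_dlinear
+```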
+
+### 4.4 Inference with Built-in Models
+
+Coming Soon
+
+## 5. Privilege Management
+
+AINode-related features use IoTDB's own authentication for permission management: a user can use the model management functions only with the USE_MODEL privilege, and when using the inference function the user must also have permission to access the source series referenced by the SQL that is fed to the model.
+
+| Privilege Name | Privilege Scope | Administrator User (default ROOT) | Normal User | Path Related |
+| --------- | --------------------------------- | ---------------------- | -------- | -------- |
+| USE_MODEL | create model / show models / drop model | √ | √ | x |
+| READ_DATA | call inference | √ | √ | √ |
+
+## 6. Practical Examples
+
+Coming Soon
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/AI-capability/TimeSeries-Large-Model.md b/src/UserGuide/Master/Table/AI-capability/TimeSeries-Large-Model.md
new file mode 100644
index 000000000..7743ff545
--- /dev/null
+++ b/src/UserGuide/Master/Table/AI-capability/TimeSeries-Large-Model.md
@@ -0,0 +1,117 @@
+
+
+# TimeSeries Large Model
+
+## Introduction
+
+A time series large model is a foundation model designed specifically for time series analysis. The IoTDB team has independently developed time series large models that are pre-trained on massive amounts of time series data using technologies such as the Transformer architecture. These models can understand and generate time series data across various domains and can be applied to scenarios such as time series forecasting, anomaly detection, and time series imputation. Unlike traditional time series analysis techniques, time series large models can extract universal features and serve a wide range of analysis tasks through zero-shot analysis and fine-tuning.
+
+The team's research on time series large models has been published at top international machine learning conferences.
+
+## Application Scenarios
+
+- **Time Series Forecasting**: provides forecasting services for time series data in industrial production, natural environments, and other areas, helping users understand future trends in advance.
+- **Data Imputation**: fills in missing segments of a time series based on their context, improving the continuity and completeness of the dataset.
+- **Anomaly Detection**: uses autoregressive analysis to monitor time series data in real time and give timely warnings of potential anomalies.
+
+
+
+## Timer Model
+
+The Timer model not only shows excellent few-shot generalization and multi-task adaptation capabilities, but also gains a rich knowledge base through pre-training, giving it the general capability to handle a variety of downstream tasks. Its main features are:
+
+- **Generalization**: The model can be fine-tuned using a small number of samples to achieve leading predictive performance in the industry.
+- **Versatility**: The model is designed flexibly to adapt to various task requirements and supports variable input and output lengths, enabling it to play a role in various application scenarios.
+- **Scalability**: As the number of model parameters increases or the scale of pre-training data expands, the model's performance continues to improve, ensuring the model can optimize its predictive effects with the growth of time and data volume.
+
+
+
+## Timer-XL Model
+
+Timer-XL is an upgraded version of Timer that further extends the network structure and achieves comprehensive breakthroughs in multiple dimensions:
+
+- **Long Context Support**: This model breaks through the limitations of traditional time series forecasting models, supporting the processing of thousands of tokens (equivalent to tens of thousands of time points) of input, effectively addressing the bottleneck of context length.
+- **Multi-variable Forecasting Scenario Coverage**: Supports a variety of forecasting scenarios, including non-stationary time series forecasting, multi-variable prediction tasks, and predictions involving covariates, meeting diverse business needs.
+- **Large-scale Industrial Time Series Dataset**: pre-trained on a trillion-scale industrial IoT time series dataset with large volume, high quality, and broad domain coverage, spanning energy, aerospace, steel, transportation, and more.
+
+
+## Effect Demonstration
+
+Time series large models can adapt to real time series data from various fields and scenarios and deliver excellent results on a variety of tasks. The following shows their real-world performance on different datasets:
+
+**Time Series Forecasting:**
+
+Using the forecasting capability of the time series large model, the future trend of a time series can be predicted accurately. As shown in the figure, the blue curve represents the predicted trend and the red curve the actual trend; the two curves match closely.
+
+
+
+**Data Imputation:**
+
+Using the time series large model to perform predictive imputation for missing data segments.
+
+
+
+
+**Anomaly Detection:**
+
+Utilizing the time series large model to accurately identify anomalies that deviate significantly from the normal trend.
+
+
+
+## Deployment Usage
+
+1. Open the IoTDB CLI console and verify that the ConfigNode, DataNode, and AINode statuses are all Running.
+
+Check command:
+
+```sql
+show cluster
+```
+
+
+
+2. Model file storage path: it is recommended to place the model files in the same directory as the AINode installation package; you may create a dedicated folder to store them.
+
+3. Register the model
+
+Use the following SQL statement:
+
+```sql
+create model <model_name> using uri <uri>
+```
+
+Example (for the Timer model):
+
+```sql
+create model Timer using uri <uri>
+```
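+
+As a hedged illustration, assuming the model weight file model.pt and its config.yaml have been placed in a local folder such as `/data/ainode/models/timer` (this path is hypothetical), the full statement could look like:
+
+```sql
+-- hypothetical local path; replace with the actual folder containing model.pt and config.yaml
+create model Timer using uri '/data/ainode/models/timer'
+```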
+
+4. Verify model registration success
+
+Check command:
+
+```sql
+show models
+```
+
+
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_apache.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_apache.md
new file mode 100644
index 000000000..3ca19baef
--- /dev/null
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_apache.md
@@ -0,0 +1,23 @@
+
+# AINode Deployment
+
+Coming Soon
\ No newline at end of file
diff --git a/src/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md b/src/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md
new file mode 100644
index 000000000..3ca19baef
--- /dev/null
+++ b/src/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md
@@ -0,0 +1,23 @@
+
+# AINode Deployment
+
+Coming Soon
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/AI-capability/AINode_apache.md b/src/UserGuide/latest-Table/AI-capability/AINode_apache.md
new file mode 100644
index 000000000..aad5839e6
--- /dev/null
+++ b/src/UserGuide/latest-Table/AI-capability/AINode_apache.md
@@ -0,0 +1,24 @@
+
+
+# AINode
+
+Coming Soon
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/AI-capability/AINode_timecho.md b/src/UserGuide/latest-Table/AI-capability/AINode_timecho.md
new file mode 100644
index 000000000..aad5839e6
--- /dev/null
+++ b/src/UserGuide/latest-Table/AI-capability/AINode_timecho.md
@@ -0,0 +1,24 @@
+
+
+# AINode
+
+Coming Soon
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/AI-capability/TimeSeries-Large-Model.md b/src/UserGuide/latest-Table/AI-capability/TimeSeries-Large-Model.md
new file mode 100644
index 000000000..7743ff545
--- /dev/null
+++ b/src/UserGuide/latest-Table/AI-capability/TimeSeries-Large-Model.md
@@ -0,0 +1,117 @@
+
+
+# TimeSeries Large Model
+
+## Introduction
+
+A time series large model is a foundation model designed specifically for time series analysis. The IoTDB team has independently developed time series large models that are pre-trained on massive amounts of time series data using technologies such as the Transformer architecture. These models can understand and generate time series data across various domains and can be applied to scenarios such as time series forecasting, anomaly detection, and time series imputation. Unlike traditional time series analysis techniques, time series large models can extract universal features and serve a wide range of analysis tasks through zero-shot analysis and fine-tuning.
+
+The team's research on time series large models has been published at top international machine learning conferences.
+
+## Application Scenarios
+
+- **Time Series Forecasting**: provides forecasting services for time series data in industrial production, natural environments, and other areas, helping users understand future trends in advance.
+- **Data Imputation**: fills in missing segments of a time series based on their context, improving the continuity and completeness of the dataset.
+- **Anomaly Detection**: uses autoregressive analysis to monitor time series data in real time and give timely warnings of potential anomalies.
+
+
+
+## Timer Model
+
+The Timer model not only shows excellent few-shot generalization and multi-task adaptation capabilities, but also gains a rich knowledge base through pre-training, giving it the general capability to handle a variety of downstream tasks. Its main features are:
+
+- **Generalization**: The model can be fine-tuned using a small number of samples to achieve leading predictive performance in the industry.
+- **Versatility**: The model is designed flexibly to adapt to various task requirements and supports variable input and output lengths, enabling it to play a role in various application scenarios.
+- **Scalability**: As the number of model parameters increases or the scale of pre-training data expands, the model's performance continues to improve, ensuring the model can optimize its predictive effects with the growth of time and data volume.
+
+
+
+## Timer-XL Model
+
+Timer-XL is an upgraded version of Timer that further extends the network structure and achieves comprehensive breakthroughs in multiple dimensions:
+
+- **Long Context Support**: This model breaks through the limitations of traditional time series forecasting models, supporting the processing of thousands of tokens (equivalent to tens of thousands of time points) of input, effectively addressing the bottleneck of context length.
+- **Multi-variable Forecasting Scenario Coverage**: Supports a variety of forecasting scenarios, including non-stationary time series forecasting, multi-variable prediction tasks, and predictions involving covariates, meeting diverse business needs.
+- **Large-scale Industrial Time Series Dataset**: pre-trained on a trillion-scale industrial IoT time series dataset with large volume, high quality, and broad domain coverage, spanning energy, aerospace, steel, transportation, and more.
+
+
+## Effect Demonstration
+
+Time series large models can adapt to real time series data from various fields and scenarios and deliver excellent results on a variety of tasks. The following shows their real-world performance on different datasets:
+
+**Time Series Forecasting:**
+
+Using the forecasting capability of the time series large model, the future trend of a time series can be predicted accurately. As shown in the figure, the blue curve represents the predicted trend and the red curve the actual trend; the two curves match closely.
+
+
+
+**Data Imputation:**
+
+Using the time series large model to perform predictive imputation for missing data segments.
+
+
+
+
+**Anomaly Detection:**
+
+Utilizing the time series large model to accurately identify anomalies that deviate significantly from the normal trend.
+
+
+
+## Deployment Usage
+
+1. Open the IoTDB CLI console and verify that the ConfigNode, DataNode, and AINode statuses are all Running.
+
+Check command:
+
+```sql
+show cluster
+```
+
+
+
+2. Model file storage path: it is recommended to place the model files in the same directory as the AINode installation package; you may create a dedicated folder to store them.
+
+3. Register the model
+
+Use the following SQL statement:
+
+```sql
+create model <model_name> using uri <uri>
+```
+
+Example (for the Timer model):
+
+```sql
+create model Timer using uri <uri>
+```
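+
+As a hedged illustration, assuming the model weight file model.pt and its config.yaml have been placed in a local folder such as `/data/ainode/models/timer` (this path is hypothetical), the full statement could look like:
+
+```sql
+-- hypothetical local path; replace with the actual folder containing model.pt and config.yaml
+create model Timer using uri '/data/ainode/models/timer'
+```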
+
+4. Verify model registration success
+
+Check command:
+
+```sql
+show models
+```
+
+
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_apache.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_apache.md
new file mode 100644
index 000000000..3ca19baef
--- /dev/null
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_apache.md
@@ -0,0 +1,23 @@
+
+# AINode Deployment
+
+Coming Soon
\ No newline at end of file
diff --git a/src/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md b/src/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md
new file mode 100644
index 000000000..3ca19baef
--- /dev/null
+++ b/src/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md
@@ -0,0 +1,23 @@
+
+# AINode Deployment
+
+Coming Soon
\ No newline at end of file
diff --git a/src/zh/UserGuide/Master/Table/AI-capability/AINode_apache.md b/src/zh/UserGuide/Master/Table/AI-capability/AINode_apache.md
new file mode 100644
index 000000000..0042346b6
--- /dev/null
+++ b/src/zh/UserGuide/Master/Table/AI-capability/AINode_apache.md
@@ -0,0 +1,171 @@
+
+
+# AINode
+
+AINode 是支持时序大模型注册、管理、调用的 IoTDB 原生节点,内置业界领先的自研时序大模型,如 Timer、Sundial 等,可通过标准 SQL 语句进行调用,实现时序数据的毫秒级实时推理,可支持时序趋势预测、缺失值填补、异常值检测等应用场景。
+
+系统架构如下图所示:
+
+
+
+三种节点的职责如下:
+
+- **ConfigNode**:负责保存和管理模型的元信息;负责分布式节点管理。
+- **DataNode**:负责接收并解析用户的 SQL请求;负责存储时间序列数据;负责数据的预处理计算。
+- **AINode**:负责模型文件的导入创建以及模型推理。
+
+## 1. 优势特点
+
+与单独构建机器学习服务相比,具有以下优势:
+
+- **简单易用**:无需使用 Python 或 Java 编程,使用 SQL 语句即可完成机器学习模型管理与推理的完整流程。如创建模型可使用CREATE MODEL语句、使用模型进行推理可使用CALL INFERENCE(...)语句等,使用更加简单便捷。
+
+- **避免数据迁移**:使用 IoTDB 原生机器学习可以将存储在 IoTDB 中的数据直接应用于机器学习模型的推理,无需将数据移动到单独的机器学习服务平台,从而加速数据处理、提高安全性并降低成本。
+
+
+
+- **内置先进算法**:支持业内领先机器学习分析算法,覆盖典型时序分析任务,为时序数据库赋能原生数据分析能力。如:
+ - **时间序列预测(Time Series Forecasting)**:从过去时间序列中学习变化模式;从而根据给定过去时间的观测值,输出未来序列最可能的预测。
+ - **时序异常检测(Anomaly Detection for Time Series)**:在给定的时间序列数据中检测和识别异常值,帮助发现时间序列中的异常行为。
+ - **时间序列标注(Time Series Annotation)**:为每个数据点或特定时间段添加额外的信息或标记,例如事件发生、异常点、趋势变化等,以便更好地理解和分析数据。
+
+
+## 2. 基本概念
+
+- **模型(Model)**:机器学习模型,以时序数据作为输入,输出分析任务的结果或决策。模型是AINode 的基本管理单元,支持模型的增(注册)、删、查、用(推理)。
+- **创建(Create)**: 将外部设计或训练好的模型文件或算法加载到AINode中,由IoTDB统一管理与使用。
+- **推理(Inference)**:使用创建的模型在指定时序数据上完成该模型适用的时序分析任务的过程。
+- **内置能力(Built-in)**:AINode 自带常见时序分析场景(例如预测与异常检测)的机器学习算法或自研模型。
+
+
+
+## 3. 安装部署
+
+AINode 的部署可参考文档 [部署指导](../Deployment-and-Maintenance/AINode_Deployment_apache.md#ainode-部署) 章节。
+
+## 4. 使用指导
+
+AINode 对时序数据相关的深度学习模型提供了模型创建及删除的流程,内置模型无需创建及删除,可直接使用,并且在完成推理后创建的内置模型实例将自动销毁。
+
+### 4.1 注册模型
+
+通过指定模型输入输出的向量维度,可以注册训练好的深度学习模型,从而用于模型推理。
+
+符合以下内容的模型可以注册到AINode中:
+ 1. AINode 支持的PyTorch 2.1.0、 2.2.0版本训练的模型,需避免使用2.2.0版本以上的特性。
+ 2. AINode支持使用PyTorch JIT存储的模型,模型文件需要包含模型的参数和结构。
+ 3. 模型输入序列可以包含一列或多列,若有多列,需要和模型能力、模型配置文件对应。
+ 4. 模型的输入输出维度必须在`config.yaml`配置文件中明确定义。使用模型时,必须严格按照`config.yaml`配置文件中定义的输入输出维度。如果输入输出列数不匹配配置文件,将会导致错误。
+
+下方为模型注册的SQL语法定义。
+
+```SQL
+create model <model_name> using uri <uri>
+```
+
+SQL中参数的具体含义如下:
+
+- model_name:模型的全局唯一标识,不可重复。模型名称具备以下约束:
+
+ - 允许出现标识符 [ 0-9 a-z A-Z _ ] (字母,数字,下划线)
+ - 长度限制为2-64字符
+ - 大小写敏感
+
+- uri:模型注册文件的资源路径,路径下应包含**模型权重model.pt文件和模型的元数据描述文件config.yaml**
+
+ - 模型权重文件:深度学习模型训练完成后得到的权重文件,目前支持pytorch训练得到的.pt文件
+
+ - yaml元数据描述文件:模型注册时需要提供的与模型结构有关的参数,其中必须包含模型的输入输出维度用于模型推理:
+
+ - | **参数名** | **参数描述** | **示例** |
+ | ------------ | ---------------------------- | -------- |
+ | input_shape | 模型输入的行列,用于模型推理 | [96,2] |
+ | output_shape | 模型输出的行列,用于模型推理 | [48,2] |
+
+ - 除了模型推理外,还可以指定模型输入输出的数据类型:
+
+ - | **参数名** | **参数描述** | **示例** |
+ | ----------- | ------------------ | --------------------- |
+ | input_type | 模型输入的数据类型 | ['float32','float32'] |
+ | output_type | 模型输出的数据类型 | ['float32','float32'] |
+
+ - 除此之外,可以额外指定备注信息用于在模型管理时进行展示
+
+ - | **参数名** | **参数描述** | **示例** |
+ | ---------- | ---------------------------------------------- | ------------------------------------------- |
+ | attributes | 可选,用户自行设定的模型备注信息,用于模型展示 | 'model_type': 'dlinear','kernel_size': '25' |
+
+
+除了本地模型文件的注册,还可以通过URI来指定远程资源路径来进行注册,使用开源的模型仓库(例如HuggingFace)。
+
+
+### 4.2 查看模型
+
+注册成功的模型可以通过show models指令查询模型的具体信息。其SQL定义如下:
+
+```SQL
+show models
+
+show models <model_name>
+```
+
+除了直接展示所有模型的信息外,可以指定model id来查看某一具体模型的信息。模型展示的结果中包含如下信息:
+
+| **ModelId** | **State** | **Configs** | **Attributes** |
+| ------------ | ------------------------------------- | ---------------------------------------------- | -------------- |
+| 模型唯一标识 | 模型注册状态(LOADING,ACTIVE,DROPPING,UNAVAILABLE) | inputShape, outputShape, inputTypes, outputTypes | 模型备注信息 |
+
+其中,State用于展示当前模型注册的状态,包含以下阶段
+
+- **LOADING**:已经在configNode中添加对应的模型元信息,正将模型文件传输到AINode节点上
+- **ACTIVE:** 模型已经设置完成,模型处于可用状态
+- **DROPPING**:模型删除中,正在从configNode以及AINode处删除模型相关信息
+- **UNAVAILABLE**: 模型创建失败,可以通过drop model删除创建失败的model_name。
+
+
+### 4.3 删除模型
+
+对于注册成功的模型,用户可以通过SQL进行删除。该操作除了删除configNode上的元信息外,还会删除所有AINode下的相关模型文件。其SQL如下:
+
+```SQL
+drop model <model_name>
+```
+
+需要指定已经成功注册的模型model_name来删除对应的模型。由于模型删除涉及多个节点上的数据删除,操作不会立即完成,此时模型的状态为DROPPING,该状态的模型不能用于模型推理。
+
+### 4.4 使用内置模型推理
+
+敬请期待
+
+## 5. 权限管理
+
+使用AINode相关的功能时,可以使用IoTDB本身的鉴权去做一个权限管理,用户只有在具备 USE_MODEL 权限时,才可以使用模型管理的相关功能。当使用推理功能时,用户需要有访问输入模型的SQL对应的源序列的权限。
+
+| 权限名称 | 权限范围 | 管理员用户(默认ROOT) | 普通用户 | 路径相关 |
+| --------- | --------------------------------- | ---------------------- | -------- | -------- |
+| USE_MODEL | create model / show models / drop model | √ | √ | x |
+| READ_DATA | call inference | √ | √ | √ |
+
+## 6. 实际案例
+
+敬请期待
+
diff --git a/src/zh/UserGuide/Master/Table/AI-capability/AINode_timecho.md b/src/zh/UserGuide/Master/Table/AI-capability/AINode_timecho.md
new file mode 100644
index 000000000..4eb8225c3
--- /dev/null
+++ b/src/zh/UserGuide/Master/Table/AI-capability/AINode_timecho.md
@@ -0,0 +1,170 @@
+
+
+# AINode
+
+AINode 是支持时序大模型注册、管理、调用的 IoTDB 原生节点,内置业界领先的自研时序大模型,如 Timer、Sundial 等,可通过标准 SQL 语句进行调用,实现时序数据的毫秒级实时推理,可支持时序趋势预测、缺失值填补、异常值检测等应用场景。
+
+系统架构如下图所示:
+
+
+
+三种节点的职责如下:
+
+- **ConfigNode**:负责保存和管理模型的元信息;负责分布式节点管理。
+- **DataNode**:负责接收并解析用户的 SQL请求;负责存储时间序列数据;负责数据的预处理计算。
+- **AINode**:负责模型文件的导入创建以及模型推理。
+
+## 1. 优势特点
+
+与单独构建机器学习服务相比,具有以下优势:
+
+- **简单易用**:无需使用 Python 或 Java 编程,使用 SQL 语句即可完成机器学习模型管理与推理的完整流程。如创建模型可使用CREATE MODEL语句、使用模型进行推理可使用CALL INFERENCE(...)语句等,使用更加简单便捷。
+
+- **避免数据迁移**:使用 IoTDB 原生机器学习可以将存储在 IoTDB 中的数据直接应用于机器学习模型的推理,无需将数据移动到单独的机器学习服务平台,从而加速数据处理、提高安全性并降低成本。
+
+
+
+- **内置先进算法**:支持业内领先机器学习分析算法,覆盖典型时序分析任务,为时序数据库赋能原生数据分析能力。如:
+ - **时间序列预测(Time Series Forecasting)**:从过去时间序列中学习变化模式;从而根据给定过去时间的观测值,输出未来序列最可能的预测。
+ - **时序异常检测(Anomaly Detection for Time Series)**:在给定的时间序列数据中检测和识别异常值,帮助发现时间序列中的异常行为。
+ - **时间序列标注(Time Series Annotation)**:为每个数据点或特定时间段添加额外的信息或标记,例如事件发生、异常点、趋势变化等,以便更好地理解和分析数据。
+
+
+## 2. 基本概念
+
+- **模型(Model)**:机器学习模型,以时序数据作为输入,输出分析任务的结果或决策。模型是AINode 的基本管理单元,支持模型的增(注册)、删、查、用(推理)。
+- **创建(Create)**: 将外部设计或训练好的模型文件或算法加载到AINode中,由IoTDB统一管理与使用。
+- **推理(Inference)**:使用创建的模型在指定时序数据上完成该模型适用的时序分析任务的过程。
+- **内置能力(Built-in)**:AINode 自带常见时序分析场景(例如预测与异常检测)的机器学习算法或自研模型。
+
+
+
+## 3. 安装部署
+
+AINode 的部署可参考文档 [部署指导](../Deployment-and-Maintenance/AINode_Deployment_timecho.md) 章节。
+
+## 4. 使用指导
+
+AINode 对时序数据相关的深度学习模型提供了模型创建及删除的流程,内置模型无需创建及删除,可直接使用,并且在完成推理后创建的内置模型实例将自动销毁。
+
+### 4.1 注册模型
+
+通过指定模型输入输出的向量维度,可以注册训练好的深度学习模型,从而用于模型推理。
+
+符合以下内容的模型可以注册到AINode中:
+ 1. AINode 支持的PyTorch 2.1.0、 2.2.0版本训练的模型,需避免使用2.2.0版本以上的特性。
+ 2. AINode支持使用PyTorch JIT存储的模型,模型文件需要包含模型的参数和结构。
+ 3. 模型输入序列可以包含一列或多列,若有多列,需要和模型能力、模型配置文件对应。
+ 4. 模型的输入输出维度必须在`config.yaml`配置文件中明确定义。使用模型时,必须严格按照`config.yaml`配置文件中定义的输入输出维度。如果输入输出列数不匹配配置文件,将会导致错误。
+
+下方为模型注册的SQL语法定义。
+
+```SQL
+create model <model_name> using uri <uri>
+```
+
+SQL中参数的具体含义如下:
+
+- model_name:模型的全局唯一标识,不可重复。模型名称具备以下约束:
+
+ - 允许出现标识符 [ 0-9 a-z A-Z _ ] (字母,数字,下划线)
+ - 长度限制为2-64字符
+ - 大小写敏感
+
+- uri:模型注册文件的资源路径,路径下应包含**模型权重model.pt文件和模型的元数据描述文件config.yaml**
+
+ - 模型权重文件:深度学习模型训练完成后得到的权重文件,目前支持pytorch训练得到的.pt文件
+
+ - yaml元数据描述文件:模型注册时需要提供的与模型结构有关的参数,其中必须包含模型的输入输出维度用于模型推理:
+
+ - | **参数名** | **参数描述** | **示例** |
+ | ------------ | ---------------------------- | -------- |
+ | input_shape | 模型输入的行列,用于模型推理 | [96,2] |
+ | output_shape | 模型输出的行列,用于模型推理 | [48,2] |
+
+ - 除了模型推理外,还可以指定模型输入输出的数据类型:
+
+ - | **参数名** | **参数描述** | **示例** |
+ | ----------- | ------------------ | --------------------- |
+ | input_type | 模型输入的数据类型 | ['float32','float32'] |
+ | output_type | 模型输出的数据类型 | ['float32','float32'] |
+
+ - 除此之外,可以额外指定备注信息用于在模型管理时进行展示
+
+ - | **参数名** | **参数描述** | **示例** |
+ | ---------- | ---------------------------------------------- | ------------------------------------------- |
+ | attributes | 可选,用户自行设定的模型备注信息,用于模型展示 | 'model_type': 'dlinear','kernel_size': '25' |
+
+
+除了本地模型文件的注册,还可以通过URI来指定远程资源路径来进行注册,使用开源的模型仓库(例如HuggingFace)。
+
+
+### 4.2 查看模型
+
+注册成功的模型可以通过show models指令查询模型的具体信息。其SQL定义如下:
+
+```SQL
+show models
+
+show models <model_name>
+```
+
+除了直接展示所有模型的信息外,可以指定model id来查看某一具体模型的信息。模型展示的结果中包含如下信息:
+
+| **ModelId** | **State** | **Configs** | **Attributes** |
+| ------------ | ------------------------------------- | ---------------------------------------------- | -------------- |
+| 模型唯一标识 | 模型注册状态(LOADING,ACTIVE,DROPPING,UNAVAILABLE) | inputShape, outputShape, inputTypes, outputTypes | 模型备注信息 |
+
+其中,State用于展示当前模型注册的状态,包含以下阶段
+
+- **LOADING**:已经在configNode中添加对应的模型元信息,正将模型文件传输到AINode节点上
+- **ACTIVE:** 模型已经设置完成,模型处于可用状态
+- **DROPPING**:模型删除中,正在从configNode以及AINode处删除模型相关信息
+- **UNAVAILABLE**: 模型创建失败,可以通过drop model删除创建失败的model_name。
+
+
+### 4.3 删除模型
+
+对于注册成功的模型,用户可以通过SQL进行删除。该操作除了删除configNode上的元信息外,还会删除所有AINode下的相关模型文件。其SQL如下:
+
+```SQL
+drop model <model_name>
+```
+
+需要指定已经成功注册的模型model_name来删除对应的模型。由于模型删除涉及多个节点上的数据删除,操作不会立即完成,此时模型的状态为DROPPING,该状态的模型不能用于模型推理。
+
+### 4.4 使用内置模型推理
+
+敬请期待
+
+## 5. 权限管理
+
+使用AINode相关的功能时,可以使用IoTDB本身的鉴权去做一个权限管理,用户只有在具备 USE_MODEL 权限时,才可以使用模型管理的相关功能。当使用推理功能时,用户需要有访问输入模型的SQL对应的源序列的权限。
+
+| 权限名称 | 权限范围 | 管理员用户(默认ROOT) | 普通用户 | 路径相关 |
+| --------- | --------------------------------- | ---------------------- | -------- | -------- |
+| USE_MODEL | create model / show models / drop model | √ | √ | x |
+| READ_DATA | call inference | √ | √ | √ |
+
+## 6. 实际案例
+
+敬请期待
\ No newline at end of file
diff --git a/src/zh/UserGuide/Master/Table/AI-capability/TimeSeries-Large-Model.md b/src/zh/UserGuide/Master/Table/AI-capability/TimeSeries-Large-Model.md
new file mode 100644
index 000000000..79a8f6c01
--- /dev/null
+++ b/src/zh/UserGuide/Master/Table/AI-capability/TimeSeries-Large-Model.md
@@ -0,0 +1,111 @@
+
+
+# 时序大模型
+
+## 简介
+
+时序大模型是一种专为时序数据分析设计的基础模型。IoTDB 团队长期自研时序大模型,基于变换器(Transformer)结构等技术在海量时序数据上预训练,能够理解并生成多种领域的时序数据,可被应用于时序预测、异常检测、时序填补等应用场景。不同于传统时序分析技术,时序大模型具备通用特征提取能力,基于零样本分析、微调等技术服务广泛的分析任务。
+
+团队所研时序大模型相关技术均发表在国际机器学习顶级会议。
+
+## 应用场景
+
+- **时序预测**:为工业生产、自然环境等领域提供时间序列数据的预测服务,帮助用户提前了解未来趋势。
+- **数据填补**:针对时间序列中的缺失序列段,进行上下文填补,以增强数据集的连续性和完整性。
+- **异常检测**:利用自回归分析技术,对时间序列数据进行实时监测,及时预警潜在的异常情况。
+
+
+
+## Timer 模型
+
+Timer模型不仅展现了出色的少样本泛化和多任务适配能力,还通过预训练获得了丰富的知识库,赋予了它处理多样化下游任务的通用能力,拥有以下特点:
+
+- **泛化性**:模型能够通过使用少量样本进行微调,达到行业内领先的深度模型预测效果。
+- **通用性**:模型设计灵活,能够适配多种不同的任务需求,并且支持变化的输入和输出长度,使其在各种应用场景中都能发挥作用。
+- **可扩展性**:随着模型参数数量的增加或预训练数据规模的扩大,模型的性能会持续提升,确保模型能够随着时间和数据量的增长而不断优化其预测效果。
+
+
+
+## Timer-XL 模型
+
+Timer-XL 基于 Timer 进一步扩展升级了网络结构,在多个维度上进行全面突破:
+
+- **超长上下文支持**:该模型突破了传统时序预测模型的限制,支持处理数千个Token(相当于数万个时间点)的输入,有效解决了上下文长度的瓶颈问题。
+- **多变量预测场景覆盖**:支持多种预测场景,包括非平稳时间序列的预测、涉及多个变量的预测任务以及包含协变量的预测,满足多样化的业务需求。
+- **大规模工业时序数据集**:采用万亿大规模工业物联网领域的时序数据集进行预训练,数据集兼有庞大的体量、卓越的质量和丰富的领域等重要特质,覆盖能源、航空航天、钢铁、交通等多领域。
+
+## 效果展示
+
+时序大模型能够适应多种不同领域和场景的真实时序数据,在各种任务上拥有优异的处理效果,以下是在不同数据上的真实表现:
+
+**时序预测:**
+
+利用时序大模型的预测能力,能够准确预测时间序列的未来变化趋势,如下图蓝色曲线代表预测趋势,红色曲线为实际趋势,两曲线高度吻合。
+
+
+
+**数据填补:**
+
+利用时序大模型对缺失数据段进行预测式填补。
+
+
+
+
+**异常检测:**
+
+利用时序大模型精准识别与正常趋势偏离过大的异常值。
+
+
+
+## 部署使用
+
+1. 打开 IoTDB cli 控制台,检查 ConfigNode、DataNode、AINode 节点确保均为 Running。
+
+检查命令:
+```sql
+show cluster
+```
+
+
+
+2. 模型文件存放路径:推荐放在 AINode 安装包相同文件夹下,可新建模型文件夹存放模型文件
+3. 注册模型语句
+
+```sql
+create model <model_name> using uri <uri>
+```
+
+示例:
+
+```sql
+create model Timer-xl using uri <uri>
+```
+
+4. 检查模型是否注册成功
+
+检查命令:
+
+```sql
+show models
+```
+
+
diff --git a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_apache.md b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_apache.md
new file mode 100644
index 000000000..d31222535
--- /dev/null
+++ b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_apache.md
@@ -0,0 +1,23 @@
+
+# AINode 部署
+
+敬请期待
\ No newline at end of file
diff --git a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md
new file mode 100644
index 000000000..d31222535
--- /dev/null
+++ b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md
@@ -0,0 +1,23 @@
+
+# AINode 部署
+
+敬请期待
\ No newline at end of file
diff --git a/src/zh/UserGuide/latest-Table/AI-capability/AINode_apache.md b/src/zh/UserGuide/latest-Table/AI-capability/AINode_apache.md
new file mode 100644
index 000000000..0a2c6e0f0
--- /dev/null
+++ b/src/zh/UserGuide/latest-Table/AI-capability/AINode_apache.md
@@ -0,0 +1,25 @@
+
+
+# AINode
+
+敬请期待
+
diff --git a/src/zh/UserGuide/latest-Table/AI-capability/AINode_timecho.md b/src/zh/UserGuide/latest-Table/AI-capability/AINode_timecho.md
new file mode 100644
index 000000000..2174f5900
--- /dev/null
+++ b/src/zh/UserGuide/latest-Table/AI-capability/AINode_timecho.md
@@ -0,0 +1,24 @@
+
+
+# AINode
+
+敬请期待
\ No newline at end of file
diff --git a/src/zh/UserGuide/latest-Table/AI-capability/TimeSeries-Large-Model.md b/src/zh/UserGuide/latest-Table/AI-capability/TimeSeries-Large-Model.md
new file mode 100644
index 000000000..79a8f6c01
--- /dev/null
+++ b/src/zh/UserGuide/latest-Table/AI-capability/TimeSeries-Large-Model.md
@@ -0,0 +1,111 @@
+
+
+# 时序大模型
+
+## 简介
+
+时序大模型是一种专为时序数据分析设计的基础模型。IoTDB 团队长期自研时序大模型,基于变换器(Transformer)结构等技术在海量时序数据上预训练,能够理解并生成多种领域的时序数据,可被应用于时序预测、异常检测、时序填补等应用场景。不同于传统时序分析技术,时序大模型具备通用特征提取能力,基于零样本分析、微调等技术服务广泛的分析任务。
+
+团队所研时序大模型相关技术均发表在国际机器学习顶级会议。
+
+## 应用场景
+
+- **时序预测**:为工业生产、自然环境等领域提供时间序列数据的预测服务,帮助用户提前了解未来趋势。
+- **数据填补**:针对时间序列中的缺失序列段,进行上下文填补,以增强数据集的连续性和完整性。
+- **异常检测**:利用自回归分析技术,对时间序列数据进行实时监测,及时预警潜在的异常情况。
+
+
+
+## Timer 模型
+
+Timer模型不仅展现了出色的少样本泛化和多任务适配能力,还通过预训练获得了丰富的知识库,赋予了它处理多样化下游任务的通用能力,拥有以下特点:
+
+- **泛化性**:模型能够通过使用少量样本进行微调,达到行业内领先的深度模型预测效果。
+- **通用性**:模型设计灵活,能够适配多种不同的任务需求,并且支持变化的输入和输出长度,使其在各种应用场景中都能发挥作用。
+- **可扩展性**:随着模型参数数量的增加或预训练数据规模的扩大,模型的性能会持续提升,确保模型能够随着时间和数据量的增长而不断优化其预测效果。
+
+
+
+## Timer-XL 模型
+
+Timer-XL 基于 Timer 进一步扩展升级了网络结构,在多个维度上进行全面突破:
+
+- **超长上下文支持**:该模型突破了传统时序预测模型的限制,支持处理数千个Token(相当于数万个时间点)的输入,有效解决了上下文长度的瓶颈问题。
+- **多变量预测场景覆盖**:支持多种预测场景,包括非平稳时间序列的预测、涉及多个变量的预测任务以及包含协变量的预测,满足多样化的业务需求。
+- **大规模工业时序数据集**:采用万亿大规模工业物联网领域的时序数据集进行预训练,数据集兼有庞大的体量、卓越的质量和丰富的领域等重要特质,覆盖能源、航空航天、钢铁、交通等多领域。
+
+## 效果展示
+
+时序大模型能够适应多种不同领域和场景的真实时序数据,在各种任务上拥有优异的处理效果,以下是在不同数据上的真实表现:
+
+**时序预测:**
+
+利用时序大模型的预测能力,能够准确预测时间序列的未来变化趋势,如下图蓝色曲线代表预测趋势,红色曲线为实际趋势,两曲线高度吻合。
+
+
+
+**数据填补:**
+
+利用时序大模型对缺失数据段进行预测式填补。
+
+
+
+
+**异常检测:**
+
+利用时序大模型精准识别与正常趋势偏离过大的异常值。
+
+
+
+## 部署使用
+
+1. 打开 IoTDB cli 控制台,检查 ConfigNode、DataNode、AINode 节点确保均为 Running。
+
+检查命令:
+```sql
+show cluster
+```
+
+
+
+2. 模型文件存放路径:推荐放在 AINode 安装包相同文件夹下,可新建模型文件夹存放模型文件
+3. 注册模型语句
+
+```sql
+create model <model_name> using uri <uri>
+```
+
+示例:
+
+```sql
+create model Timer-xl using uri <uri>
+```
+
+4. 检查模型是否注册成功
+
+检查命令:
+
+```sql
+show models
+```
+
+
diff --git a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_apache.md b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_apache.md
new file mode 100644
index 000000000..d31222535
--- /dev/null
+++ b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_apache.md
@@ -0,0 +1,23 @@
+
+# AINode 部署
+
+敬请期待
\ No newline at end of file
diff --git a/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md
new file mode 100644
index 000000000..d31222535
--- /dev/null
+++ b/src/zh/UserGuide/latest-Table/Deployment-and-Maintenance/AINode_Deployment_timecho.md
@@ -0,0 +1,23 @@
+
+# AINode 部署
+
+敬请期待
\ No newline at end of file