diff --git a/src/.vuepress/navbar/en.ts b/src/.vuepress/navbar/en.ts index 5d890ac2d..778220c30 100644 --- a/src/.vuepress/navbar/en.ts +++ b/src/.vuepress/navbar/en.ts @@ -25,7 +25,7 @@ export const enNavbar = navbar([ // { text: 'latest', link: '/UserGuide/Master/QuickStart/QuickStart_apache' }, { text: 'v1.3.3', - link: '/UserGuide/latest/QuickStart/QuickStart_apache', + link: '/UserGuide/V1.3.3/QuickStart/QuickStart_apache', }, { text: 'v1.3.0/1/2', diff --git a/src/.vuepress/navbar/zh.ts b/src/.vuepress/navbar/zh.ts index fddebc7a5..5de61587f 100644 --- a/src/.vuepress/navbar/zh.ts +++ b/src/.vuepress/navbar/zh.ts @@ -23,9 +23,13 @@ export const zhNavbar = navbar([ text: '文档', children: [ // { text: 'latest', link: '/zh/UserGuide/Master/QuickStart/QuickStart_apache' }, + { + text: 'v2.0.1', + link: '/zh/UserGuide/latest/QuickStart/QuickStart_apache', + }, { text: 'v1.3.3', - link: '/zh/UserGuide/latest/QuickStart/QuickStart_apache', + link: '/zh/UserGuide/V1.3.3/QuickStart/QuickStart_apache', }, { text: 'v1.3.0/1/2', diff --git a/src/.vuepress/sidebar/V0.13.x/zh.ts b/src/.vuepress/sidebar/V0.13.x/zh.ts index 5fce5eb3e..82754c641 100644 --- a/src/.vuepress/sidebar/V0.13.x/zh.ts +++ b/src/.vuepress/sidebar/V0.13.x/zh.ts @@ -195,7 +195,7 @@ export const zhSidebar = { ], }, { - text: '参考', + text: '附录', collapsible: true, prefix: 'Reference/', children: [ diff --git a/src/.vuepress/sidebar/V1.2.x/zh.ts b/src/.vuepress/sidebar/V1.2.x/zh.ts index e7329a8ab..7863d58d5 100644 --- a/src/.vuepress/sidebar/V1.2.x/zh.ts +++ b/src/.vuepress/sidebar/V1.2.x/zh.ts @@ -158,7 +158,7 @@ export const zhSidebar = { children: [{ text: '常见问题', link: 'Frequently-asked-questions' }], }, { - text: '参考', + text: '附录', collapsible: true, prefix: 'Reference/', // children: 'structure', diff --git a/src/.vuepress/sidebar/V1.3.0-2/zh.ts b/src/.vuepress/sidebar/V1.3.0-2/zh.ts index 97759ef8b..4cd5d7680 100644 --- a/src/.vuepress/sidebar/V1.3.0-2/zh.ts +++ 
b/src/.vuepress/sidebar/V1.3.0-2/zh.ts @@ -156,7 +156,7 @@ export const zhSidebar = { children: [{ text: '常见问题', link: 'Frequently-asked-questions' }], }, { - text: '参考', + text: '附录', collapsible: true, prefix: 'Reference/', // children: 'structure', diff --git a/src/.vuepress/sidebar/V1.3.3/en.ts b/src/.vuepress/sidebar/V1.3.3/en.ts index b63b54712..0047bbc19 100644 --- a/src/.vuepress/sidebar/V1.3.3/en.ts +++ b/src/.vuepress/sidebar/V1.3.3/en.ts @@ -17,7 +17,7 @@ */ export const enSidebar = { - '/UserGuide/latest/': [ + '/UserGuide/V1.3.3/': [ { text: 'IoTDB User Guide (V1.3.3)', children: [], diff --git a/src/.vuepress/sidebar/V1.3.3/zh.ts b/src/.vuepress/sidebar/V1.3.3/zh.ts index 16d5e6166..b52d895bd 100644 --- a/src/.vuepress/sidebar/V1.3.3/zh.ts +++ b/src/.vuepress/sidebar/V1.3.3/zh.ts @@ -17,7 +17,7 @@ */ export const zhSidebar = { - '/zh/UserGuide/latest/': [ + '/zh/UserGuide/V1.3.3/': [ { text: 'IoTDB用户手册 (V1.3.3)', children: [], @@ -210,7 +210,7 @@ export const zhSidebar = { ], }, { - text: '参考', + text: '附录', collapsible: true, prefix: 'Reference/', // children: 'structure', diff --git a/src/.vuepress/sidebar/V2.0.1/en.ts b/src/.vuepress/sidebar/V2.0.1/en.ts new file mode 100644 index 000000000..44711c5fa --- /dev/null +++ b/src/.vuepress/sidebar/V2.0.1/en.ts @@ -0,0 +1,281 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export const enSidebar = { + '/UserGuide/latest/': [ + { + text: 'IoTDB User Guide', + children: [], + }, + { + text: 'About IoTDB', + collapsible: true, + prefix: 'IoTDB-Introduction/', + // children: 'structure', 使用该方式自动获取文件夹下的文件 + children: [ + { text: 'IoTDB Introduction', link: 'IoTDB-Introduction_timecho' }, + { text: 'Scenario', link: 'Scenario' }, + ], + }, + { + text: 'Background knowledge', + collapsible: true, + prefix: 'Background-knowledge/', + children: [ + { text: 'Cluster-related Concepts', link: 'Cluster-Concept' }, + { text: 'Data Type', link: 'Data-Type' }, + ], + }, + { + text: 'Quick Start', + link: 'QuickStart/QuickStart_timecho', + }, + { + text: 'Deployment & Maintenance', + collapsible: true, + prefix: 'Deployment-and-Maintenance/', + // children: 'structure', + children: [ + { text: 'Obtain TimechoDB', link: 'IoTDB-Package_timecho' }, + { text: 'Database Resources', link: 'Database-Resources' }, + { text: 'System Requirements', link: 'Environment-Requirements' }, + { + text: 'Stand-Alone Deployment', + link: 'Stand-Alone-Deployment_timecho', + }, + { text: 'Cluster Deployment', link: 'Cluster-Deployment_timecho' }, + { + text: 'Dual Active Deployment', + link: 'Dual-Active-Deployment_timecho', + }, + { text: 'Docker Deployment', link: 'Docker-Deployment_timecho' }, + { text: 'AINode Deployment', link: 'AINode_Deployment_timecho' }, + { + text: 'Monitoring Panel Deployment', + link: 'Monitoring-panel-deployment', + }, + { text: 'Workbench Deployment', link: 'workbench-deployment_timecho' }, + ], + }, + { + text: 'Basic 
Functions', + collapsible: true, + prefix: 'Basic-Concept/', + // children: 'structure', + children: [ + { + text: 'Data Modeling', + collapsible: true, + children: [ + { + text: 'Timeseries Data Model', + link: 'Navigating_Time_Series_Data', + }, + { + text: 'Modeling Scheme Design', + link: 'Data-Model-and-Terminology', + }, + { + text: 'Measurement Point Management', + link: 'Operate-Metadata_timecho', + }, + ], + }, + { text: 'Write & Delete', link: 'Write-Delete-Data' }, + { text: 'Query Data', link: 'Query-Data' }, + ], + }, + { + text: 'Advanced Features', + collapsible: true, + prefix: 'User-Manual/', + // children: 'structure', + children: [ + { text: 'Data Sync', link: 'Data-Sync_timecho' }, + { text: 'Data Subscription', link: 'Data-subscription' }, + { text: 'AI Capability', link: 'AINode_timecho' }, + { + text: 'Security Management', + collapsible: true, + children: [ + { text: 'White List', link: 'White-List_timecho' }, + { text: 'Audit Log', link: 'Audit-Log_timecho' }, + { text: 'Authority Management', link: 'Authority-Management' }, + ], + }, + { text: 'UDF', link: 'User-defined-function_timecho' }, + { text: 'View', link: 'IoTDB-View_timecho' }, + { text: 'Tiered Storage', link: 'Tiered-Storage_timecho' }, + { text: 'Continuous Query', link: 'Database-Programming' }, + { + text: 'Database Programming', + collapsible: true, + children: [ + { text: 'UDF Development', link: 'UDF-development' }, + { text: 'Trigger', link: 'Trigger' }, + { text: 'Stream Processing', link: 'Streaming_timecho' }, + ], + }, + { text: 'Maintenance SQL', link: 'Maintennance' }, + ], + }, + { + text: 'Tools System', + collapsible: true, + prefix: 'Tools-System/', + // children: 'structure', + children: [ + { text: 'CLI', link: 'CLI' }, + { text: 'Workbench', link: 'Workbench_timecho' }, + { text: 'Monitor Tool', link: 'Monitor-Tool_timecho' }, + { text: 'Benchmark Tool', link: 'Benchmark' }, + { text: 'Cluster Management Tool', link: 'Maintenance-Tool_timecho' }, + { text: 
'Data Import', link: 'Data-Import-Tool' }, + { text: 'Data Export', link: 'Data-Export-Tool' }, + ], + }, + { + text: 'API', + collapsible: true, + prefix: 'API/', + // children: 'structure', + children: [ + { text: 'Java Native API', link: 'Programming-Java-Native-API' }, + { text: 'Python Native API', link: 'Programming-Python-Native-API' }, + { text: 'C++ Native API', link: 'Programming-Cpp-Native-API' }, + { text: 'Go Native API', link: 'Programming-Go-Native-API' }, + { text: 'C# Native API', link: 'Programming-CSharp-Native-API' }, + { text: 'Node.js Native API', link: 'Programming-NodeJS-Native-API' }, + { text: 'Rust Native API', link: 'Programming-Rust-Native-API' }, + { text: 'JDBC (Not Recommend)', link: 'Programming-JDBC' }, + { text: 'MQTT Protocol', link: 'Programming-MQTT' }, + { text: 'OPC UA Protocol', link: 'Programming-OPC-UA_timecho' }, + { text: 'Kafka', link: 'Programming-Kafka' }, + { + text: 'REST API', + collapsible: true, + children: [ + { text: 'V1 (Not Recommend)', link: 'RestServiceV1' }, + { text: 'V2', link: 'RestServiceV2' }, + ], + }, + ], + }, + { + text: 'Ecosystem Integration', + collapsible: true, + prefix: 'Ecosystem-Integration/', + // children: 'structure', + children: [ + { text: 'Apache Flink(IoTDB)', link: 'Flink-IoTDB' }, + { text: 'Apache Flink(TsFile)', link: 'Flink-TsFile' }, + { text: 'Apache Hive(TsFile)', link: 'Hive-TsFile' }, + { text: 'Apache NiFi', link: 'NiFi-IoTDB' }, + { text: 'Apache Spark(TsFile)', link: 'Spark-TsFile' }, + { text: 'Apache Spark(IoTDB)', link: 'Spark-IoTDB' }, + { text: 'Apache Zeppelin', link: 'Zeppelin-IoTDB_timecho' }, + { text: 'DataEase', link: 'DataEase' }, + { text: 'DBeaver', link: 'DBeaver' }, + { text: 'Ignition', link: 'Ignition-IoTDB-plugin_timecho' }, + { text: 'Grafana(IoTDB)', link: 'Grafana-Connector' }, + { text: 'Grafana Plugin', link: 'Grafana-Plugin' }, + { text: 'Telegraf Plugin', link: 'Telegraf' }, + { text: 'ThingsBoard', link: 'Thingsboard' }, + ], + }, + { + text: 
'SQL Manual', + collapsible: true, + prefix: 'SQL-Manual/', + // children: 'structure', + children: [ + { text: 'SQL Manual', link: 'SQL-Manual' }, + { + text: 'Functions and Operators', + collapsible: true, + children: [ + { text: 'UDF Libraries', link: 'UDF-Libraries_timecho' }, + { + text: 'Operator and Expression', + link: 'Operator-and-Expression', + }, + { + text: 'Function and Expression', + link: 'Function-and-Expression', + }, + ], + }, + ], + }, + { + text: 'Technical Insider', + collapsible: true, + prefix: 'Technical-Insider/', + // children: 'structure', + children: [ + { text: 'Research Paper ', link: 'Publication' }, + { text: 'Compression & Encoding', link: 'Encoding-and-Compression' }, + { + text: 'Data Partitioning & Load Balancing', + link: 'Cluster-data-partitioning', + }, + ], + }, + { + text: 'Reference', + collapsible: true, + prefix: 'Reference/', + // children: 'structure', + children: [ + { + text: 'Config Manual', + collapsible: true, + children: [ + { text: 'Common Config Manual', link: 'Common-Config-Manual' }, + { + text: 'ConfigNode Config Manual', + link: 'ConfigNode-Config-Manual', + }, + { text: 'DataNode Config Manual', link: 'DataNode-Config-Manual_timecho' }, + ], + }, + { + text: 'Syntax-Rule', + collapsible: true, + children: [ + { text: 'Identifiers', link: 'Syntax-Rule' }, + { text: 'Keywords', link: 'Keywords' }, + ], + }, + { text: 'Status Codes', link: 'Status-Codes' }, + ], + }, + { + text: 'FAQ', + collapsible: true, + prefix: 'FAQ/', + // children: 'structure', + children: [ + { + text: 'Frequently Asked Questions', + link: 'Frequently-asked-questions', + }, + ], + }, + ], +}; diff --git a/src/.vuepress/sidebar/V2.0.1/zh-Table.ts b/src/.vuepress/sidebar/V2.0.1/zh-Table.ts new file mode 100644 index 000000000..32a0ac9a8 --- /dev/null +++ b/src/.vuepress/sidebar/V2.0.1/zh-Table.ts @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export const zhSidebar = { + '/zh/UserGuide/V2.0.1-Table/': [ + { + text: 'IoTDB用户手册', + children: [], + }, + { + text: '关于IoTDB', + collapsible: true, + prefix: 'IoTDB-Introduction/', + // children: 'structure', 使用该方式自动获取文件夹下的文件 + children: [ + { text: '产品介绍', link: 'IoTDB-Introduction_apache' }, + { text: '应用场景', link: 'Scenario' }, + { text: '发布历史', link: 'Release-history_apache' }, + ], + }, + { + text: '预备知识', + collapsible: true, + prefix: 'Background-knowledge/', + children: [ + { text: '常见概念', link: 'Cluster-Concept_apache' }, + { text: '数据类型', link: 'Data-Type' }, + { text: '时序数据模型', link: 'Navigating_Time_Series_Data' }, + { text: '建模方案设计', link: 'Data-Model-and-Terminology' }, + ], + }, + { + text: '快速上手', + link: 'QuickStart/QuickStart_apache', + }, + { + text: '部署与运维', + collapsible: true, + prefix: 'Deployment-and-Maintenance/', + // children: 'structure', + children: [ + { text: '安装包获取', link: 'IoTDB-Package_apache' }, + { text: '资源规划', link: 'Database-Resources' }, + { text: '系统配置', link: 'Environment-Requirements' }, + { text: '单机版部署指导', link: 'Stand-Alone-Deployment_apache' }, + { text: '集群版部署指导', link: 'Cluster-Deployment_apache' }, + { text: 'Docker部署指导', link: 'Docker-Deployment_apache' }, + ], + }, + { + text: '基础功能', + collapsible: true, + prefix: 
'Basic-Concept/', + // children: 'structure', + children: [ + { text: '数据库管理', link: 'Database-Management' }, + { text: '表管理', link: 'Table-Management' }, + { text: '写入&更新', link: 'Write-Updata-Data' }, + { text: '数据查询', link: 'Query-Data' }, + { + text: '数据删除', + collapsible: true, + children: [ + { text: '数据删除', link: 'Delete-Data' }, + { text: '自动过期删除', link: 'TTL-Delete-Data' }, + ], + }, + ], + }, + { + text: '高级功能', + collapsible: true, + prefix: 'User-Manual/', + // children: 'structure', + children: [ + { text: '数据同步', link: 'Data-Sync_apache' }, + ], + }, + { + text: '工具体系', + collapsible: true, + prefix: 'Tools-System/', + // children: 'structure', + children: [ + { text: '命令行工具', link: 'CLI' }, + ], + }, + { + text: '应用编程接口', + collapsible: true, + prefix: 'API/', + // children: 'structure', + children: [ + { text: 'Java原生接口', link: 'Programming-Java-Native-API' }, + { text: 'Python原生接口', link: 'Programming-Python-Native-API' }, + { text: 'JDBC', link: 'Programming-JDBC' }, + ], + }, + { + text: 'SQL手册', + collapsible: true, + prefix: 'SQL-Manual/', + // children: 'structure', + children: [ + { text: '标识符', link: 'Identifier' }, + { text: '保留字&关键字', link: 'Keywords' }, + { + text: '查询语句', + collapsible: true, + children: [ + { text: '概览', link: 'overview' }, + { text: 'SELECT子句', link: 'Select-Clause' }, + { text: 'FROM&JOIN子句', link: 'From-Join-Clause' }, + { text: 'WHERE子句', link: 'Where-Clause' }, + { text: 'GROUP BY子句', link: 'GroupBy-Clause' }, + { text: 'HAVING子句', link: 'Having-Clause' }, + { text: 'FILL子句', link: 'Fill-Clause' }, + { text: 'ORDER BY子句', link: 'OrderBy-Clause' }, + { text: 'LIMIT&OFFSET子句', link: 'Limit-Offset-Clause' }, + ], + }, + ], + }, + { + text: '技术内幕', + collapsible: true, + prefix: 'Technical-Insider/', + // children: 'structure', + children: [ + { text: '压缩&编码', link: 'Encoding-and-Compression' }, + { text: '数据分区和负载均衡', link: 'Cluster-data-partitioning' }, + ], + }, + { + text: '附录', + collapsible: true, + prefix: 
'Reference/', + // children: 'structure', + children: [ + { text: '示例数据', link: 'Sample-Data' }, + { text: '配置参数', link: 'System-Config-Manual' }, + { text: '状态码', link: 'Status-Codes' }, + ], + }, + ], +}; diff --git a/src/.vuepress/sidebar/V2.0.1/zh-Tree.ts b/src/.vuepress/sidebar/V2.0.1/zh-Tree.ts new file mode 100644 index 000000000..57734bed0 --- /dev/null +++ b/src/.vuepress/sidebar/V2.0.1/zh-Tree.ts @@ -0,0 +1,240 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +export const zhSidebar = { + '/zh/UserGuide/latest/': [ + { + text: 'IoTDB用户手册', + children: [], + }, + { + text: '关于IoTDB', + collapsible: true, + prefix: 'IoTDB-Introduction/', + // children: 'structure', 使用该方式自动获取文件夹下的文件 + children: [ + { text: '产品介绍', link: 'IoTDB-Introduction_apache' }, + { text: '应用场景', link: 'Scenario' }, + { text: '发布历史', link: 'Release-history_apache' }, + ], + }, + { + text: '预备知识', + collapsible: true, + prefix: 'Background-knowledge/', + children: [ + { text: '常见概念', link: 'Cluster-Concept_apache' }, + { text: '数据类型', link: 'Data-Type' }, + ], + }, + { + text: '快速上手', + link: 'QuickStart/QuickStart_apache', + }, + { + text: '部署与运维', + collapsible: true, + prefix: 'Deployment-and-Maintenance/', + // children: 'structure', + children: [ + { text: '安装包获取', link: 'IoTDB-Package_apache' }, + { text: '资源规划', link: 'Database-Resources' }, + { text: '系统配置', link: 'Environment-Requirements' }, + { text: '单机版部署指导', link: 'Stand-Alone-Deployment_apache' }, + { text: '集群版部署指导', link: 'Cluster-Deployment_apache' }, + { text: 'Docker部署指导', link: 'Docker-Deployment_apache' }, + ], + }, + { + text: '基础功能', + collapsible: true, + prefix: 'Basic-Concept/', + // children: 'structure', + children: [ + { + text: '数据建模', + collapsible: true, + children: [ + { text: '时序数据模型', link: 'Navigating_Time_Series_Data' }, + { text: '建模方案设计', link: 'Data-Model-and-Terminology' }, + { text: '测点管理', link: 'Operate-Metadata_apache' }, + ], + }, + { text: '写入&删除', link: 'Write-Delete-Data' }, + { text: '数据查询', link: 'Query-Data' }, + ], + }, + { + text: '高级功能', + collapsible: true, + prefix: 'User-Manual/', + // children: 'structure', + children: [ + { text: '数据同步', link: 'Data-Sync_apache' }, + { text: '数据订阅', link: 'Data-subscription' }, + { + text: '安全管理', + collapsible: true, + children: [{ text: '权限管理', link: 'Authority-Management' }], + }, + { text: '用户自定义函数', link: 'User-defined-function_apache' }, + { text: '连续查询', link: 'Database-Programming' }, + { + 
text: '数据库编程', + collapsible: true, + children: [ + { text: 'UDF开发', link: 'UDF-development' }, + { text: '触发器', link: 'Trigger' }, + { text: '流处理框架', link: 'Streaming_apache' }, + ], + }, + { + text: '运维语句', + collapsible: true, + children: [ + { text: '查询性能分析', link: 'Query-Performance-Analysis' }, + { text: '负载均衡', link: 'Load-Balance' }, + { text: '数据修复', link: 'Data-Recovery' }, + ], + }, + ], + }, + { + text: '工具体系', + collapsible: true, + prefix: 'Tools-System/', + // children: 'structure', + children: [ + { text: '命令行工具', link: 'CLI' }, + { text: '监控工具', link: 'Monitor-Tool_apache' }, + { text: '测试工具', link: 'Benchmark' }, + { text: '集群管理工具', link: 'Maintenance-Tool_apache' }, + { text: '数据导入', link: 'Data-Import-Tool' }, + { text: '数据导出', link: 'Data-Export-Tool' }, + ], + }, + { + text: '应用编程接口', + collapsible: true, + prefix: 'API/', + // children: 'structure', + children: [ + { text: 'Java原生接口', collapsible: true, + children: [ + { text: 'Java原生API', link: 'Programming-Java-Native-API' }, + { text: '数据订阅API', link: 'Programming-Data-Subscription' }, + ], + }, + { text: 'Python原生接口', link: 'Programming-Python-Native-API' }, + { text: 'C++原生接口', link: 'Programming-Cpp-Native-API' }, + { text: 'Go原生接口', link: 'Programming-Go-Native-API' }, + { text: 'C#原生接口', link: 'Programming-CSharp-Native-API' }, + { text: 'Node.js原生接口', link: 'Programming-NodeJS-Native-API' }, + { text: 'Rust原生接口', link: 'Programming-Rust-Native-API' }, + { text: 'JDBC (不推荐)', link: 'Programming-JDBC' }, + { text: 'MQTT协议', link: 'Programming-MQTT' }, + { text: 'Kafka', link: 'Programming-Kafka' }, + { + text: 'REST API', + collapsible: true, + children: [ + { text: 'V1 (不推荐)', link: 'RestServiceV1' }, + { text: 'V2', link: 'RestServiceV2' }, + ], + }, + ], + }, + { + text: '系统集成', + collapsible: true, + prefix: 'Ecosystem-Integration/', + // children: 'structure', + children: [ + { text: 'Apache Flink(IoTDB)', link: 'Flink-IoTDB' }, + { text: 'Apache Flink(TsFile)', link: 
'Flink-TsFile' }, + { text: 'Apache Hive(TsFile)', link: 'Hive-TsFile' }, + { text: 'Apache NiFi', link: 'NiFi-IoTDB' }, + { text: 'Apache Spark(TsFile)', link: 'Spark-TsFile' }, + { text: 'Apache Spark(IoTDB)', link: 'Spark-IoTDB' }, + { text: 'Apache Zeppelin', link: 'Zeppelin-IoTDB_apache' }, + { text: 'DBeaver', link: 'DBeaver' }, + { text: 'Grafana(IoTDB)', link: 'Grafana-Connector' }, + { text: 'Grafana插件', link: 'Grafana-Plugin' }, + { text: 'Kubernetes', link: 'Kubernetes_apache' }, + { text: 'Telegraf插件', link: 'Telegraf' }, + { text: 'ThingsBoard', link: 'Thingsboard' }, + ], + }, + { + text: 'SQL手册', + collapsible: true, + prefix: 'SQL-Manual/', + // children: 'structure', + children: [ + { text: '标识符', link: 'Syntax-Rule' }, + { text: '关键字', link: 'Keywords' }, + { text: 'SQL手册', link: 'SQL-Manual' }, + { + text: '函数与运算符', + collapsible: true, + children: [ + { text: 'UDF函数库', link: 'UDF-Libraries_apache' }, + { text: '函数与运算符', link: 'Operator-and-Expression' }, + { text: '内置函数与表达式', link: 'Function-and-Expression' }, + ], + }, + ], + }, + { + text: '技术内幕', + collapsible: true, + prefix: 'Technical-Insider/', + // children: 'structure', + children: [ + { text: '研究论文', link: 'Publication' }, + { text: '压缩&编码', link: 'Encoding-and-Compression' }, + { text: '数据分区和负载均衡', link: 'Cluster-data-partitioning' }, + ], + }, + { + text: '附录', + collapsible: true, + prefix: 'Reference/', + // children: 'structure', + children: [ + { + text: '配置参数', + collapsible: true, + children: [ + { text: '配置参数', link: 'Common-Config-Manual' }, + { text: 'ConfigNode配置参数', link: 'ConfigNode-Config-Manual' }, + { text: 'DataNode配置参数', link: 'DataNode-Config-Manual_apache' }, + ], + }, + { text: '状态码', link: 'Status-Codes' }, + ], + }, + { + text: 'FAQ', + collapsible: true, + prefix: 'FAQ/', + // children: 'structure', + children: [{ text: '常见问题', link: 'Frequently-asked-questions' }], + }, + ], +}; diff --git a/src/.vuepress/sidebar/zh.ts b/src/.vuepress/sidebar/zh.ts index 
c248341fd..329931ada 100644 --- a/src/.vuepress/sidebar/zh.ts +++ b/src/.vuepress/sidebar/zh.ts @@ -17,12 +17,16 @@ */ import { sidebar } from 'vuepress-theme-hope'; +import { zhSidebar as V201xTableSidebar } from './V2.0.1/zh-Table.js'; +import { zhSidebar as V201xTreeSidebar } from './V2.0.1/zh-Tree.js'; import { zhSidebar as V103xSidebar } from './V1.3.3/zh.js'; import { zhSidebar as V1030Sidebar } from './V1.3.0-2/zh.js'; import { zhSidebar as V102xSidebar } from './V1.2.x/zh.js'; import { zhSidebar as V013xSidebar } from './V0.13.x/zh.js'; export const zhSidebar = sidebar({ + ...V201xTableSidebar, + ...V201xTreeSidebar, ...V103xSidebar, ...V1030Sidebar, ...V102xSidebar, diff --git a/src/.vuepress/sidebar_timecho/V0.13.x/zh.ts b/src/.vuepress/sidebar_timecho/V0.13.x/zh.ts index 78c9f1e4d..555b98cb7 100644 --- a/src/.vuepress/sidebar_timecho/V0.13.x/zh.ts +++ b/src/.vuepress/sidebar_timecho/V0.13.x/zh.ts @@ -202,7 +202,7 @@ export const zhSidebar = { ], }, { - text: '参考', + text: '附录', collapsible: true, prefix: 'Reference/', children: [ diff --git a/src/.vuepress/sidebar_timecho/V1.2.x/zh.ts b/src/.vuepress/sidebar_timecho/V1.2.x/zh.ts index 508510388..288a83303 100644 --- a/src/.vuepress/sidebar_timecho/V1.2.x/zh.ts +++ b/src/.vuepress/sidebar_timecho/V1.2.x/zh.ts @@ -166,7 +166,7 @@ export const zhSidebar = { children: [{ text: '常见问题', link: 'Frequently-asked-questions' }], }, { - text: '参考', + text: '附录', collapsible: true, prefix: 'Reference/', // children: 'structure', diff --git a/src/.vuepress/sidebar_timecho/V1.3.0-2/zh.ts b/src/.vuepress/sidebar_timecho/V1.3.0-2/zh.ts index 834a0bf80..8749ff82c 100644 --- a/src/.vuepress/sidebar_timecho/V1.3.0-2/zh.ts +++ b/src/.vuepress/sidebar_timecho/V1.3.0-2/zh.ts @@ -167,7 +167,7 @@ export const zhSidebar = { children: [{ text: '常见问题', link: 'Frequently-asked-questions' }], }, { - text: '参考', + text: '附录', collapsible: true, prefix: 'Reference/', // children: 'structure', diff --git 
a/src/.vuepress/sidebar_timecho/V1.3.3/en.ts b/src/.vuepress/sidebar_timecho/V1.3.3/en.ts index 29748b983..e63cb6d81 100644 --- a/src/.vuepress/sidebar_timecho/V1.3.3/en.ts +++ b/src/.vuepress/sidebar_timecho/V1.3.3/en.ts @@ -17,7 +17,7 @@ */ export const enSidebar = { - '/UserGuide/latest/': [ + '/UserGuide/V1.3.3/': [ { text: 'IoTDB User Guide', children: [], diff --git a/src/.vuepress/sidebar_timecho/V1.3.3/zh.ts b/src/.vuepress/sidebar_timecho/V1.3.3/zh.ts index 54dcb21e4..0f04d574a 100644 --- a/src/.vuepress/sidebar_timecho/V1.3.3/zh.ts +++ b/src/.vuepress/sidebar_timecho/V1.3.3/zh.ts @@ -17,7 +17,7 @@ */ export const zhSidebar = { - '/zh/UserGuide/latest/': [ + '/zh/UserGuide/V1.3.3/': [ { text: 'IoTDB用户手册', children: [], @@ -225,7 +225,7 @@ export const zhSidebar = { ], }, { - text: '参考', + text: '附录', collapsible: true, prefix: 'Reference/', // children: 'structure', diff --git a/src/.vuepress/sidebar_timecho/V2.0.1/zh-Table.ts b/src/.vuepress/sidebar_timecho/V2.0.1/zh-Table.ts index f5766bb7b..74d40d644 100644 --- a/src/.vuepress/sidebar_timecho/V2.0.1/zh-Table.ts +++ b/src/.vuepress/sidebar_timecho/V2.0.1/zh-Table.ts @@ -17,7 +17,7 @@ */ export const zhSidebar = { - '/zh/UserGuide/V2.0.1/Table': [ + '/zh/UserGuide/V2.0.1-Table/': [ { text: 'IoTDB用户手册', children: [], @@ -30,6 +30,7 @@ export const zhSidebar = { children: [ { text: '产品介绍', link: 'IoTDB-Introduction_timecho' }, { text: '应用场景', link: 'Scenario' }, + { text: '发布历史', link: 'Release-history_timecho' }, ], }, { @@ -39,6 +40,8 @@ export const zhSidebar = { children: [ { text: '常见概念', link: 'Cluster-Concept_timecho' }, { text: '数据类型', link: 'Data-Type' }, + { text: '时序数据模型', link: 'Navigating_Time_Series_Data' }, + { text: '建模方案设计', link: 'Data-Model-and-Terminology' }, ], }, { @@ -67,16 +70,8 @@ export const zhSidebar = { prefix: 'Basic-Concept/', // children: 'structure', children: [ - { text: '示例数据', link: 'Sample-Data' }, - { - text: '数据建模', - collapsible: true, - children: [ - { text: 
'时序数据模型', link: 'Navigating_Time_Series_Data' }, - { text: '建模方案设计', link: 'Data-Model-and-Terminology' }, - { text: '数据库&表管理', link: 'Database&Table-Management' }, - ], - }, + { text: '数据库管理', link: 'Database-Management' }, + { text: '表管理', link: 'Table-Management' }, { text: '写入&更新', link: 'Write-Updata-Data' }, { text: '数据查询', link: 'Query-Data' }, { @@ -124,6 +119,8 @@ export const zhSidebar = { prefix: 'SQL-Manual/', // children: 'structure', children: [ + { text: '标识符', link: 'Identifier' }, + { text: '保留字&关键字', link: 'Keywords' }, { text: '查询语句', collapsible: true, @@ -139,8 +136,6 @@ export const zhSidebar = { { text: 'LIMIT&OFFSET子句', link: 'Limit-Offset-Clause' }, ], }, - { text: '保留字&关键字', link: 'Keywords' }, - { text: '标识符', link: 'Identifier' }, ], }, { @@ -154,12 +149,14 @@ export const zhSidebar = { ], }, { - text: '参考', + text: '附录', collapsible: true, prefix: 'Reference/', // children: 'structure', children: [ + { text: '示例数据', link: 'Sample-Data' }, { text: '配置参数', link: 'System-Config-Manual' }, + { text: '状态码', link: 'Status-Codes' }, ], }, ], diff --git a/src/.vuepress/sidebar_timecho/V2.0.1/zh-Tree.ts b/src/.vuepress/sidebar_timecho/V2.0.1/zh-Tree.ts index dc0900727..5d861ba64 100644 --- a/src/.vuepress/sidebar_timecho/V2.0.1/zh-Tree.ts +++ b/src/.vuepress/sidebar_timecho/V2.0.1/zh-Tree.ts @@ -17,7 +17,7 @@ */ export const zhSidebar = { - '/zh/UserGuide/V2.0.1/Tree': [ + '/zh/UserGuide/latest/': [ { text: 'IoTDB用户手册', children: [], @@ -201,6 +201,8 @@ export const zhSidebar = { prefix: 'SQL-Manual/', // children: 'structure', children: [ + { text: '标识符', link: 'Syntax-Rule' }, + { text: '关键字', link: 'Keywords' }, { text: 'SQL手册', link: 'SQL-Manual' }, { text: '函数与运算符', @@ -225,7 +227,7 @@ export const zhSidebar = { ], }, { - text: '参考', + text: '附录', collapsible: true, prefix: 'Reference/', // children: 'structure', @@ -239,14 +241,6 @@ export const zhSidebar = { { text: 'DataNode配置参数', link: 'DataNode-Config-Manual_timecho' }, ], }, - { - 
text: '语法约定', - collapsible: true, - children: [ - { text: '标识符', link: 'Syntax-Rule' }, - { text: '关键字', link: 'Keywords' }, - ], - }, { text: '状态码', link: 'Status-Codes' }, ], }, diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-CSharp-Native-API.md b/src/UserGuide/V1.3.3/API/Programming-CSharp-Native-API.md similarity index 62% rename from src/UserGuide/V2.0.1/Tree/API/Programming-CSharp-Native-API.md rename to src/UserGuide/V1.3.3/API/Programming-CSharp-Native-API.md index 12d431a3a..06f403f42 100644 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-CSharp-Native-API.md +++ b/src/UserGuide/V1.3.3/API/Programming-CSharp-Native-API.md @@ -1,22 +1,19 @@ # C# Native API @@ -35,33 +32,31 @@ Note that the `Apache.IoTDB` package only supports versions greater than `.net f ## Prerequisites - .NET SDK Version >= 5.0 - .NET Framework >= 4.6.1 +- .NET SDK Version >= 5.0 +- .NET Framework >= 4.6.1 ## How to Use the Client (Quick Start) Users can quickly get started by referring to the use cases under the Apache-IoTDB-Client-CSharp-UserCase directory. These use cases serve as a useful resource for getting familiar with the client's functionality and capabilities. -For those who wish to delve deeper into the client's usage and explore more advanced features, the samples directory contains additional code samples. +For those who wish to delve deeper into the client's usage and explore more advanced features, the samples directory contains additional code samples. 
## Developer environment requirements for iotdb-client-csharp -``` -.NET SDK Version >= 5.0 -.NET Framework >= 4.6.1 -ApacheThrift >= 0.14.1 -NLog >= 4.7.9 -``` +- .NET SDK Version >= 5.0 +- .NET Framework >= 4.6.1 +- ApacheThrift >= 0.14.1 +- NLog >= 4.7.9 ### OS -* Linux, Macos or other unix-like OS -* Windows+bash(WSL, cygwin, Git Bash) +- Linux, Macos or other unix-like OS +- Windows+bash(WSL, cygwin, Git Bash) ### Command Line Tools -* dotnet CLI -* Thrift +- dotnet CLI +- Thrift ## Basic interface description @@ -79,7 +74,7 @@ var session_pool = new SessionPool(host, port, pool_size); // Open Session await session_pool.Open(false); -// Create TimeSeries +// Create TimeSeries await session_pool.CreateTimeSeries("root.test_group.test_device.ts1", TSDataType.TEXT, TSEncoding.PLAIN, Compressor.UNCOMPRESSED); await session_pool.CreateTimeSeries("root.test_group.test_device.ts2", TSDataType.BOOLEAN, TSEncoding.PLAIN, Compressor.UNCOMPRESSED); await session_pool.CreateTimeSeries("root.test_group.test_device.ts3", TSDataType.INT32, TSEncoding.PLAIN, Compressor.UNCOMPRESSED); @@ -113,7 +108,7 @@ await session_pool.Close(); - Construction: ```csharp -var rowRecord = +var rowRecord = new RowRecord(long timestamps, List values, List measurements); ``` @@ -131,12 +126,10 @@ var rowRecord = - Construction: ```csharp -var tablet = +var tablet = Tablet(string deviceId, List measurements, List> values, List timestamps); ``` - - ## **API** ### **Basic API** @@ -153,43 +146,43 @@ var tablet = ### **Record API** -| api name | parameters | notes | use example | -| ----------------------------------- | ----------------------------- | ----------------------------------- | ------------------------------------------------------------ | -| InsertRecordAsync | string, RowRecord | insert single record | session_pool.InsertRecordAsync("root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE", new RowRecord(1, values, measures)); | -| InsertRecordsAsync | List\, List\ | insert 
records | session_pool.InsertRecordsAsync(device_id, rowRecords) | -| InsertRecordsOfOneDeviceAsync | string, List\ | insert records of one device | session_pool.InsertRecordsOfOneDeviceAsync(device_id, rowRecords) | -| InsertRecordsOfOneDeviceSortedAsync | string, List\ | insert sorted records of one device | InsertRecordsOfOneDeviceSortedAsync(deviceId, sortedRowRecords); | -| TestInsertRecordAsync | string, RowRecord | test insert record | session_pool.TestInsertRecordAsync("root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE", rowRecord) | -| TestInsertRecordsAsync | List\, List\ | test insert record | session_pool.TestInsertRecordsAsync(device_id, rowRecords) | +| api name | parameters | notes | use example | +| ----------------------------------- | --------------------------------- | ----------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| InsertRecordAsync | string, RowRecord | insert single record | session_pool.InsertRecordAsync("root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE", new RowRecord(1, values, measures)); | +| InsertRecordsAsync | List\, List\ | insert records | session_pool.InsertRecordsAsync(device_id, rowRecords) | +| InsertRecordsOfOneDeviceAsync | string, List\ | insert records of one device | session_pool.InsertRecordsOfOneDeviceAsync(device_id, rowRecords) | +| InsertRecordsOfOneDeviceSortedAsync | string, List\ | insert sorted records of one device | InsertRecordsOfOneDeviceSortedAsync(deviceId, sortedRowRecords); | +| TestInsertRecordAsync | string, RowRecord | test insert record | session_pool.TestInsertRecordAsync("root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE", rowRecord) | +| TestInsertRecordsAsync | List\, List\ | test insert record | session_pool.TestInsertRecordsAsync(device_id, rowRecords) | ### **Tablet API** -| api name | parameters | notes | use example | -| 
---------------------- | ------------ | -------------------- | -------------------------------------------- | -| InsertTabletAsync | Tablet | insert single tablet | session_pool.InsertTabletAsync(tablet) | +| api name | parameters | notes | use example | +| ---------------------- | -------------- | -------------------- | -------------------------------------------- | +| InsertTabletAsync | Tablet | insert single tablet | session_pool.InsertTabletAsync(tablet) | | InsertTabletsAsync | List\ | insert tablets | session_pool.InsertTabletsAsync(tablets) | -| TestInsertTabletAsync | Tablet | test insert tablet | session_pool.TestInsertTabletAsync(tablet) | +| TestInsertTabletAsync | Tablet | test insert tablet | session_pool.TestInsertTabletAsync(tablet) | | TestInsertTabletsAsync | List\ | test insert tablets | session_pool.TestInsertTabletsAsync(tablets) | ### **SQL API** -| api name | parameters | notes | use example | -| ----------------------------- | ---------- | ------------------------------ | ------------------------------------------------------------ | -| ExecuteQueryStatementAsync | string | execute sql query statement | session_pool.ExecuteQueryStatementAsync("select * from root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE where time<15"); | +| api name | parameters | notes | use example | +| ----------------------------- | ---------- | ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ExecuteQueryStatementAsync | string | execute sql query statement | session_pool.ExecuteQueryStatementAsync("select \* from root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE where time<15"); | | ExecuteNonQueryStatementAsync | string | execute sql nonquery statement | session_pool.ExecuteNonQueryStatementAsync( "create timeseries root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE.status 
with datatype=BOOLEAN,encoding=PLAIN") | ### **Scheam API** -| api name | parameters | notes | use example | -| -------------------------- | ------------------------------------------------------------ | --------------------------- | ------------------------------------------------------------ | -| SetStorageGroup | string | set storage group | session_pool.SetStorageGroup("root.97209_TEST_CSHARP_CLIENT_GROUP_01") | -| CreateTimeSeries | string, TSDataType, TSEncoding, Compressor | create time series | session_pool.InsertTabletsAsync(tablets) | -| DeleteStorageGroupAsync | string | delete single storage group | session_pool.DeleteStorageGroupAsync("root.97209_TEST_CSHARP_CLIENT_GROUP_01") | -| DeleteStorageGroupsAsync | List\ | delete storage group | session_pool.DeleteStorageGroupAsync("root.97209_TEST_CSHARP_CLIENT_GROUP") | +| api name | parameters | notes | use example | +| -------------------------- | ---------------------------------------------------------------------------- | --------------------------- | -------------------------------------------------------------------------------------------------- | +| SetStorageGroup | string | set storage group | session_pool.SetStorageGroup("root.97209_TEST_CSHARP_CLIENT_GROUP_01") | +| CreateTimeSeries | string, TSDataType, TSEncoding, Compressor | create time series | session_pool.InsertTabletsAsync(tablets) | +| DeleteStorageGroupAsync | string | delete single storage group | session_pool.DeleteStorageGroupAsync("root.97209_TEST_CSHARP_CLIENT_GROUP_01") | +| DeleteStorageGroupsAsync | List\ | delete storage group | session_pool.DeleteStorageGroupAsync("root.97209_TEST_CSHARP_CLIENT_GROUP") | | CreateMultiTimeSeriesAsync | List\, List\ , List\ , List\ | create multi time series | session_pool.CreateMultiTimeSeriesAsync(ts_path_lst, data_type_lst, encoding_lst, compressor_lst); | -| DeleteTimeSeriesAsync | List\ | delete time series | | -| DeleteTimeSeriesAsync | string | delete time series | | -| DeleteDataAsync | 
List\, long, long | delete data | session_pool.DeleteDataAsync(ts_path_lst, 2, 3) | +| DeleteTimeSeriesAsync | List\ | delete time series | | +| DeleteTimeSeriesAsync | string | delete time series | | +| DeleteDataAsync | List\, long, long | delete data | session_pool.DeleteDataAsync(ts_path_lst, 2, 3) | ### **Other API** @@ -197,8 +190,6 @@ var tablet = | -------------------------- | ---------- | --------------------------- | ---------------------------------------------------- | | CheckTimeSeriesExistsAsync | string | check if time series exists | session_pool.CheckTimeSeriesExistsAsync(time series) | - - [e.g.](https://github.com/apache/iotdb-client-csharp/tree/main/samples/Apache.IoTDB.Samples) ## SessionPool @@ -210,4 +201,3 @@ We use the `ConcurrentQueue` data structure to encapsulate a client queue to mai When a request occurs, it will try to find an idle client connection from the Connection pool. If there is no idle connection, the program will need to wait until there is an idle connection When a connection is used up, it will automatically return to the pool and wait for the next time it is used up - diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-Cpp-Native-API.md b/src/UserGuide/V1.3.3/API/Programming-Cpp-Native-API.md similarity index 70% rename from src/UserGuide/V2.0.1/Tree/API/Programming-Cpp-Native-API.md rename to src/UserGuide/V1.3.3/API/Programming-Cpp-Native-API.md index b462983d2..0d2267ff1 100644 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-Cpp-Native-API.md +++ b/src/UserGuide/V1.3.3/API/Programming-Cpp-Native-API.md @@ -1,22 +1,19 @@ # C++ Native API @@ -35,68 +32,75 @@ ### Install Required Dependencies - **MAC** - 1. Install Bison: - Use the following brew command to install the Bison version: - ```shell - brew install bison - ``` + 1. Install Bison: + + Use the following brew command to install the Bison version: - 2. Install Boost: Make sure to install the latest version of Boost. 
+ ```shell + brew install bison + ``` - ```shell - brew install boost - ``` + 2. Install Boost: Make sure to install the latest version of Boost. - 3. Check OpenSSL: Make sure the OpenSSL library is installed. The default OpenSSL header file path is "/usr/local/opt/openssl/include". + ```shell + brew install boost + ``` - If you encounter errors related to OpenSSL not being found during compilation, try adding `-Dopenssl.include.dir=""`. + 3. Check OpenSSL: Make sure the OpenSSL library is installed. The default OpenSSL header file path is "/usr/local/opt/openssl/include". + + If you encounter errors related to OpenSSL not being found during compilation, try adding `-Dopenssl.include.dir=""`. - **Ubuntu 16.04+ or Other Debian-based Systems** Use the following commands to install dependencies: - ```shell - sudo apt-get update - sudo apt-get install gcc g++ bison flex libboost-all-dev libssl-dev - ``` + ```shell + sudo apt-get update + sudo apt-get install gcc g++ bison flex libboost-all-dev libssl-dev + ``` - **CentOS 7.7+/Fedora/Rocky Linux or Other Red Hat-based Systems** Use the yum command to install dependencies: - ```shell - sudo yum update - sudo yum install gcc gcc-c++ boost-devel bison flex openssl-devel - ``` + ```shell + sudo yum update + sudo yum install gcc gcc-c++ boost-devel bison flex openssl-devel + ``` - **Windows** - 1. Set Up the Build Environment - - Install MS Visual Studio (version 2019+ recommended): Make sure to select Visual Studio C/C++ IDE and compiler (supporting CMake, Clang, MinGW) during installation. - - Download and install [CMake](https://cmake.org/download/). + 1. Set Up the Build Environment + + - Install MS Visual Studio (version 2019+ recommended): Make sure to select Visual Studio C/C++ IDE and compiler (supporting CMake, Clang, MinGW) during installation. + - Download and install [CMake](https://cmake.org/download/). + + 2. Download and Install Flex, Bison - 2. 
Download and Install Flex, Bison - - Download [Win_Flex_Bison](https://sourceforge.net/projects/winflexbison/). - - After downloading, rename the executables to flex.exe and bison.exe to ensure they can be found during compilation, and add the directory of these executables to the PATH environment variable. + - Download [Win_Flex_Bison](https://sourceforge.net/projects/winflexbison/). + - After downloading, rename the executables to flex.exe and bison.exe to ensure they can be found during compilation, and add the directory of these executables to the PATH environment variable. - 3. Install Boost Library - - Download [Boost](https://www.boost.org/users/download/). - - Compile Boost locally: Run `bootstrap.bat` and `b2.exe` in sequence. - - Add the Boost installation directory to the PATH environment variable, e.g., `C:\Program Files (x86)\boost_1_78_0`. + 3. Install Boost Library - 4. Install OpenSSL - - Download and install [OpenSSL](http://slproweb.com/products/Win32OpenSSL.html). - - Add the include directory under the installation directory to the PATH environment variable. + - Download [Boost](https://www.boost.org/users/download/). + - Compile Boost locally: Run `bootstrap.bat` and `b2.exe` in sequence. + - Add the Boost installation directory to the PATH environment variable, e.g., `C:\Program Files (x86)\boost_1_78_0`. + + 4. Install OpenSSL + - Download and install [OpenSSL](http://slproweb.com/products/Win32OpenSSL.html). + - Add the include directory under the installation directory to the PATH environment variable. ### Compilation Clone the source code from git: + ```shell git clone https://github.com/apache/iotdb.git ``` The default main branch is the master branch. 
If you want to use a specific release version, switch to that branch (e.g., version 1.3.2): + ```shell git checkout rc/1.3.2 ``` @@ -104,30 +108,36 @@ git checkout rc/1.3.2 Run Maven to compile in the IoTDB root directory: - Mac or Linux with glibc version >= 2.32 - ```shell - ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp - ``` + + ```shell + ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp + ``` - Linux with glibc version >= 2.31 - ```shell - ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Diotdb-tools-thrift.version=0.14.1.1-old-glibc-SNAPSHOT - ``` + + ```shell + ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Diotdb-tools-thrift.version=0.14.1.1-old-glibc-SNAPSHOT + ``` - Linux with glibc version >= 2.17 - ```shell - ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Diotdb-tools-thrift.version=0.14.1.1-glibc223-SNAPSHOT - ``` + + ```shell + ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Diotdb-tools-thrift.version=0.14.1.1-glibc223-SNAPSHOT + ``` - Windows using Visual Studio 2022 - ```batch - .\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp - ``` + + ```batch + .\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp + ``` - Windows using Visual Studio 2019 - ```batch - .\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Dcmake.generator="Visual Studio 16 2019" -Diotdb-tools-thrift.version=0.14.1.1-msvc142-SNAPSHOT - ``` - - If you haven't added the Boost library path to the PATH environment variable, you need to add the relevant parameters to the compile command, e.g., `-DboostIncludeDir="C:\Program Files (x86)\boost_1_78_0" -DboostLibraryDir="C:\Program Files (x86)\boost_1_78_0\stage\lib"`. 
+ + ```batch + .\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Dcmake.generator="Visual Studio 16 2019" -Diotdb-tools-thrift.version=0.14.1.1-msvc142-SNAPSHOT + ``` + + - If you haven't added the Boost library path to the PATH environment variable, you need to add the relevant parameters to the compile command, e.g., `-DboostIncludeDir="C:\Program Files (x86)\boost_1_78_0" -DboostLibraryDir="C:\Program Files (x86)\boost_1_78_0\stage\lib"`. After successful compilation, the packaged library files will be located in `iotdb-client/client-cpp/target`, and you can find the compiled example program under `example/client-cpp-example/target`. @@ -136,27 +146,29 @@ After successful compilation, the packaged library files will be located in `iot Q: What are the requirements for the environment on Linux? A: + - The known minimum version requirement for glibc (x86_64 version) is 2.17, and the minimum version for GCC is 5.5. - The known minimum version requirement for glibc (ARM version) is 2.31, and the minimum version for GCC is 10.2. - If the above requirements are not met, you can try compiling Thrift locally: - - Download the code from https://github.com/apache/iotdb-bin-resources/tree/iotdb-tools-thrift-v0.14.1.0/iotdb-tools-thrift. - - Run `./mvnw clean install`. - - Go back to the IoTDB code directory and run `./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp`. + - Download the code from . + - Run `./mvnw clean install`. + - Go back to the IoTDB code directory and run `./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp`. Q: How to resolve the `undefined reference to '_libc_single_thread'` error during Linux compilation? A: + - This issue is caused by the precompiled Thrift dependencies requiring a higher version of glibc. 
- You can try adding `-Diotdb-tools-thrift.version=0.14.1.1-glibc223-SNAPSHOT` or `-Diotdb-tools-thrift.version=0.14.1.1-old-glibc-SNAPSHOT` to the Maven compile command. Q: What if I need to compile using Visual Studio 2017 or earlier on Windows? A: -- You can try compiling Thrift locally before compiling the client: - - Download the code from https://github.com/apache/iotdb-bin-resources/tree/iotdb-tools-thrift-v0.14.1.0/iotdb-tools-thrift. - - Run `.\mvnw.cmd clean install`. - - Go back to the IoTDB code directory and run `.\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Dcmake.generator="Visual Studio 15 2017"`. +- You can try compiling Thrift locally before compiling the client: + - Download the code from . + - Run `.\mvnw.cmd clean install`. + - Go back to the IoTDB code directory and run `.\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Dcmake.generator="Visual Studio 15 2017"`. ## Native APIs @@ -165,19 +177,23 @@ Here we show the commonly used interfaces and their parameters in the Native API ### Initialization - Open a Session + ```cpp -void open(); +void open(); ``` - Open a session, with a parameter to specify whether to enable RPC compression + ```cpp -void open(bool enableRPCCompression); +void open(bool enableRPCCompression); ``` + Notice: this RPC compression status of client must comply with that of IoTDB server - Close a Session + ```cpp -void close(); +void close(); ``` ### Data Definition Interface (DDL) @@ -185,11 +201,13 @@ void close(); #### Database Management - CREATE DATABASE + ```cpp void setStorageGroup(const std::string &storageGroupId); ``` - Delete one or several databases + ```cpp void deleteStorageGroup(const std::string &storageGroup); void deleteStorageGroups(const std::vector &storageGroups); @@ -198,10 +216,11 @@ void deleteStorageGroups(const std::vector &storageGroups); #### Timeseries Management - Create one or multiple timeseries + ```cpp void 
createTimeseries(const std::string &path, TSDataType::TSDataType dataType, TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor); - + void createMultiTimeseries(const std::vector &paths, const std::vector &dataTypes, const std::vector &encodings, @@ -213,6 +232,7 @@ void createMultiTimeseries(const std::vector &paths, ``` - Create aligned timeseries + ```cpp void createAlignedTimeseries(const std::string &deviceId, const std::vector &measurements, @@ -222,12 +242,14 @@ void createAlignedTimeseries(const std::string &deviceId, ``` - Delete one or several timeseries + ```cpp void deleteTimeseries(const std::string &path); void deleteTimeseries(const std::vector &paths); ``` - Check whether the specific timeseries exists. + ```cpp bool checkTimeseriesExists(const std::string &path); ``` @@ -235,21 +257,25 @@ bool checkTimeseriesExists(const std::string &path); #### Schema Template - Create a schema template + ```cpp void createSchemaTemplate(const Template &templ); ``` - Set the schema template named `templateName` at path `prefixPath`. + ```cpp void setSchemaTemplate(const std::string &template_name, const std::string &prefix_path); ``` - Unset the schema template + ```cpp void unsetSchemaTemplate(const std::string &prefix_path, const std::string &template_name); ``` - After measurement template created, you can edit the template with belowed APIs. 
+ ```cpp // Add aligned measurements to a template void addAlignedMeasurementsInTemplate(const std::string &template_name, @@ -284,6 +310,7 @@ void deleteNodeInTemplate(const std::string &template_name, const std::string &p ``` - You can query measurement templates with these APIS: + ```cpp // Return the amount of measurements inside a template int countMeasurementsInTemplate(const std::string &template_name); @@ -301,7 +328,6 @@ std::vector showMeasurementsInTemplate(const std::string &template_ std::vector showMeasurementsInTemplate(const std::string &template_name, const std::string &pattern); ``` - ### Data Manipulation Interface (DML) #### Insert @@ -309,24 +335,28 @@ std::vector showMeasurementsInTemplate(const std::string &template_ > It is recommended to use insertTablet to help improve write efficiency. - Insert a Tablet,which is multiple rows of a device, each row has the same measurements - - Better Write Performance - - Support null values: fill the null value with any value, and then mark the null value via BitMap + - Better Write Performance + - Support null values: fill the null value with any value, and then mark the null value via BitMap + ```cpp void insertTablet(Tablet &tablet); ``` - Insert multiple Tablets + ```cpp void insertTablets(std::unordered_map &tablets); ``` - Insert a Record, which contains multiple measurement value of a device at a timestamp + ```cpp void insertRecord(const std::string &deviceId, int64_t time, const std::vector &measurements, const std::vector &types, const std::vector &values); ``` - Insert multiple Records + ```cpp void insertRecords(const std::vector &deviceIds, const std::vector ×, @@ -336,6 +366,7 @@ void insertRecords(const std::vector &deviceIds, ``` - Insert multiple Records that belong to the same device. 
With type info the server has no need to do type inference, which leads a better performance + ```cpp void insertRecordsOfOneDevice(const std::string &deviceId, std::vector ×, @@ -378,6 +409,7 @@ The Insert of aligned timeseries uses interfaces like `insertAlignedXXX`, and ot #### Delete - Delete data in a time range of one or several timeseries + ```cpp void deleteData(const std::string &path, int64_t endTime); void deleteData(const std::vector &paths, int64_t endTime); @@ -387,16 +419,17 @@ void deleteData(const std::vector &paths, int64_t startTime, int64_ ### IoTDB-SQL Interface - Execute query statement + ```cpp unique_ptr executeQueryStatement(const std::string &sql); ``` - Execute non query statement + ```cpp void executeNonQueryStatement(const std::string &sql); ``` - ## Examples The sample code of using these interfaces is in: @@ -412,17 +445,18 @@ If the compilation finishes successfully, the example project will be placed und If errors occur when compiling thrift source code, try to downgrade your xcode-commandline from 12 to 11.5 -see https://stackoverflow.com/questions/63592445/ld-unsupported-tapi-file-type-tapi-tbd-in-yaml-file/65518087#65518087 - +see ### on Windows When Building Thrift and downloading packages via "wget", a possible annoying issue may occur with error message looks like: + ```shell Failed to delete cached file C:\Users\Administrator\.m2\repository\.cache\download-maven-plugin\index.ser ``` + Possible fixes: -- Try to delete the ".m2\repository\\.cache\" directory and try again. -- Add "\true\" configuration to the download-maven-plugin maven phase that complains this error. +- Try to delete the `.m2\repository\.cache`" directory and try again. +- Add `true` configuration to the download-maven-plugin maven phase that complains this error. 
diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-Data-Subscription.md b/src/UserGuide/V1.3.3/API/Programming-Data-Subscription.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/API/Programming-Data-Subscription.md rename to src/UserGuide/V1.3.3/API/Programming-Data-Subscription.md diff --git a/src/UserGuide/V1.3.3/API/Programming-Go-Native-API.md b/src/UserGuide/V1.3.3/API/Programming-Go-Native-API.md new file mode 100644 index 000000000..ca8ce541a --- /dev/null +++ b/src/UserGuide/V1.3.3/API/Programming-Go-Native-API.md @@ -0,0 +1,60 @@ + + +# Go Native API + +The Git repository for the Go Native API client is located [here](https://github.com/apache/iotdb-client-go/) + +## Dependencies + +- golang >= 1.13 +- make >= 3.0 +- curl >= 7.1.1 +- thrift 0.15.0 +- Linux、Macos or other unix-like systems +- Windows+bash (WSL、cygwin、Git Bash) + +## Installation + +- go mod + + ```sh + export GO111MODULE=on + export GOPROXY=https://goproxy.io + + mkdir session_example && cd session_example + + curl -o session_example.go -L https://github.com/apache/iotdb-client-go/raw/main/example/session_example.go + + go mod init session_example + go run session_example.go + ``` + +- GOPATH + + ```sh + # get thrift 0.15.0 + go get github.com/apache/thrift + cd $GOPATH/src/github.com/apache/thrift + git checkout 0.15.0 + + mkdir -p $GOPATH/src/iotdb-client-go-example/session_example + cd $GOPATH/src/iotdb-client-go-example/session_example + curl -o session_example.go -L https://github.com/apache/iotdb-client-go/raw/main/example/session_example.go + go run session_example.go + ``` diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-JDBC.md b/src/UserGuide/V1.3.3/API/Programming-JDBC.md similarity index 90% rename from src/UserGuide/V2.0.1/Tree/API/Programming-JDBC.md rename to src/UserGuide/V1.3.3/API/Programming-JDBC.md index 0251e469c..fa9fc3cc0 100644 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-JDBC.md +++ b/src/UserGuide/V1.3.3/API/Programming-JDBC.md @@ -1,34 
+1,35 @@ # JDBC (Not Recommend) -*NOTICE: CURRENTLY, JDBC IS USED FOR CONNECTING SOME THIRD-PART TOOLS. -IT CAN NOT PROVIDE HIGH THROUGHPUT FOR WRITE OPERATIONS. -PLEASE USE [Java Native API](./Programming-Java-Native-API.md) INSTEAD* +::: warning + +NOTICE: CURRENTLY, JDBC IS USED FOR CONNECTING SOME THIRD-PART TOOLS. +IT CAN NOT PROVIDE HIGH THROUGHPUT FOR WRITE OPERATIONS. +PLEASE USE [Java Native API](./Programming-Java-Native-API.md) INSTEAD + +::: ## Dependencies -* JDK >= 1.8+ -* Maven >= 3.9+ +- JDK >= 1.8+ +- Maven >= 3.9+ ## Installation @@ -110,7 +111,7 @@ public class JDBCExample { //Count timeseries group by each node at the given level statement.execute("COUNT TIMESERIES root GROUP BY LEVEL=3"); outputResult(statement.getResultSet()); - + //Execute insert statements in batch statement.addBatch("INSERT INTO root.demo(timestamp,s0) VALUES(1,1);"); @@ -206,27 +207,37 @@ public class JDBCExample { ``` The parameter `version` can be used in the url: -````java + +```java String url = "jdbc:iotdb://127.0.0.1:6667?version=V_1_0"; -```` -The parameter `version` represents the SQL semantic version used by the client, which is used in order to be compatible with the SQL semantics of `0.12` when upgrading to `0.13`. +``` + +The parameter `version` represents the SQL semantic version used by the client, which is used in order to be compatible with the SQL semantics of `0.12` when upgrading to `0.13`. The possible values are: `V_0_12`, `V_0_13`, `V_1_0`. In addition, IoTDB provides additional interfaces in JDBC for users to read and write the database using different character sets (e.g., GB18030) in the connection. The default character set for IoTDB is UTF-8. When users want to use a character set other than UTF-8, they need to specify the charset property in the JDBC connection. For example: + 1. Create a connection using the GB18030 charset: + ```java DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667?charset=GB18030", "root", "root"); ``` + 2. 
When executing SQL with the `IoTDBStatement` interface, the SQL can be provided as a `byte[]` array, and it will be parsed into a string according to the specified charset. + ```java public boolean execute(byte[] sql) throws SQLException; ``` + 3. When outputting query results, the `getBytes` method of `ResultSet` can be used to get `byte[]`, which will be encoded using the charset specified in the connection. + ```java System.out.print(resultSet.getString(i) + " (" + new String(resultSet.getBytes(i), charset) + ")"); ``` + Here is a complete example: + ```java public class JDBCCharsetExample { @@ -293,4 +304,4 @@ public class JDBCCharsetExample { } } } -``` \ No newline at end of file +``` diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-Java-Native-API.md b/src/UserGuide/V1.3.3/API/Programming-Java-Native-API.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/API/Programming-Java-Native-API.md rename to src/UserGuide/V1.3.3/API/Programming-Java-Native-API.md diff --git a/src/UserGuide/V1.3.3/API/Programming-Kafka.md b/src/UserGuide/V1.3.3/API/Programming-Kafka.md new file mode 100644 index 000000000..22ad13100 --- /dev/null +++ b/src/UserGuide/V1.3.3/API/Programming-Kafka.md @@ -0,0 +1,114 @@ + + +# Kafka + +[Apache Kafka](https://kafka.apache.org/) is an open-source distributed event streaming platform used by thousands of companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications. 
+ +## Coding Example + +### kafka Producer Producing Data Java Code Example + +```java +Properties props = new Properties(); +props.put("bootstrap.servers", "127.0.0.1:9092"); +props.put("key.serializer", StringSerializer.class); +props.put("value.serializer", StringSerializer.class); +KafkaProducer producer = new KafkaProducer<>(props); +producer.send( + new ProducerRecord<>( + "Kafka-Test", "key", "root.kafka," + System.currentTimeMillis() + ",value,INT32,100")); +producer.close(); +``` + +### kafka Consumer Receiving Data Java Code Example + +```java +Properties props = new Properties(); +props.put("bootstrap.servers", "127.0.0.1:9092"); +props.put("key.deserializer", StringDeserializer.class); +props.put("value.deserializer", StringDeserializer.class); +props.put("auto.offset.reset", "earliest"); +props.put("group.id", "Kafka-Test"); +KafkaConsumer kafkaConsumer = new KafkaConsumer<>(props); +kafkaConsumer.subscribe(Collections.singleton("Kafka-Test")); +ConsumerRecords records = kafkaConsumer.poll(Duration.ofSeconds(1)); +``` + +### Example of Java Code Stored in IoTDB Server + +```java +SessionPool pool = + new SessionPool.Builder() + .host("127.0.0.1") + .port(6667) + .user("root") + .password("root") + .maxSize(3) + .build(); +List datas = new ArrayList<>(records.count()); +for (ConsumerRecord record : records) { + datas.add(record.value()); +} +int size = datas.size(); +List deviceIds = new ArrayList<>(size); +List times = new ArrayList<>(size); +List> measurementsList = new ArrayList<>(size); +List> typesList = new ArrayList<>(size); +List> valuesList = new ArrayList<>(size); +for (String data : datas) { + String[] dataArray = data.split(","); + String device = dataArray[0]; + long time = Long.parseLong(dataArray[1]); + List measurements = Arrays.asList(dataArray[2].split(":")); + List types = new ArrayList<>(); + for (String type : dataArray[3].split(":")) { + types.add(TSDataType.valueOf(type)); + } + List values = new ArrayList<>(); + String[] 
valuesStr = dataArray[4].split(":"); + for (int i = 0; i < valuesStr.length; i++) { + switch (types.get(i)) { + case INT64: + values.add(Long.parseLong(valuesStr[i])); + break; + case DOUBLE: + values.add(Double.parseDouble(valuesStr[i])); + break; + case INT32: + values.add(Integer.parseInt(valuesStr[i])); + break; + case TEXT: + values.add(valuesStr[i]); + break; + case FLOAT: + values.add(Float.parseFloat(valuesStr[i])); + break; + case BOOLEAN: + values.add(Boolean.parseBoolean(valuesStr[i])); + break; + } + } + deviceIds.add(device); + times.add(time); + measurementsList.add(measurements); + typesList.add(types); + valuesList.add(values); +} +pool.insertRecords(deviceIds, times, measurementsList, typesList, valuesList); +``` diff --git a/src/UserGuide/V1.3.3/API/Programming-MQTT.md b/src/UserGuide/V1.3.3/API/Programming-MQTT.md new file mode 100644 index 000000000..d33270105 --- /dev/null +++ b/src/UserGuide/V1.3.3/API/Programming-MQTT.md @@ -0,0 +1,189 @@ + + +# MQTT Protocol + +[MQTT](http://mqtt.org/) is a machine-to-machine (M2M)/"Internet of Things" connectivity protocol. +It was designed as an extremely lightweight publish/subscribe messaging transport. +It is useful for connections with remote locations where a small code footprint is required and/or network bandwidth is at a premium. + +IoTDB supports the MQTT v3.1(an OASIS Standard) protocol. +IoTDB server includes a built-in MQTT service that allows remote devices send messages into IoTDB server directly. + +![](https://alioss.timecho.com/docs/img/github/78357432-0c71cf80-75e4-11ea-98aa-c43a54d469ce.png) + +## Built-in MQTT Service + +The Built-in MQTT Service provide the ability of direct connection to IoTDB through MQTT. It listen the publish messages from MQTT clients +and then write the data into storage immediately. +The MQTT topic corresponds to IoTDB timeseries. 
+The messages payload can be format to events by `PayloadFormatter` which loaded by java SPI, and the default implementation is `JSONPayloadFormatter`. +The default `json` formatter support two json format and its json array. The following is an MQTT message payload example: + +```json +{ + "device": "root.sg.d1", + "timestamp": 1586076045524, + "measurements": ["s1", "s2"], + "values": [0.530635, 0.530635] +} +``` + +or + +```json +{ + "device": "root.sg.d1", + "timestamps": [1586076045524, 1586076065526], + "measurements": ["s1", "s2"], + "values": [ + [0.530635, 0.530635], + [0.530655, 0.530695] + ] +} +``` + +or json array of the above two. + + + +## MQTT Configurations + +The IoTDB MQTT service load configurations from `${IOTDB_HOME}/${IOTDB_CONF}/iotdb-system.properties` by default. + +Configurations are as follows: + +| NAME | DESCRIPTION | DEFAULT | +| ---------------------- | :-------------------------------------------------: | :-------: | +| enable_mqtt_service | whether to enable the mqtt service | false | +| mqtt_host | the mqtt service binding host | 127.0.0.1 | +| mqtt_port | the mqtt service binding port | 1883 | +| mqtt_handler_pool_size | the handler pool size for handing the mqtt messages | 1 | +| mqtt_payload_formatter | the mqtt message payload formatter | json | +| mqtt_max_message_size | the max mqtt message size in byte | 1048576 | + +## Coding Examples + +The following is an example which a mqtt client send messages to IoTDB server. 
+ +```java +MQTT mqtt = new MQTT(); +mqtt.setHost("127.0.0.1", 1883); +mqtt.setUserName("root"); +mqtt.setPassword("root"); + +BlockingConnection connection = mqtt.blockingConnection(); +connection.connect(); + +Random random = new Random(); +for (int i = 0; i < 10; i++) { + String payload = String.format("{\n" + + "\"device\":\"root.sg.d1\",\n" + + "\"timestamp\":%d,\n" + + "\"measurements\":[\"s1\"],\n" + + "\"values\":[%f]\n" + + "}", System.currentTimeMillis(), random.nextDouble()); + + connection.publish("root.sg.d1.s1", payload.getBytes(), QoS.AT_LEAST_ONCE, false); +} + +connection.disconnect(); + +``` + +## Customize your MQTT Message Format + +If you do not like the above Json format, you can customize your MQTT Message format by just writing several lines +of codes. An example can be found in `example/mqtt-customize` project. + +Steps: + +1. Create a java project, and add dependency: + + ```xml + + org.apache.iotdb + iotdb-server + 1.1.0-SNAPSHOT + + ``` + +2. Define your implementation which implements `org.apache.iotdb.db.protocol.mqtt.PayloadFormatter` + e.g., + + ```java + package org.apache.iotdb.mqtt.server; + + import io.netty.buffer.ByteBuf; + import org.apache.iotdb.db.protocol.mqtt.Message; + import org.apache.iotdb.db.protocol.mqtt.PayloadFormatter; + + import java.nio.charset.StandardCharsets; + import java.util.ArrayList; + import java.util.Arrays; + import java.util.List; + + public class CustomizedJsonPayloadFormatter implements PayloadFormatter { + + @Override + public List format(ByteBuf payload) { + // Suppose the payload is a json format + if (payload == null) { + return null; + } + + String json = payload.toString(StandardCharsets.UTF_8); + // parse data from the json and generate Messages and put them into List ret + List ret = new ArrayList<>(); + // this is just an example, so we just generate some Messages directly + for (int i = 0; i < 2; i++) { + long ts = i; + Message message = new Message(); + message.setDevice("d" + i); + 
message.setTimestamp(ts); + message.setMeasurements(Arrays.asList("s1", "s2")); + message.setValues(Arrays.asList("4.0" + i, "5.0" + i)); + ret.add(message); + } + return ret; + } + + @Override + public String getName() { + // set the value of mqtt_payload_formatter in iotdb-system.properties as the following string: + return "CustomizedJson"; + } + } + ``` + +3. modify the file in `src/main/resources/META-INF/services/org.apache.iotdb.db.protocol.mqtt.PayloadFormatter`: + clean the file and put your implementation class name into the file. + In this example, the content is: `org.apache.iotdb.mqtt.server.CustomizedJsonPayloadFormatter` +4. compile your implementation as a jar file: `mvn package -DskipTests` + +Then, in your server: + +1. Create ${IOTDB_HOME}/ext/mqtt/ folder, and put the jar into this folder. +2. Update configuration to enable MQTT service. (`enable_mqtt_service=true` in `conf/iotdb-system.properties`) +3. Set the value of `mqtt_payload_formatter` in `conf/iotdb-system.properties` as the value of getName() in your implementation + , in this example, the value is `CustomizedJson` +4. Launch the IoTDB server. +5. Now IoTDB will use your implementation to parse the MQTT message. + +More: the message format can be anything you want. For example, if it is a binary format, +just use `payload.forEachByte()` or `payload.array` to get bytes content. 
diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-NodeJS-Native-API.md b/src/UserGuide/V1.3.3/API/Programming-NodeJS-Native-API.md similarity index 62% rename from src/UserGuide/V2.0.1/Tree/API/Programming-NodeJS-Native-API.md rename to src/UserGuide/V1.3.3/API/Programming-NodeJS-Native-API.md index 35c7964cd..e67f1f0d8 100644 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-NodeJS-Native-API.md +++ b/src/UserGuide/V1.3.3/API/Programming-NodeJS-Native-API.md @@ -1,71 +1,72 @@ # Node.js Native API -Apache IoTDB uses Thrift as a cross-language RPC-framework so access to IoTDB can be achieved through the interfaces provided by Thrift. +Apache IoTDB uses Thrift as a cross-language RPC-framework so access to IoTDB can be achieved through the interfaces provided by Thrift. This document will introduce how to generate a native Node.js interface that can be used to access IoTDB. ## Dependents - * JDK >= 1.8 - * Node.js >= 16.0.0 - * Linux、Macos or like unix - * Windows+bash +- JDK >= 1.8 +- Node.js >= 16.0.0 +- Linux、Macos or like unix +- Windows+bash ## Generate the Node.js native interface 1. Find the `pom.xml` file in the root directory of the IoTDB source code folder. 2. Open the `pom.xml` file and find the following content: + ```xml - - generate-thrift-sources-python - generate-sources - - compile - - - py - ${project.build.directory}/generated-sources-python/ - - + + generate-thrift-sources-python + generate-sources + + compile + + + py + ${project.build.directory}/generated-sources-python/ + + ``` + 3. Duplicate this block and change the `id`, `generator` and `outputDirectory` to this: + ```xml - - generate-thrift-sources-nodejs - generate-sources - - compile - - - js:node - ${project.build.directory}/generated-sources-nodejs/ - - + + generate-thrift-sources-nodejs + generate-sources + + compile + + + js:node + ${project.build.directory}/generated-sources-nodejs/ + + ``` + 4. 
In the root directory of the IoTDB source code folder,run `mvn clean generate-sources`. -This command will automatically delete the files in `iotdb/iotdb-protocol/thrift/target` and `iotdb/iotdb-protocol/thrift-commons/target`, and repopulate the folder with the newly generated files. -The newly generated JavaScript sources will be located in `iotdb/iotdb-protocol/thrift/target/generated-sources-nodejs` in the various modules of the `iotdb-protocol` module. + This command will automatically delete the files in `iotdb/iotdb-protocol/thrift/target` and `iotdb/iotdb-protocol/thrift-commons/target`, and repopulate the folder with the newly generated files. + The newly generated JavaScript sources will be located in `iotdb/iotdb-protocol/thrift/target/generated-sources-nodejs` in the various modules of the `iotdb-protocol` module. ## Using the Node.js native interface @@ -73,7 +74,7 @@ Simply copy the files in `iotdb/iotdb-protocol/thrift/target/generated-sources-n ## rpc interface -``` +```cpp // open a session TSOpenSessionResp openSession(1:TSOpenSessionReq req); @@ -89,7 +90,7 @@ TSStatus executeBatchStatement(1:TSExecuteBatchStatementReq req); // execute query SQL statement TSExecuteStatementResp executeQueryStatement(1:TSExecuteStatementReq req); -// execute insert, delete and update SQL statement +// execute insert, delete and update SQL statement TSExecuteStatementResp executeUpdateStatement(1:TSExecuteStatementReq req); // fetch next query result @@ -98,7 +99,7 @@ TSFetchResultsResp fetchResults(1:TSFetchResultsReq req) // fetch meta data TSFetchMetadataResp fetchMetadata(1:TSFetchMetadataReq req) -// cancel a query +// cancel a query TSStatus cancelOperation(1:TSCancelOperationReq req); // close a query dataset @@ -178,4 +179,4 @@ TSExecuteStatementResp executeRawDataQuery(1:TSRawDataQueryReq req); // request a statement id from server i64 requestStatementId(1:i64 sessionId); -``` \ No newline at end of file +``` diff --git 
a/src/UserGuide/V1.3.3/API/Programming-ODBC.md b/src/UserGuide/V1.3.3/API/Programming-ODBC.md new file mode 100644 index 000000000..51ac098ba --- /dev/null +++ b/src/UserGuide/V1.3.3/API/Programming-ODBC.md @@ -0,0 +1,155 @@ + + +# ODBC + +With IoTDB JDBC, IoTDB can be accessed using the ODBC-JDBC bridge. + +## Dependencies + +- IoTDB-JDBC's jar-with-dependency package +- ODBC-JDBC bridge (e.g. ZappySys JDBC Bridge) + +## Deployment + +### Preparing JDBC package + +Download the source code of IoTDB, and execute the following command in root directory: + +```shell +mvn clean package -pl iotdb-client/jdbc -am -DskipTests -P get-jar-with-dependencies +``` + +Then, you can see the output `iotdb-jdbc-1.3.2-SNAPSHOT-jar-with-dependencies.jar` under `iotdb-client/jdbc/target` directory. + +### Preparing ODBC-JDBC Bridge + +_Note: Here we only provide one kind of ODBC-JDBC bridge as the instance. Readers can use other ODBC-JDBC bridges to access IoTDB with the IOTDB-JDBC._ + +1. **Download Zappy-Sys ODBC-JDBC Bridge**: + Enter the website, and click "download". + + ![ZappySys_website.jpg](https://alioss.timecho.com/upload/ZappySys_website.jpg) + +2. **Prepare IoTDB**: Set up IoTDB cluster, and write a row of data arbitrarily. + + ```sql + IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) + ``` + +3. **Deploy and Test the Bridge**: + + 1. Open ODBC Data Sources(32/64 bit), depending on the bits of Windows. One possible position is `C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Administrative Tools`. + + ![ODBC_ADD_EN.jpg](https://alioss.timecho.com/upload/ODBC_ADD_EN.jpg) + + 2. Click on "add" and select ZappySys JDBC Bridge. + + ![ODBC_CREATE_EN.jpg](https://alioss.timecho.com/upload/ODBC_CREATE_EN.jpg) + + 3. 
Fill in the following settings: + + | Property | Content | Example | + | ------------------- | --------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | + | Connection String | jdbc:iotdb://\:\/ | jdbc:iotdb://127.0.0.1:6667/ | + | Driver Class | org.apache.iotdb.jdbc.IoTDBDriver | org.apache.iotdb.jdbc.IoTDBDriver | + | JDBC driver file(s) | The path of IoTDB JDBC jar-with-dependencies | C:\Users\13361\Documents\GitHub\iotdb\iotdb-client\jdbc\target\iotdb-jdbc-1.3.2-SNAPSHOT-jar-with-dependencies.jar | + | User name | IoTDB's user name | root | + | User password | IoTDB's password | root | + + ![ODBC_CONNECTION.png](https://alioss.timecho.com/upload/ODBC_CONNECTION.png) + + 4. Click on "Test Connection" button, and a "Test Connection: SUCCESSFUL" should appear. + + ![ODBC_CONFIG_EN.jpg](https://alioss.timecho.com/upload/ODBC_CONFIG_EN.jpg) + + 5. Click the "Preview" button above, and replace the original query text with `select * from root.**`, then click "Preview Data", and the query result should correctly. + + ![ODBC_TEST.jpg](https://alioss.timecho.com/upload/ODBC_TEST.jpg) + +4. **Operate IoTDB's data with ODBC**: After correct deployment, you can use Microsoft's ODBC library to operate IoTDB's data. 
Here's an example written in C#: + + ```C# + using System.Data.Odbc; + + // Get a connection + var dbConnection = new OdbcConnection("DSN=ZappySys JDBC Bridge"); + dbConnection.Open(); + + // Execute the write commands to prepare data + var dbCommand = dbConnection.CreateCommand(); + dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s1) values(1715670861634, 1)"; + dbCommand.ExecuteNonQuery(); + dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s2) values(1715670861634, true)"; + dbCommand.ExecuteNonQuery(); + dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s3) values(1715670861634, 3.1)"; + dbCommand.ExecuteNonQuery(); + + // Execute the read command + dbCommand.CommandText = "SELECT * FROM root.Keller.Flur.Energieversorgung"; + var dbReader = dbCommand.ExecuteReader(); + + // Write the output header + var fCount = dbReader.FieldCount; + Console.Write(":"); + for(var i = 0; i < fCount; i++) + { + var fName = dbReader.GetName(i); + Console.Write(fName + ":"); + } + Console.WriteLine(); + + // Output the content + while (dbReader.Read()) + { + Console.Write(":"); + for(var i = 0; i < fCount; i++) + { + var fieldType = dbReader.GetFieldType(i); + switch (fieldType.Name) + { + case "DateTime": + var dateTime = dbReader.GetInt64(i); + Console.Write(dateTime + ":"); + break; + case "Double": + if (dbReader.IsDBNull(i)) + { + Console.Write("null:"); + } + else + { + var fValue = dbReader.GetDouble(i); + Console.Write(fValue + ":"); + } + break; + default: + Console.Write(fieldType.Name + ":"); + break; + } + } + Console.WriteLine(); + } + + // Shut down gracefully + dbReader.Close(); + dbCommand.Dispose(); + dbConnection.Close(); + ``` + + This program can write data into IoTDB, and query the data we have just written. 
diff --git a/src/UserGuide/V1.3.3/API/Programming-OPC-UA_timecho.md b/src/UserGuide/V1.3.3/API/Programming-OPC-UA_timecho.md new file mode 100644 index 000000000..7459c19b7 --- /dev/null +++ b/src/UserGuide/V1.3.3/API/Programming-OPC-UA_timecho.md @@ -0,0 +1,282 @@ + + +# OPC UA Protocol + +## OPC UA + +OPC UA is a technical specification used in the automation field for communication between different devices and systems, enabling cross platform, cross language, and cross network operations, providing a reliable and secure data exchange foundation for the Industrial Internet of Things. IoTDB supports OPC UA protocol, and IoTDB OPC Server supports both Client/Server and Pub/Sub communication modes. + +### OPC UA Client/Server Mode + +- **Client/Server Mode**:In this mode, IoTDB's stream processing engine establishes a connection with the OPC UA Server via an OPC UA Sink. The OPC UA Server maintains data within its Address Space, from which IoTDB can request and retrieve data. Additionally, other OPC UA Clients can access the data on the server. + +::: center + + + +::: + +- Features: + + - OPC UA will organize the device information received from Sink into folders under the Objects folder according to a tree model. + + - Each measurement point is recorded as a variable node and the latest value in the current database is recorded. + +### OPC UA Pub/Sub Mode + +- **Pub/Sub Mode**: In this mode, IoTDB's stream processing engine sends data change events to the OPC UA Server through an OPC UA Sink. These events are published to the server's message queue and managed through Event Nodes. Other OPC UA Clients can subscribe to these Event Nodes to receive notifications upon data changes. + +::: center + + + +::: + +- Features: + + - Each measurement point is wrapped as an Event Node in OPC UA. 
+ + - The relevant fields and their meanings are as follows: + + | Field | Meaning | Type (Milo) | Example | + | :--------- | :--------------------------------- | :------------ | :-------------------- | + | Time | Timestamp | DateTime | 1698907326198 | + | SourceName | Full path of the measurement point | String | root.test.opc.sensor0 | + | SourceNode | Data type of the measurement point | NodeId | Int32 | + | Message | Data | LocalizedText | 3.0 | + + - Events are only sent to clients that are already listening; if a client is not connected, the Event will be ignored. + +## IoTDB OPC Server Startup method + +### Syntax + +The syntax for creating the Sink is as follows: + +```sql +create pipe p1 + with source (...) + with processor (...) + with sink ('sink' = 'opc-ua-sink', + 'sink.opcua.tcp.port' = '12686', + 'sink.opcua.https.port' = '8443', + 'sink.user' = 'root', + 'sink.password' = 'root', + 'sink.opcua.security.dir' = '...' + ) +``` + +### Parameters + +| key | value | value range | required or not | default value | +| :--------------------------------- | :-------------------------------------------------- | :------------------------------------------------------- | :-------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| sink | OPC UA SINK | String: opc-ua-sink | Required | | +| sink.opcua.model | OPC UA model used | String: client-server / pub-sub | Optional | client-server | +| sink.opcua.tcp.port | OPC UA's TCP port | Integer: \[0, 65536] | Optional | 12686 | +| sink.opcua.https.port | OPC UA's HTTPS port | Integer: \[0, 65536] | Optional | 8443 | +| sink.opcua.security.dir | Directory for OPC UA's keys and certificates | String: Path, supports absolute and relative directories | Optional | Opc_security folder/in the conf 
directory of the IoTDB installation on the DataNode.
If there is no conf directory for iotdb (such as launching DataNode in IDEA), it will be the iotdb_opc_Security folder/\in the user's home directory | +| sink.opcua.enable-anonymous-access | Whether OPC UA allows anonymous access | Boolean | Optional | true | +| sink.user | User for OPC UA, specified in the configuration | String | Optional | root | +| sink.password | Password for OPC UA, specified in the configuration | String | Optional | root | + +### 示例 + +```Bash +create pipe p1 + with sink ('sink' = 'opc-ua-sink', + 'sink.user' = 'root', + 'sink.password' = 'root'); +start pipe p1; +``` + +### Usage Limitations + +1. **DataRegion Requirement**: The OPC UA server will only start if there is a DataRegion in IoTDB. For an empty IoTDB, a data entry is necessary for the OPC UA server to become effective. + +2. **Data Availability**: Clients subscribing to the server will not receive data written to IoTDB before their connection. + +3. **Multiple DataNodes may have scattered sending/conflict issues**: + + - For IoTDB clusters with multiple dataRegions and scattered across different DataNode IPs, data will be sent in a dispersed manner on the leaders of the dataRegions. The client needs to listen to the configuration ports of the DataNode IP separately.。 + + - Suggest using this OPC UA server under 1C1D. + +4. **Does not support deleting data and modifying measurement point types:** In Client Server mode, OPC UA cannot delete data or change data type settings. In Pub Sub mode, if data is deleted, information cannot be pushed to the client. + +## IoTDB OPC Server Example + +### Client / Server Mode + +#### Preparation Work + +1. Take UAExpert client as an example, download the UAExpert client: + +2. Install UAExpert and fill in your own certificate information. + +#### Quick Start + +1. Use the following SQL to create and start the OPC UA Sink in client-server mode. 
For detailed syntax, please refer to: [IoTDB OPC Server Syntax](#syntax) + + ```sql + create pipe p1 with sink ('sink'='opc-ua-sink'); + ``` + +2. Write some data. + + ```sql + insert into root.test.db(time, s2) values(now(), 2) + ``` + + ​The metadata is automatically created and enabled here. + +3. Configure the connection to IoTDB in UAExpert, where the password should be set to the one defined in the sink.password parameter (using the default password "root" as an example): + + ::: center + + + + ::: + + ::: center + + + + ::: + +4. After trusting the server's certificate, you can see the written data in the Objects folder on the left. + + ::: center + + + + ::: + + ::: center + + + + ::: + +5. You can drag the node on the left to the center and display the latest value of that node: + + ::: center + + + + ::: + +### Pub / Sub Mode + +#### Preparation Work + +The code is located in the [opc-ua-sink 文件夹](https://github.com/apache/iotdb/tree/master/example/pipe-opc-ua-sink/src/main/java/org/apache/iotdb/opcua) under the iotdb-example package. + +The code includes: + +- The main class (ClientTest) +- Client certificate-related logic(IoTDBKeyStoreLoaderClient) +- Client configuration and startup logic(ClientExampleRunner) +- The parent class of ClientTest(ClientExample) + +### Quick Start + +The steps are as follows: + +1. Start IoTDB and write some data. + + ```sql + insert into root.a.b(time, c, d) values(now(), 1, 2); + ``` + + ​The metadata is automatically created and enabled here. + +2. Use the following SQL to create and start the OPC UA Sink in Pub-Sub mode. For detailed syntax, please refer to: [IoTDB OPC Server Syntax](#syntax) + + ```sql + create pipe p1 with sink ('sink'='opc-ua-sink', + 'sink.opcua.model'='pub-sub'); + start pipe p1; + ``` + + ​ At this point, you can see that the opc certificate-related directory has been created under the server's conf directory. + + ::: center + + + + ::: + +3. 
Run the Client connection directly; the Client's certificate will be rejected by the server. + + ::: center + + + + ::: + +4. Go to the server's sink.opcua.security.dir directory, then to the pki's rejected directory, where the Client's certificate should have been generated. + + ::: center + + + + ::: + +5. Move (not copy) the client's certificate into (not into a subdirectory of) the trusted directory's certs folder in the same directory. + + ::: center + + + + ::: + +6. Open the Client connection again; the server's certificate should now be rejected by the Client. + + ::: center + + + + ::: + +7. Go to the client's /client/security directory, then to the pki's rejected directory, and move the server's certificate into (not into a subdirectory of) the trusted directory. + + ::: center + + + + ::: + +8. Open the Client, and now the two-way trust is successful, and the Client can connect to the server. + +9. Write data to the server, and the Client will print out the received data. + + ::: center + + + + ::: + +### Notes + +1. **stand alone and cluster:**It is recommended to use a 1C1D (one coordinator and one data node) single machine version. If there are multiple DataNodes in the cluster, data may be sent in a scattered manner across various DataNodes, and it may not be possible to listen to all the data. + +2. **No Need to Operate Root Directory Certificates:** During the certificate operation process, there is no need to operate the `iotdb-server.pfx` certificate under the IoTDB security root directory and the `example-client.pfx` directory under the client security directory. When the Client and Server connect bidirectionally, they will send the root directory certificate to each other. If it is the first time the other party sees this certificate, it will be placed in the reject dir. If the certificate is in the trusted/certs, then the other party can trust it. + +3. 
**It is Recommended to Use Java 17+:** + In JVM 8 versions, there may be a key length restriction, resulting in an "Illegal key size" error. For specific versions (such as jdk.1.8u151+), you can add `Security.`_`setProperty`_`("crypto.policy", "unlimited");`; in the create client of ClientExampleRunner to solve this, or you can download the unlimited package `local_policy.jar` and `US_export_policy` to replace the packages in the `JDK/jre/lib/security`. Download link: . diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-Python-Native-API.md b/src/UserGuide/V1.3.3/API/Programming-Python-Native-API.md similarity index 82% rename from src/UserGuide/V2.0.1/Tree/API/Programming-Python-Native-API.md rename to src/UserGuide/V1.3.3/API/Programming-Python-Native-API.md index 446b0cd53..370522c1c 100644 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-Python-Native-API.md +++ b/src/UserGuide/V1.3.3/API/Programming-Python-Native-API.md @@ -1,22 +1,19 @@ # Python Native API @@ -25,15 +22,13 @@ You have to install thrift (>=0.13) before using the package. 
- - ## How to use (Example) First, download the package: `pip3 install apache-iotdb` -You can get an example of using the package to read and write data at here:[Session Example](https://github.com/apache/iotdb/blob/rc/2.0.1/iotdb-client/client-py/session_example.py) +You can get an example of using the package to read and write data at here: [Session Example](https://github.com/apache/iotdb/blob/rc/1.3.3/iotdb-client/client-py/SessionExample.py) -An example of aligned timeseries: [Aligned Timeseries Session Example](https://github.com/apache/iotdb/blob/rc/2.0.1/iotdb-client/client-py/session_aligned_timeseries_example.py) +An example of aligned timeseries: [Aligned Timeseries Session Example](https://github.com/apache/iotdb/blob/rc/1.3.3/iotdb-client/client-py/SessionAlignedTimeseriesExample.py) (you need to add `import iotdb` in the head of the file) @@ -54,7 +49,7 @@ session.close() ## Initialization -* Initialize a Session +- Initialize a Session ```python session = Session( @@ -68,7 +63,7 @@ session = Session( ) ``` -* Initialize a Session to connect multiple nodes +- Initialize a Session to connect multiple nodes ```python session = Session.init_from_node_urls( @@ -81,7 +76,7 @@ session = Session.init_from_node_urls( ) ``` -* Open a session, with a parameter to specify whether to enable RPC compression +- Open a session, with a parameter to specify whether to enable RPC compression ```python session.open(enable_rpc_compression=False) @@ -89,11 +84,12 @@ session.open(enable_rpc_compression=False) Notice: this RPC compression status of client must comply with that of IoTDB server -* Close a Session +- Close a Session ```python session.close() ``` + ## Managing Session through SessionPool Utilizing SessionPool to manage sessions eliminates the need to worry about session reuse. 
When the number of session connections reaches the maximum capacity of the pool, requests for acquiring a session will be blocked, and you can set the blocking wait time through parameters. After using a session, it should be returned to the SessionPool using the `putBack` method for proper management. @@ -110,7 +106,9 @@ wait_timeout_in_ms = 3000 # # Create the connection pool session_pool = SessionPool(pool_config, max_pool_size, wait_timeout_in_ms) ``` -### Create a SessionPool using distributed nodes. + +### Create a SessionPool using distributed nodes + ```python pool_config = PoolConfig(node_urls=node_urls=["127.0.0.1:6667", "127.0.0.1:6668", "127.0.0.1:6669"], user_name=username, password=password, fetch_size=1024, @@ -118,6 +116,7 @@ pool_config = PoolConfig(node_urls=node_urls=["127.0.0.1:6667", "127.0.0.1:6668" max_pool_size = 5 wait_timeout_in_ms = 3000 ``` + ### Acquiring a session through SessionPool and manually calling PutBack after use ```python @@ -136,33 +135,34 @@ session_pool.close() ### Database Management -* CREATE DATABASE +- CREATE DATABASE ```python session.set_storage_group(group_name) ``` -* Delete one or several databases +- Delete one or several databases ```python session.delete_storage_group(group_name) session.delete_storage_groups(group_name_lst) ``` + ### Timeseries Management -* Create one or multiple timeseries +- Create one or multiple timeseries ```python session.create_time_series(ts_path, data_type, encoding, compressor, props=None, tags=None, attributes=None, alias=None) - + session.create_multi_time_series( ts_path_lst, data_type_lst, encoding_lst, compressor_lst, props_lst=None, tags_lst=None, attributes_lst=None, alias_lst=None ) ``` -* Create aligned timeseries +- Create aligned timeseries ```python session.create_aligned_time_series( @@ -172,13 +172,13 @@ session.create_aligned_time_series( Attention: Alias of measurements are **not supported** currently. 
-* Delete one or several timeseries +- Delete one or several timeseries ```python session.delete_time_series(paths_list) ``` -* Check whether the specific timeseries exists +- Check whether the specific timeseries exists ```python session.check_time_series_exists(path) @@ -190,14 +190,13 @@ session.check_time_series_exists(path) It is recommended to use insertTablet to help improve write efficiency. -* Insert a Tablet,which is multiple rows of a device, each row has the same measurements - * **Better Write Performance** - * **Support null values**: fill the null value with any value, and then mark the null value via BitMap (from v0.13) - +- Insert a Tablet,which is multiple rows of a device, each row has the same measurements + - **Better Write Performance** + - **Support null values**: fill the null value with any value, and then mark the null value via BitMap (from v0.13) We have two implementations of Tablet in Python API. -* Normal Tablet +- Normal Tablet ```python values_ = [ @@ -224,12 +223,14 @@ tablet_ = Tablet( ) session.insert_tablet(tablet_) ``` -* Numpy Tablet + +- Numpy Tablet Comparing with Tablet, Numpy Tablet is using [numpy.ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) to record data. With less memory footprint and time cost of serialization, the insert performance will be better. **Notice** + 1. time and numerical value columns in Tablet is ndarray 2. recommended to use the specific dtypes to each ndarray, see the example below (if not, the default dtypes are also ok). 
@@ -282,19 +283,19 @@ np_tablet_with_none = NumpyTablet( session.insert_tablet(np_tablet_with_none) ``` -* Insert multiple Tablets +- Insert multiple Tablets ```python session.insert_tablets(tablet_lst) ``` -* Insert a Record +- Insert a Record ```python session.insert_record(device_id, timestamp, measurements_, data_types_, values_) ``` -* Insert multiple Records +- Insert multiple Records ```python session.insert_records( @@ -302,10 +303,9 @@ session.insert_records( ) ``` -* Insert multiple Records that belong to the same device. +- Insert multiple Records that belong to the same device. With type info the server has no need to do type inference, which leads a better performance - ```python session.insert_records_of_one_device(device_id, time_list, measurements_list, data_types_list, values_list) ``` @@ -314,7 +314,7 @@ session.insert_records_of_one_device(device_id, time_list, measurements_list, da When the data is of String type, we can use the following interface to perform type inference based on the value of the value itself. For example, if value is "true" , it can be automatically inferred to be a boolean type. If value is "3.2" , it can be automatically inferred as a flout type. Without type information, server has to do type inference, which may cost some time. 
-* Insert a Record, which contains multiple measurement value of a device at a timestamp +- Insert a Record, which contains multiple measurement value of a device at a timestamp ```python session.insert_str_record(device_id, timestamp, measurements, string_values) @@ -324,36 +324,38 @@ session.insert_str_record(device_id, timestamp, measurements, string_values) The Insert of aligned timeseries uses interfaces like insert_aligned_XXX, and others are similar to the above interfaces: -* insert_aligned_record -* insert_aligned_records -* insert_aligned_records_of_one_device -* insert_aligned_tablet -* insert_aligned_tablets - +- insert_aligned_record +- insert_aligned_records +- insert_aligned_records_of_one_device +- insert_aligned_tablet +- insert_aligned_tablets ## IoTDB-SQL Interface -* Execute query statement +- Execute query statement ```python session.execute_query_statement(sql) ``` -* Execute non query statement +- Execute non query statement ```python session.execute_non_query_statement(sql) ``` -* Execute statement +- Execute statement ```python session.execute_statement(sql) ``` ## Schema Template + ### Create Schema Template + The step for creating a metadata template is as follows + 1. Create the template class 2. Adding MeasurementNode 3. Execute create schema template function @@ -371,70 +373,87 @@ template.add_template(m_node_z) session.create_schema_template(template) ``` + ### Modify Schema Template measurements + Modify measurements in a template, the template must be already created. These are functions that add or delete some measurement nodes. 
-* add node in template + +- add node in template + ```python session.add_measurements_in_template(template_name, measurements_path, data_types, encodings, compressors, is_aligned) ``` -* delete node in template +- delete node in template + ```python session.delete_node_in_template(template_name, path) ``` ### Set Schema Template + ```python session.set_schema_template(template_name, prefix_path) ``` ### Uset Schema Template + ```python session.unset_schema_template(template_name, prefix_path) ``` ### Show Schema Template -* Show all schema templates + +- Show all schema templates + ```python session.show_all_templates() ``` -* Count all measurements in templates + +- Count all measurements in templates + ```python session.count_measurements_in_template(template_name) ``` -* Judge whether the path is measurement or not in templates, This measurement must be in the template +- Judge whether the path is measurement or not in templates, This measurement must be in the template + ```python session.count_measurements_in_template(template_name, path) ``` -* Judge whether the path is exist or not in templates, This path may not belong to the template +- Judge whether the path is exist or not in templates, This path may not belong to the template + ```python session.is_path_exist_in_template(template_name, path) ``` -* Show nodes under in schema template +- Show nodes under in schema template + ```python session.show_measurements_in_template(template_name) ``` -* Show the path prefix where a schema template is set +- Show the path prefix where a schema template is set + ```python session.show_paths_template_set_on(template_name) ``` -* Show the path prefix where a schema template is used (i.e. the time series has been created) +- Show the path prefix where a schema template is used (i.e. 
the time series has been created) + ```python session.show_paths_template_using_on(template_name) ``` ### Drop Schema Template + Delete an existing metadata template,dropping an already set template is not supported + ```python session.drop_schema_template("template_python") ``` - ## Pandas Support To easily transform a query result to a [Pandas Dataframe](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) @@ -462,12 +481,12 @@ session.close() df = ... ``` - ## IoTDB Testcontainer -The Test Support is based on the lib `testcontainers` (https://testcontainers-python.readthedocs.io/en/latest/index.html) which you need to install in your project if you want to use the feature. +The Test Support is based on the lib `testcontainers` () which you need to install in your project if you want to use the feature. To start (and stop) an IoTDB Database in a Docker container simply do: + ```python class MyTestCase(unittest.TestCase): @@ -484,13 +503,15 @@ by default it will load the image `apache/iotdb:latest`, if you want a specific ## IoTDB DBAPI -IoTDB DBAPI implements the Python DB API 2.0 specification (https://peps.python.org/pep-0249/), which defines a common +IoTDB DBAPI implements the Python DB API 2.0 specification (), which defines a common interface for accessing databases in Python. ### Examples -+ Initialization + +- Initialization The initialized parameters are consistent with the session part (except for the sqlalchemy_mode). 
+ ```python from iotdb.dbapi import connect @@ -501,23 +522,27 @@ password_ = "root" conn = connect(ip, port_, username_, password_,fetch_size=1024,zone_id="UTC+8",sqlalchemy_mode=False) cursor = conn.cursor() ``` -+ simple SQL statement execution + +- simple SQL statement execution + ```python cursor.execute("SELECT ** FROM root") for row in cursor.fetchall(): print(row) ``` -+ execute SQL with parameter +- execute SQL with parameter IoTDB DBAPI supports pyformat style parameters + ```python cursor.execute("SELECT ** FROM root WHERE time < %(time)s",{"time":"2017-11-01T00:08:00.000"}) for row in cursor.fetchall(): print(row) ``` -+ execute SQL with parameter sequences +- execute SQL with parameter sequences + ```python seq_of_parameters = [ {"timestamp": 1, "temperature": 1}, @@ -530,17 +555,21 @@ sql = "insert into root.cursor(timestamp,temperature) values(%(timestamp)s,%(tem cursor.executemany(sql,seq_of_parameters) ``` -+ close the connection and cursor +- close the connection and cursor + ```python cursor.close() conn.close() ``` ## IoTDB SQLAlchemy Dialect (Experimental) + The SQLAlchemy dialect of IoTDB is written to adapt to Apache Superset. This part is still being improved. Please do not use it in the production environment! + ### Mapping of the metadata + The data model used by SQLAlchemy is a relational data model, which describes the relationships between different entities through tables. While the data model of IoTDB is a hierarchical data model, which organizes the data through a tree structure. In order to adapt IoTDB to the dialect of SQLAlchemy, the original data model in IoTDB needs to be reorganized. @@ -554,25 +583,27 @@ The metadata in the IoTDB are: 4. Measurement The metadata in the SQLAlchemy are: + 1. Schema 2. Table 3. 
Column The mapping relationship between them is: -| The metadata in the SQLAlchemy | The metadata in the IoTDB | -| -------------------- | -------------------------------------------- | -| Schema | Database | -| Table | Path ( from database to entity ) + Entity | -| Column | Measurement | +| The metadata in the SQLAlchemy | The metadata in the IoTDB | +| ------------------------------ | ----------------------------------------- | +| Schema | Database | +| Table | Path ( from database to entity ) + Entity | +| Column | Measurement | The following figure shows the relationship between the two more intuitively: ![sqlalchemy-to-iotdb](https://alioss.timecho.com/docs/img/UserGuide/API/IoTDB-SQLAlchemy/sqlalchemy-to-iotdb.png?raw=true) ### Data type mapping + | data type in IoTDB | data type in SQLAlchemy | -|--------------------|-------------------------| +| ------------------ | ----------------------- | | BOOLEAN | Boolean | | INT32 | Integer | | INT64 | BigInteger | @@ -583,7 +614,7 @@ The following figure shows the relationship between the two more intuitively: ### Example -+ execute statement +- execute statement ```python from sqlalchemy import create_engine @@ -595,7 +626,7 @@ for row in result.fetchall(): print(row) ``` -+ ORM (now only simple queries are supported) +- ORM (now only simple queries are supported) ```python from sqlalchemy import create_engine, Column, Float, BigInteger, MetaData @@ -626,49 +657,39 @@ for row in res: print(row) ``` - ## Developers ### Introduction This is an example of how to connect to IoTDB with python, using the thrift rpc interfaces. Things are almost the same on Windows or Linux, but pay attention to the difference like path separator. - - ### Prerequisites Python3.7 or later is preferred. You have to install Thrift (0.11.0 or later) to compile our thrift file into python code. Below is the official tutorial of installation, eventually, you should have a thrift executable. 
-``` -http://thrift.apache.org/docs/install/ -``` + Before starting you need to install `requirements_dev.txt` in your python environment, e.g. by calling + ```shell pip install -r requirements_dev.txt ``` - - ### Compile the thrift library and Debug -In the root of IoTDB's source code folder, run `mvn clean generate-sources -pl iotdb-client/client-py -am`. +In the root of IoTDB's source code folder, run `mvn clean generate-sources -pl iotdb-client/client-py -am`. This will automatically delete and repopulate the folder `iotdb/thrift` with the generated thrift files. This folder is ignored from git and should **never be pushed to git!** **Notice** Do not upload `iotdb/thrift` to the git repo. - - - ### Session Client & Example We packed up the Thrift interface in `client-py/src/iotdb/Session.py` (similar with its Java counterpart), also provided an example file `client-py/src/SessionExample.py` of how to use the session module. please read it carefully. - Or, another simple example: ```python @@ -684,8 +705,6 @@ zone = session.get_time_zone() session.close() ``` - - ### Tests Please add your custom tests in `tests` folder. @@ -694,15 +713,11 @@ To run all defined tests just type `pytest .` in the root folder. **Notice** Some tests need docker to be started on your system as a test instance is started in a docker container using [testcontainers](https://testcontainers-python.readthedocs.io/en/latest/index.html). - - ### Futher Tools [black](https://pypi.org/project/black/) and [flake8](https://pypi.org/project/flake8/) are installed for autoformatting and linting. Both can be run by `black .` or `flake8 .` respectively. - - ## Releasing To do a release just ensure that you have the right set of generated thrift files. @@ -710,23 +725,18 @@ Then run linting and auto-formatting. Then, ensure that all tests work (via `pytest .`). Then you are good to go to do a release! 
- - ### Preparing your environment First, install all necessary dev dependencies via `pip install -r requirements_dev.txt`. - - ### Doing the Release There is a convenient script `release.sh` to do all steps for a release. Namely, these are -* Remove all transient directories from last release (if exists) -* (Re-)generate all generated sources via mvn -* Run Linting (flake8) -* Run Tests via pytest -* Build -* Release to pypi - +- Remove all transient directories from last release (if exists) +- (Re-)generate all generated sources via mvn +- Run Linting (flake8) +- Run Tests via pytest +- Build +- Release to pypi diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-Rust-Native-API.md b/src/UserGuide/V1.3.3/API/Programming-Rust-Native-API.md similarity index 63% rename from src/UserGuide/V2.0.1/Tree/API/Programming-Rust-Native-API.md rename to src/UserGuide/V1.3.3/API/Programming-Rust-Native-API.md index f58df68fc..4ec73a52b 100644 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-Rust-Native-API.md +++ b/src/UserGuide/V1.3.3/API/Programming-Rust-Native-API.md @@ -1,78 +1,77 @@ # Rust Native API Native API -IoTDB uses Thrift as a cross language RPC framework, so access to IoTDB can be achieved through the interface provided by Thrift. +IoTDB uses Thrift as a cross language RPC framework, so access to IoTDB can be achieved through the interface provided by Thrift. This document will introduce how to generate a native Rust interface that can access IoTDB. ## Dependents - * JDK >= 1.8 - * Rust >= 1.0.0 - * thrift 0.14.1 - * Linux、Macos or like unix - * Windows+bash +- JDK >= 1.8 +- Rust >= 1.0.0 +- thrift 0.14.1 +- Linux、Macos or like unix +- Windows+bash Thrift (0.14.1 or higher) must be installed to compile Thrift files into Rust code. The following is the official installation tutorial, and in the end, you should receive a Thrift executable file. 
-``` -http://thrift.apache.org/docs/install/ -``` + ## Compile the Thrift library and generate the Rust native interface 1. Find the `pom.xml` file in the root directory of the IoTDB source code folder. 2. Open the `pom.xml` file and find the following content: + ```xml - - generate-thrift-sources-python - generate-sources - - compile - - - py - ${project.build.directory}/generated-sources-python/ - - + + generate-thrift-sources-python + generate-sources + + compile + + + py + ${project.build.directory}/generated-sources-python/ + + ``` + 3. Duplicate this block and change the `id`, `generator` and `outputDirectory` to this: + ```xml - - generate-thrift-sources-rust - generate-sources - - compile - - - rs - ${project.build.directory}/generated-sources-rust/ - - + + generate-thrift-sources-rust + generate-sources + + compile + + + rs + ${project.build.directory}/generated-sources-rust/ + + ``` + 4. In the root directory of the IoTDB source code folder,run `mvn clean generate-sources`. -This command will automatically delete the files in `iotdb/iotdb-protocol/thrift/target` and `iotdb/iotdb-protocol/thrift-commons/target`, and repopulate the folder with the newly generated files. -The newly generated Rust sources will be located in `iotdb/iotdb-protocol/thrift/target/generated-sources-rust` in the various modules of the `iotdb-protocol` module. + This command will automatically delete the files in `iotdb/iotdb-protocol/thrift/target` and `iotdb/iotdb-protocol/thrift-commons/target`, and repopulate the folder with the newly generated files. + The newly generated Rust sources will be located in `iotdb/iotdb-protocol/thrift/target/generated-sources-rust` in the various modules of the `iotdb-protocol` module. 
## Using the Rust native interface @@ -80,7 +79,7 @@ Copy `iotdb/iotdb-protocol/thrift/target/generated-sources-rust/` and `iotdb/iot ## RPC interface -``` +```cpp // open a session TSOpenSessionResp openSession(1:TSOpenSessionReq req); @@ -96,7 +95,7 @@ TSStatus executeBatchStatement(1:TSExecuteBatchStatementReq req); // execute query SQL statement TSExecuteStatementResp executeQueryStatement(1:TSExecuteStatementReq req); -// execute insert, delete and update SQL statement +// execute insert, delete and update SQL statement TSExecuteStatementResp executeUpdateStatement(1:TSExecuteStatementReq req); // fetch next query result @@ -105,7 +104,7 @@ TSFetchResultsResp fetchResults(1:TSFetchResultsReq req) // fetch meta data TSFetchMetadataResp fetchMetadata(1:TSFetchMetadataReq req) -// cancel a query +// cancel a query TSStatus cancelOperation(1:TSCancelOperationReq req); // close a query dataset diff --git a/src/UserGuide/V2.0.1/Tree/API/RestServiceV1.md b/src/UserGuide/V1.3.3/API/RestServiceV1.md similarity index 57% rename from src/UserGuide/V2.0.1/Tree/API/RestServiceV1.md rename to src/UserGuide/V1.3.3/API/RestServiceV1.md index 775235fed..9d46c3c65 100644 --- a/src/UserGuide/V2.0.1/Tree/API/RestServiceV1.md +++ b/src/UserGuide/V1.3.3/API/RestServiceV1.md @@ -1,25 +1,23 @@ -# RESTful API V1(Not Recommend) +# RESTful API V1(Not Recommend) + IoTDB's RESTful services can be used for query, write, and management operations, using the OpenAPI standard to define interfaces and generate frameworks. ## Enable RESTful Services @@ -33,6 +31,7 @@ RESTful services are disabled by default. ``` ## Authentication + Except the liveness probe API `/ping`, RESTful services use the basic authentication. Each URL request needs to carry `'Authorization': 'Basic ' + base64.encode(username + ':' + password)`. The username used in the following examples is: `root`, and password is: `root`. 
@@ -48,24 +47,26 @@ Authorization: Basic cm9vdDpyb290 HTTP Status Code:`401` HTTP response body: - ```json - { - "code": 600, - "message": "WRONG_LOGIN_PASSWORD_ERROR" - } - ``` + + ```json + { + "code": 600, + "message": "WRONG_LOGIN_PASSWORD_ERROR" + } + ``` - If the `Authorization` header is missing,the following error is returned: HTTP Status Code:`401` HTTP response body: - ```json - { - "code": 603, - "message": "UNINITIALIZED_AUTH_ERROR" - } - ``` + + ```json + { + "code": 603, + "message": "UNINITIALIZED_AUTH_ERROR" + } + ``` ## Interface @@ -79,7 +80,7 @@ Request path: `http://ip:port/ping` The user name used in the example is: root, password: root -Example request: +Example request: ```shell $ curl http://127.0.0.1:18080/ping @@ -92,10 +93,10 @@ Response status codes: Response parameters: -|parameter name |parameter type |parameter describe| -|:--- | :--- | :---| -|code | integer | status code | -| message | string | message | +| parameter name | parameter type | parameter describe | +| :------------- | :------------- | :----------------- | +| code | integer | status code | +| message | string | message | Sample response: @@ -131,18 +132,18 @@ Request path: `http://ip:port/rest/v1/query` Parameter Description: -| parameter name | parameter type | required | parameter description | -|----------------| -------------- | -------- | ------------------------------------------------------------ | -| sql | string | yes | | -| rowLimit | integer | no | The maximum number of rows in the result set that can be returned by a query.
If this parameter is not set, the `rest_query_default_row_size_limit` of the configuration file will be used as the default value.
When the number of rows in the returned result set exceeds the limit, the status code `411` will be returned. | +| parameter name | parameter type | required | parameter description | +| -------------- | -------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| sql | string | yes | | +| rowLimit | integer | no | The maximum number of rows in the result set that can be returned by a query.
If this parameter is not set, the `rest_query_default_row_size_limit` of the configuration file will be used as the default value.
When the number of rows in the returned result set exceeds the limit, the status code `411` will be returned. | Response parameters: -| parameter name | parameter type | parameter description | -|----------------| -------------- | ------------------------------------------------------------ | -| expressions | array | Array of result set column names for data query, `null` for metadata query | -| columnNames | array | Array of column names for metadata query result set, `null` for data query | -| timestamps | array | Timestamp column, `null` for metadata query | +| parameter name | parameter type | parameter description | +| -------------- | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| expressions | array | Array of result set column names for data query, `null` for metadata query | +| columnNames | array | Array of column names for metadata query result set, `null` for data query | +| timestamps | array | Timestamp column, `null` for metadata query | | values | array | A two-dimensional array, the first dimension has the same length as the result set column name array, and the second dimension array represents a column of the result set | **Examples:** @@ -151,38 +152,24 @@ Tip: Statements like `select * from root.xx.**` are not recommended because thos **Expression query** - ```shell - curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"sql":"select s3, s4, s3 + 1 from root.sg27 limit 2"}' http://127.0.0.1:18080/rest/v1/query - ```` +```shell +curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"sql":"select s3, s4, s3 + 1 from root.sg27 limit 2"}' http://127.0.0.1:18080/rest/v1/query +``` + Response instance - ```json - { - "expressions": [ - "root.sg27.s3", - "root.sg27.s4", - "root.sg27.s3 + 1" - ], - "columnNames": null, - 
"timestamps": [ - 1635232143960, - 1635232153960 - ], - "values": [ - [ - 11, - null - ], - [ - false, - true - ], - [ - 12.0, - null - ] - ] - } - ``` + +```json +{ + "expressions": ["root.sg27.s3", "root.sg27.s4", "root.sg27.s3 + 1"], + "columnNames": null, + "timestamps": [1635232143960, 1635232153960], + "values": [ + [11, null], + [false, true], + [12.0, null] + ] +} +``` **Show child paths** @@ -193,16 +180,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": [ - "child paths" - ], + "columnNames": ["child paths"], "timestamps": null, - "values": [ - [ - "root.sg27", - "root.sg28" - ] - ] + "values": [["root.sg27", "root.sg28"]] } ``` @@ -215,16 +195,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": [ - "child nodes" - ], + "columnNames": ["child nodes"], "timestamps": null, - "values": [ - [ - "sg27", - "sg28" - ] - ] + "values": [["sg27", "sg28"]] } ``` @@ -237,20 +210,11 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": [ - "database", - "ttl" - ], + "columnNames": ["database", "ttl"], "timestamps": null, "values": [ - [ - "root.sg27", - "root.sg28" - ], - [ - null, - null - ] + ["root.sg27", "root.sg28"], + [null, null] ] } ``` @@ -264,19 +228,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": [ - "database", - "ttl" - ], + "columnNames": ["database", "ttl"], "timestamps": null, - "values": [ - [ - "root.sg27" - ], - [ - null - ] - ] + "values": [["root.sg27"], [null]] } ``` @@ -339,54 +293,14 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ], "timestamps": null, "values": [ - [ - "root.sg27.s3", - "root.sg27.s4", - "root.sg28.s3", - "root.sg28.s4" - ], - [ - null, - null, - null, - null - ], - [ 
- "root.sg27", - "root.sg27", - "root.sg28", - "root.sg28" - ], - [ - "INT32", - "BOOLEAN", - "INT32", - "BOOLEAN" - ], - [ - "RLE", - "RLE", - "RLE", - "RLE" - ], - [ - "SNAPPY", - "SNAPPY", - "SNAPPY", - "SNAPPY" - ], - [ - null, - null, - null, - null - ], - [ - null, - null, - null, - null - ] + ["root.sg27.s3", "root.sg27.s4", "root.sg28.s3", "root.sg28.s4"], + [null, null, null, null], + ["root.sg27", "root.sg27", "root.sg28", "root.sg28"], + ["INT32", "BOOLEAN", "INT32", "BOOLEAN"], + ["RLE", "RLE", "RLE", "RLE"], + ["SNAPPY", "SNAPPY", "SNAPPY", "SNAPPY"], + [null, null, null, null], + [null, null, null, null] ] } ``` @@ -412,54 +326,14 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ], "timestamps": null, "values": [ - [ - "root.sg28.s4", - "root.sg27.s4", - "root.sg28.s3", - "root.sg27.s3" - ], - [ - null, - null, - null, - null - ], - [ - "root.sg28", - "root.sg27", - "root.sg28", - "root.sg27" - ], - [ - "BOOLEAN", - "BOOLEAN", - "INT32", - "INT32" - ], - [ - "RLE", - "RLE", - "RLE", - "RLE" - ], - [ - "SNAPPY", - "SNAPPY", - "SNAPPY", - "SNAPPY" - ], - [ - null, - null, - null, - null - ], - [ - null, - null, - null, - null - ] + ["root.sg28.s4", "root.sg27.s4", "root.sg28.s3", "root.sg27.s3"], + [null, null, null, null], + ["root.sg28", "root.sg27", "root.sg28", "root.sg27"], + ["BOOLEAN", "BOOLEAN", "INT32", "INT32"], + ["RLE", "RLE", "RLE", "RLE"], + ["SNAPPY", "SNAPPY", "SNAPPY", "SNAPPY"], + [null, null, null, null], + [null, null, null, null] ] } ``` @@ -473,15 +347,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": [ - "count" - ], + "columnNames": ["count"], "timestamps": null, - "values": [ - [ - 4 - ] - ] + "values": [[4]] } ``` @@ -494,15 +362,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": [ - "count" - ], + "columnNames": ["count"], 
"timestamps": null, - "values": [ - [ - 4 - ] - ] + "values": [[4]] } ``` @@ -515,20 +377,11 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": [ - "devices", - "isAligned" - ], + "columnNames": ["devices", "isAligned"], "timestamps": null, "values": [ - [ - "root.sg27", - "root.sg28" - ], - [ - "false", - "false" - ] + ["root.sg27", "root.sg28"], + ["false", "false"] ] } ``` @@ -542,25 +395,12 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": [ - "devices", - "database", - "isAligned" - ], + "columnNames": ["devices", "database", "isAligned"], "timestamps": null, "values": [ - [ - "root.sg27", - "root.sg28" - ], - [ - "root.sg27", - "root.sg28" - ], - [ - "false", - "false" - ] + ["root.sg27", "root.sg28"], + ["root.sg27", "root.sg28"], + ["false", "false"] ] } ``` @@ -574,15 +414,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": [ - "user" - ], + "columnNames": ["user"], "timestamps": null, - "values": [ - [ - "root" - ] - ] + "values": [["root"]] } ``` @@ -594,22 +428,10 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { - "expressions": [ - "count(root.sg27.s3)", - "count(root.sg27.s4)" - ], + "expressions": ["count(root.sg27.s3)", "count(root.sg27.s4)"], "columnNames": null, - "timestamps": [ - 0 - ], - "values": [ - [ - 1 - ], - [ - 2 - ] - ] + "timestamps": [0], + "values": [[1], [2]] } ``` @@ -622,19 +444,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": [ - "count(root.sg27.*)", - "count(root.sg28.*)" - ], + "columnNames": ["count(root.sg27.*)", "count(root.sg28.*)"], "timestamps": null, - "values": [ - [ - 3 - ], - [ - 3 - ] - ] + "values": [[3], [3]] } ``` @@ -646,48 +458,15 @@ curl -H 
"Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { - "expressions": [ - "count(root.sg27.s3)", - "count(root.sg27.s4)" - ], + "expressions": ["count(root.sg27.s3)", "count(root.sg27.s4)"], "columnNames": null, "timestamps": [ - 1635232143960, - 1635232144960, - 1635232145960, - 1635232146960, - 1635232147960, - 1635232148960, - 1635232149960, - 1635232150960, - 1635232151960, - 1635232152960 + 1635232143960, 1635232144960, 1635232145960, 1635232146960, 1635232147960, + 1635232148960, 1635232149960, 1635232150960, 1635232151960, 1635232152960 ], "values": [ - [ - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - [ - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ] + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0] ] } ``` @@ -701,25 +480,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" - ```json { "expressions": null, - "columnNames": [ - "timeseries", - "value", - "dataType" - ], - "timestamps": [ - 1635232143960 - ], - "values": [ - [ - "root.sg27.s3" - ], - [ - "11" - ], - [ - "INT32" - ] - ] + "columnNames": ["timeseries", "value", "dataType"], + "timestamps": [1635232143960], + "values": [["root.sg27.s3"], ["11"], ["INT32"]] } ``` @@ -772,23 +535,25 @@ Request path: `http://ip:port/rest/v1/nonQuery` Parameter Description: -|parameter name |parameter type |parameter describe| -|:--- | :--- | :---| -| sql | string | query content | +| parameter name | parameter type | parameter describe | +| :------------- | :------------- | :----------------- | +| sql | string | query content | Example request: + ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"sql":"CREATE DATABASE root.ln"}' http://127.0.0.1:18080/rest/v1/nonQuery ``` Response parameters: -|parameter name |parameter type |parameter describe| -|:--- | :--- | :---| -| code | integer | status code | -| message | string | message | +| parameter name | parameter type | 
parameter describe | +| :------------- | :------------- | :----------------- | +| code | integer | status code | +| message | string | message | Sample response: + ```json { "code": 200, @@ -796,8 +561,6 @@ Sample response: } ``` - - ### insertTablet Request method: `POST` @@ -808,28 +571,30 @@ Request path: `http://ip:port/rest/v1/insertTablet` Parameter Description: -| parameter name |parameter type |is required|parameter describe| -|:---------------| :--- | :---| :---| -| timestamps | array | yes | Time column | -| measurements | array | yes | The name of the measuring point | -| dataTypes | array | yes | The data type | -| values | array | yes | Value columns, the values in each column can be `null` | -| isAligned | boolean | yes | Whether to align the timeseries | -| deviceId | string | yes | Device name | +| parameter name | parameter type | is required | parameter describe | +| :------------- | :------------- | :---------- | :----------------------------------------------------- | +| timestamps | array | yes | Time column | +| measurements | array | yes | The name of the measuring point | +| dataTypes | array | yes | The data type | +| values | array | yes | Value columns, the values in each column can be `null` | +| isAligned | boolean | yes | Whether to align the timeseries | +| deviceId | string | yes | Device name | Example request: + ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"timestamps":[1635232143960,1635232153960],"measurements":["s3","s4"],"dataTypes":["INT32","BOOLEAN"],"values":[[11,null],[false,true]],"isAligned":false,"deviceId":"root.sg27"}' http://127.0.0.1:18080/rest/v1/insertTablet ``` Sample response: -|parameter name |parameter type |parameter describe| -|:--- | :--- | :---| -| code | integer | status code | -| message | string | message | +| parameter name | parameter type | parameter describe | +| :------------- | :------------- | :----------------- | +| code | integer | 
status code | +| message | string | message | Sample response: + ```json { "code": 200, @@ -841,83 +606,79 @@ Sample response: The configuration is located in 'iotdb-system.properties'. -* Set 'enable_rest_service' to 'true' to enable the module, and 'false' to disable the module. By default, this value is' false '. +- Set 'enable_rest_service' to 'true' to enable the module, and 'false' to disable the module. By default, this value is' false '. ```properties enable_rest_service=true ``` -* This parameter is valid only when 'enable_REST_service =true'. Set 'rest_service_port' to a number (1025 to 65535) to customize the REST service socket port. By default, the value is 18080. +- This parameter is valid only when 'enable_REST_service =true'. Set 'rest_service_port' to a number (1025 to 65535) to customize the REST service socket port. By default, the value is 18080. ```properties rest_service_port=18080 ``` -* Set 'enable_swagger' to 'true' to display rest service interface information through swagger, and 'false' to do not display the rest service interface information through the swagger. By default, this value is' false '. +- Set 'enable_swagger' to 'true' to display rest service interface information through swagger, and 'false' to do not display the rest service interface information through the swagger. By default, this value is' false '. ```properties enable_swagger=false ``` -* The maximum number of rows in the result set that can be returned by a query. When the number of rows in the returned result set exceeds the limit, the status code `411` is returned. +- The maximum number of rows in the result set that can be returned by a query. When the number of rows in the returned result set exceeds the limit, the status code `411` is returned. 
-````properties +```properties rest_query_default_row_size_limit=10000 -```` +``` -* Expiration time for caching customer login information (used to speed up user authentication, in seconds, 8 hours by default) +- Expiration time for caching customer login information (used to speed up user authentication, in seconds, 8 hours by default) ```properties cache_expire=28800 ``` - -* Maximum number of users stored in the cache (default: 100) +- Maximum number of users stored in the cache (default: 100) ```properties cache_max_num=100 ``` -* Initial cache size (default: 10) +- Initial cache size (default: 10) ```properties cache_init_num=10 ``` -* REST Service whether to enable SSL configuration, set 'enable_https' to' true 'to enable the module, and set' false 'to disable the module. By default, this value is' false '. +- REST Service whether to enable SSL configuration, set 'enable_https' to' true 'to enable the module, and set' false 'to disable the module. By default, this value is' false '. 
```properties enable_https=false ``` -* keyStore location path (optional) +- keyStore location path (optional) ```properties key_store_path= ``` - -* keyStore password (optional) +- keyStore password (optional) ```properties key_store_pwd= ``` - -* trustStore location path (optional) +- trustStore location path (optional) ```properties trust_store_path= ``` -* trustStore password (optional) +- trustStore password (optional) ```properties trust_store_pwd= ``` - -* SSL timeout period, in seconds +- SSL timeout period, in seconds ```properties idle_timeout=5000 diff --git a/src/UserGuide/V2.0.1/Tree/API/RestServiceV2.md b/src/UserGuide/V1.3.3/API/RestServiceV2.md similarity index 60% rename from src/UserGuide/V2.0.1/Tree/API/RestServiceV2.md rename to src/UserGuide/V1.3.3/API/RestServiceV2.md index 6c6011bf5..36dbf72f0 100644 --- a/src/UserGuide/V2.0.1/Tree/API/RestServiceV2.md +++ b/src/UserGuide/V1.3.3/API/RestServiceV2.md @@ -1,25 +1,23 @@ -# RESTful API V2 +# RESTful API V2 + IoTDB's RESTful services can be used for query, write, and management operations, using the OpenAPI standard to define interfaces and generate frameworks. ## Enable RESTful Services @@ -33,6 +31,7 @@ RESTful services are disabled by default. ``` ## Authentication + Except the liveness probe API `/ping`, RESTful services use the basic authentication. Each URL request needs to carry `'Authorization': 'Basic ' + base64.encode(username + ':' + password)`. The username used in the following examples is: `root`, and password is: `root`. 
@@ -48,24 +47,26 @@ Authorization: Basic cm9vdDpyb290 HTTP Status Code:`401` HTTP response body: - ```json - { - "code": 600, - "message": "WRONG_LOGIN_PASSWORD_ERROR" - } - ``` + + ```json + { + "code": 600, + "message": "WRONG_LOGIN_PASSWORD_ERROR" + } + ``` - If the `Authorization` header is missing,the following error is returned: HTTP Status Code:`401` HTTP response body: - ```json - { - "code": 603, - "message": "UNINITIALIZED_AUTH_ERROR" - } - ``` + + ```json + { + "code": 603, + "message": "UNINITIALIZED_AUTH_ERROR" + } + ``` ## Interface @@ -79,7 +80,7 @@ Request path: `http://ip:port/ping` The user name used in the example is: root, password: root -Example request: +Example request: ```shell $ curl http://127.0.0.1:18080/ping @@ -92,10 +93,10 @@ Response status codes: Response parameters: -|parameter name |parameter type |parameter describe| -|:--- | :--- | :---| -|code | integer | status code | -| message | string | message | +| parameter name | parameter type | parameter describe | +| :------------- | :------------- | :----------------- | +| code | integer | status code | +| message | string | message | Sample response: @@ -131,18 +132,18 @@ Request path: `http://ip:port/rest/v2/query` Parameter Description: -| parameter name | parameter type | required | parameter description | -|----------------| -------------- | -------- | ------------------------------------------------------------ | -| sql | string | yes | | +| parameter name | parameter type | required | parameter description | +| -------------- | -------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| sql | string | yes | | | row_limit | integer | no | The maximum number of rows in the result 
set that can be returned by a query.
If this parameter is not set, the `rest_query_default_row_size_limit` of the configuration file will be used as the default value.
When the number of rows in the returned result set exceeds the limit, the status code `411` will be returned. | Response parameters: -| parameter name | parameter type | parameter description | -|----------------| -------------- | ------------------------------------------------------------ | -| expressions | array | Array of result set column names for data query, `null` for metadata query | -| column_names | array | Array of column names for metadata query result set, `null` for data query | -| timestamps | array | Timestamp column, `null` for metadata query | +| parameter name | parameter type | parameter description | +| -------------- | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| expressions | array | Array of result set column names for data query, `null` for metadata query | +| column_names | array | Array of column names for metadata query result set, `null` for data query | +| timestamps | array | Timestamp column, `null` for metadata query | | values | array | A two-dimensional array, the first dimension has the same length as the result set column name array, and the second dimension array represents a column of the result set | **Examples:** @@ -153,33 +154,17 @@ Tip: Statements like `select * from root.xx.**` are not recommended because thos ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"sql":"select s3, s4, s3 + 1 from root.sg27 limit 2"}' http://127.0.0.1:18080/rest/v2/query -```` +``` ```json { - "expressions": [ - "root.sg27.s3", - "root.sg27.s4", - "root.sg27.s3 + 1" - ], + "expressions": ["root.sg27.s3", "root.sg27.s4", "root.sg27.s3 + 1"], "column_names": null, - "timestamps": [ - 1635232143960, - 1635232153960 - ], + "timestamps": [1635232143960, 1635232153960], "values": [ - [ - 11, - null - ], - [ - false, - true - ], - [ - 12.0, - null - 
] + [11, null], + [false, true], + [12.0, null] ] } ``` @@ -193,16 +178,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": [ - "child paths" - ], + "column_names": ["child paths"], "timestamps": null, - "values": [ - [ - "root.sg27", - "root.sg28" - ] - ] + "values": [["root.sg27", "root.sg28"]] } ``` @@ -215,16 +193,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": [ - "child nodes" - ], + "column_names": ["child nodes"], "timestamps": null, - "values": [ - [ - "sg27", - "sg28" - ] - ] + "values": [["sg27", "sg28"]] } ``` @@ -237,20 +208,11 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": [ - "database", - "ttl" - ], + "column_names": ["database", "ttl"], "timestamps": null, "values": [ - [ - "root.sg27", - "root.sg28" - ], - [ - null, - null - ] + ["root.sg27", "root.sg28"], + [null, null] ] } ``` @@ -264,19 +226,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": [ - "database", - "ttl" - ], + "column_names": ["database", "ttl"], "timestamps": null, - "values": [ - [ - "root.sg27" - ], - [ - null - ] - ] + "values": [["root.sg27"], [null]] } ``` @@ -339,54 +291,14 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ], "timestamps": null, "values": [ - [ - "root.sg27.s3", - "root.sg27.s4", - "root.sg28.s3", - "root.sg28.s4" - ], - [ - null, - null, - null, - null - ], - [ - "root.sg27", - "root.sg27", - "root.sg28", - "root.sg28" - ], - [ - "INT32", - "BOOLEAN", - "INT32", - "BOOLEAN" - ], - [ - "RLE", - "RLE", - "RLE", - "RLE" - ], - [ - "SNAPPY", - "SNAPPY", - "SNAPPY", - "SNAPPY" - ], - [ - null, - null, - null, - null - ], - [ - null, - null, - null, - null - ] + ["root.sg27.s3", "root.sg27.s4", 
"root.sg28.s3", "root.sg28.s4"], + [null, null, null, null], + ["root.sg27", "root.sg27", "root.sg28", "root.sg28"], + ["INT32", "BOOLEAN", "INT32", "BOOLEAN"], + ["RLE", "RLE", "RLE", "RLE"], + ["SNAPPY", "SNAPPY", "SNAPPY", "SNAPPY"], + [null, null, null, null], + [null, null, null, null] ] } ``` @@ -412,54 +324,14 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ], "timestamps": null, "values": [ - [ - "root.sg28.s4", - "root.sg27.s4", - "root.sg28.s3", - "root.sg27.s3" - ], - [ - null, - null, - null, - null - ], - [ - "root.sg28", - "root.sg27", - "root.sg28", - "root.sg27" - ], - [ - "BOOLEAN", - "BOOLEAN", - "INT32", - "INT32" - ], - [ - "RLE", - "RLE", - "RLE", - "RLE" - ], - [ - "SNAPPY", - "SNAPPY", - "SNAPPY", - "SNAPPY" - ], - [ - null, - null, - null, - null - ], - [ - null, - null, - null, - null - ] + ["root.sg28.s4", "root.sg27.s4", "root.sg28.s3", "root.sg27.s3"], + [null, null, null, null], + ["root.sg28", "root.sg27", "root.sg28", "root.sg27"], + ["BOOLEAN", "BOOLEAN", "INT32", "INT32"], + ["RLE", "RLE", "RLE", "RLE"], + ["SNAPPY", "SNAPPY", "SNAPPY", "SNAPPY"], + [null, null, null, null], + [null, null, null, null] ] } ``` @@ -473,15 +345,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": [ - "count" - ], + "column_names": ["count"], "timestamps": null, - "values": [ - [ - 4 - ] - ] + "values": [[4]] } ``` @@ -494,15 +360,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": [ - "count" - ], + "column_names": ["count"], "timestamps": null, - "values": [ - [ - 4 - ] - ] + "values": [[4]] } ``` @@ -515,20 +375,11 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": [ - "devices", - "isAligned" - ], + "column_names": ["devices", "isAligned"], "timestamps": null, "values": [ 
- [ - "root.sg27", - "root.sg28" - ], - [ - "false", - "false" - ] + ["root.sg27", "root.sg28"], + ["false", "false"] ] } ``` @@ -542,25 +393,12 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": [ - "devices", - "database", - "isAligned" - ], + "column_names": ["devices", "database", "isAligned"], "timestamps": null, "values": [ - [ - "root.sg27", - "root.sg28" - ], - [ - "root.sg27", - "root.sg28" - ], - [ - "false", - "false" - ] + ["root.sg27", "root.sg28"], + ["root.sg27", "root.sg28"], + ["false", "false"] ] } ``` @@ -574,15 +412,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": [ - "user" - ], + "column_names": ["user"], "timestamps": null, - "values": [ - [ - "root" - ] - ] + "values": [["root"]] } ``` @@ -594,22 +426,10 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { - "expressions": [ - "count(root.sg27.s3)", - "count(root.sg27.s4)" - ], + "expressions": ["count(root.sg27.s3)", "count(root.sg27.s4)"], "column_names": null, - "timestamps": [ - 0 - ], - "values": [ - [ - 1 - ], - [ - 2 - ] - ] + "timestamps": [0], + "values": [[1], [2]] } ``` @@ -622,19 +442,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": [ - "count(root.sg27.*)", - "count(root.sg28.*)" - ], + "column_names": ["count(root.sg27.*)", "count(root.sg28.*)"], "timestamps": null, - "values": [ - [ - 3 - ], - [ - 3 - ] - ] + "values": [[3], [3]] } ``` @@ -646,48 +456,15 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { - "expressions": [ - "count(root.sg27.s3)", - "count(root.sg27.s4)" - ], + "expressions": ["count(root.sg27.s3)", "count(root.sg27.s4)"], "column_names": null, "timestamps": [ - 1635232143960, - 1635232144960, - 1635232145960, - 1635232146960, - 
1635232147960, - 1635232148960, - 1635232149960, - 1635232150960, - 1635232151960, - 1635232152960 + 1635232143960, 1635232144960, 1635232145960, 1635232146960, 1635232147960, + 1635232148960, 1635232149960, 1635232150960, 1635232151960, 1635232152960 ], "values": [ - [ - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - [ - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ] + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0] ] } ``` @@ -701,25 +478,9 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" - ```json { "expressions": null, - "column_names": [ - "timeseries", - "value", - "dataType" - ], - "timestamps": [ - 1635232143960 - ], - "values": [ - [ - "root.sg27.s3" - ], - [ - "11" - ], - [ - "INT32" - ] - ] + "column_names": ["timeseries", "value", "dataType"], + "timestamps": [1635232143960], + "values": [["root.sg27.s3"], ["11"], ["INT32"]] } ``` @@ -772,23 +533,25 @@ Request path: `http://ip:port/rest/v2/nonQuery` Parameter Description: -|parameter name |parameter type |parameter describe| -|:--- | :--- | :---| -| sql | string | query content | +| parameter name | parameter type | parameter describe | +| :------------- | :------------- | :----------------- | +| sql | string | query content | Example request: + ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"sql":"CREATE DATABASE root.ln"}' http://127.0.0.1:18080/rest/v2/nonQuery ``` Response parameters: -|parameter name |parameter type |parameter describe| -|:--- | :--- | :---| -| code | integer | status code | -| message | string | message | +| parameter name | parameter type | parameter describe | +| :------------- | :------------- | :----------------- | +| code | integer | status code | +| message | string | message | Sample response: + ```json { "code": 200, @@ -796,8 +559,6 @@ Sample response: } ``` - - ### insertTablet Request method: `POST` @@ -808,28 +569,30 @@ Request path: 
`http://ip:port/rest/v2/insertTablet` Parameter Description: -| parameter name |parameter type |is required|parameter describe| -|:---------------| :--- | :---| :---| -| timestamps | array | yes | Time column | -| measurements | array | yes | The name of the measuring point | -| data_types | array | yes | The data type | -| values | array | yes | Value columns, the values in each column can be `null` | -| is_aligned | boolean | yes | Whether to align the timeseries | -| device | string | yes | Device name | +| parameter name | parameter type | is required | parameter describe | +| :------------- | :------------- | :---------- | :----------------------------------------------------- | +| timestamps | array | yes | Time column | +| measurements | array | yes | The name of the measuring point | +| data_types | array | yes | The data type | +| values | array | yes | Value columns, the values in each column can be `null` | +| is_aligned | boolean | yes | Whether to align the timeseries | +| device | string | yes | Device name | Example request: + ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"timestamps":[1635232143960,1635232153960],"measurements":["s3","s4"],"data_types":["INT32","BOOLEAN"],"values":[[11,null],[false,true]],"is_aligned":false,"device":"root.sg27"}' http://127.0.0.1:18080/rest/v2/insertTablet ``` Sample response: -|parameter name |parameter type |parameter describe| -|:--- | :--- | :---| -| code | integer | status code | -| message | string | message | +| parameter name | parameter type | parameter describe | +| :------------- | :------------- | :----------------- | +| code | integer | status code | +| message | string | message | Sample response: + ```json { "code": 200, @@ -847,28 +610,30 @@ Request path: `http://ip:port/rest/v2/insertRecords` Parameter Description: -| parameter name |parameter type |is required|parameter describe| -|:------------------| :--- | :---| :---| -| timestamps | 
array | yes | Time column | -| measurements_list | array | yes | The name of the measuring point | -| data_types_list | array | yes | The data type | -| values_list | array | yes | Value columns, the values in each column can be `null` | -| devices | string | yes | Device name | -| is_aligned | boolean | yes | Whether to align the timeseries | +| parameter name | parameter type | is required | parameter describe | +| :---------------- | :------------- | :---------- | :----------------------------------------------------- | +| timestamps | array | yes | Time column | +| measurements_list | array | yes | The name of the measuring point | +| data_types_list | array | yes | The data type | +| values_list | array | yes | Value columns, the values in each column can be `null` | +| devices | string | yes | Device name | +| is_aligned | boolean | yes | Whether to align the timeseries | Example request: + ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"timestamps":[1635232113960,1635232151960,1635232143960,1635232143960],"measurements_list":[["s33","s44"],["s55","s66"],["s77","s88"],["s771","s881"]],"data_types_list":[["INT32","INT64"],["FLOAT","DOUBLE"],["FLOAT","DOUBLE"],["BOOLEAN","TEXT"]],"values_list":[[1,11],[2.1,2],[4,6],[false,"cccccc"]],"is_aligned":false,"devices":["root.s1","root.s1","root.s1","root.s3"]}' http://127.0.0.1:18080/rest/v2/insertRecords ``` Sample response: -|parameter name |parameter type |parameter describe| -|:--- | :--- | :---| -| code | integer | status code | -| message | string | message | +| parameter name | parameter type | parameter describe | +| :------------- | :------------- | :----------------- | +| code | integer | status code | +| message | string | message | Sample response: + ```json { "code": 200, @@ -876,88 +641,83 @@ Sample response: } ``` - ## Configuration The configuration is located in 'iotdb-system.properties'. 
-* Set 'enable_rest_service' to 'true' to enable the module, and 'false' to disable the module. By default, this value is' false '. +- Set 'enable_rest_service' to 'true' to enable the module, and 'false' to disable the module. By default, this value is' false '. ```properties enable_rest_service=true ``` -* This parameter is valid only when 'enable_REST_service =true'. Set 'rest_service_port' to a number (1025 to 65535) to customize the REST service socket port. By default, the value is 18080. +- This parameter is valid only when 'enable_REST_service =true'. Set 'rest_service_port' to a number (1025 to 65535) to customize the REST service socket port. By default, the value is 18080. ```properties rest_service_port=18080 ``` -* Set 'enable_swagger' to 'true' to display rest service interface information through swagger, and 'false' to do not display the rest service interface information through the swagger. By default, this value is' false '. +- Set 'enable_swagger' to 'true' to display rest service interface information through swagger, and 'false' to do not display the rest service interface information through the swagger. By default, this value is' false '. ```properties enable_swagger=false ``` -* The maximum number of rows in the result set that can be returned by a query. When the number of rows in the returned result set exceeds the limit, the status code `411` is returned. +- The maximum number of rows in the result set that can be returned by a query. When the number of rows in the returned result set exceeds the limit, the status code `411` is returned. 
-````properties +```properties rest_query_default_row_size_limit=10000 -```` +``` -* Expiration time for caching customer login information (used to speed up user authentication, in seconds, 8 hours by default) +- Expiration time for caching customer login information (used to speed up user authentication, in seconds, 8 hours by default) ```properties cache_expire=28800 ``` - -* Maximum number of users stored in the cache (default: 100) +- Maximum number of users stored in the cache (default: 100) ```properties cache_max_num=100 ``` -* Initial cache size (default: 10) +- Initial cache size (default: 10) ```properties cache_init_num=10 ``` -* REST Service whether to enable SSL configuration, set 'enable_https' to' true 'to enable the module, and set' false 'to disable the module. By default, this value is' false '. +- REST Service whether to enable SSL configuration, set 'enable_https' to' true 'to enable the module, and set' false 'to disable the module. By default, this value is' false '. 
```properties enable_https=false ``` -* keyStore location path (optional) +- keyStore location path (optional) ```properties key_store_path= ``` - -* keyStore password (optional) +- keyStore password (optional) ```properties key_store_pwd= ``` - -* trustStore location path (optional) +- trustStore location path (optional) ```properties trust_store_path= ``` -* trustStore password (optional) +- trustStore password (optional) ```properties trust_store_pwd= ``` - -* SSL timeout period, in seconds +- SSL timeout period, in seconds ```properties idle_timeout=5000 diff --git a/src/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept.md b/src/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept.md rename to src/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept.md diff --git a/src/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept_apache.md b/src/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept_apache.md rename to src/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept_timecho.md b/src/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept_timecho.md rename to src/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Background-knowledge/Data-Type.md b/src/UserGuide/V1.3.3/Background-knowledge/Data-Type.md similarity index 71% rename from src/UserGuide/V2.0.1/Tree/Background-knowledge/Data-Type.md rename to src/UserGuide/V1.3.3/Background-knowledge/Data-Type.md index bc1f03e1a..03fcf7a6e 100644 --- a/src/UserGuide/V2.0.1/Tree/Background-knowledge/Data-Type.md +++ 
b/src/UserGuide/V1.3.3/Background-knowledge/Data-Type.md @@ -1,22 +1,19 @@ # Data Type @@ -25,40 +22,41 @@ IoTDB supports the following data types: -* BOOLEAN (Boolean) -* INT32 (Integer) -* INT64 (Long Integer) -* FLOAT (Single Precision Floating Point) -* DOUBLE (Double Precision Floating Point) -* TEXT (Long String) -* STRING(String) -* BLOB(Large binary Object) -* TIMESTAMP(Timestamp) -* DATE(Date) - +- BOOLEAN (Boolean) +- INT32 (Integer) +- INT64 (Long Integer) +- FLOAT (Single Precision Floating Point) +- DOUBLE (Double Precision Floating Point) +- TEXT (Long String) +- STRING(String) +- BLOB(Large binary Object) +- TIMESTAMP(Timestamp) +- DATE(Date) + The difference between STRING and TEXT types is that STRING type has more statistical information and can be used to optimize value filtering queries, while TEXT type is suitable for storing long strings. ### Float Precision -The time series of **FLOAT** and **DOUBLE** type can specify (MAX\_POINT\_NUMBER, see [this page](../SQL-Manual/SQL-Manual.md) for more information on how to specify), which is the number of digits after the decimal point of the floating point number, if the encoding method is [RLE](../Technical-Insider/Encoding-and-Compression.md) or [TS\_2DIFF](../Technical-Insider/Encoding-and-Compression.md). If MAX\_POINT\_NUMBER is not specified, the system will use [float\_precision](../Reference/DataNode-Config-Manual.md) in the configuration file `iotdb-system.properties`. +The time series of **FLOAT** and **DOUBLE** type can specify (MAX_POINT_NUMBER, see [this page](../SQL-Manual/SQL-Manual.md) for more information on how to specify), which is the number of digits after the decimal point of the floating point number, if the encoding method is [RLE](../Technical-Insider/Encoding-and-Compression.md) or [TS_2DIFF](../Technical-Insider/Encoding-and-Compression.md). 
If MAX_POINT_NUMBER is not specified, the system will use [float_precision](../Reference/DataNode-Config-Manual.md) in the configuration file `iotdb-system.properties`. ```sql CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; ``` -* For Float data value, The data range is (-Integer.MAX_VALUE, Integer.MAX_VALUE), rather than Float.MAX_VALUE, and the max_point_number is 19, caused by the limition of function Math.round(float) in Java. -* For Double data value, The data range is (-Long.MAX_VALUE, Long.MAX_VALUE), rather than Double.MAX_VALUE, and the max_point_number is 19, caused by the limition of function Math.round(double) in Java (Long.MAX_VALUE=9.22E18). +- For Float data value, The data range is (-Integer.MAX_VALUE, Integer.MAX_VALUE), rather than Float.MAX_VALUE, and the max_point_number is 19, caused by the limitation of function Math.round(float) in Java. +- For Double data value, The data range is (-Long.MAX_VALUE, Long.MAX_VALUE), rather than Double.MAX_VALUE, and the max_point_number is 19, caused by the limitation of function Math.round(double) in Java (Long.MAX_VALUE=9.22E18). ### Data Type Compatibility When the written data type is inconsistent with the data type of time-series, + - If the data type of time-series is not compatible with the written data type, the system will give an error message. - If the data type of time-series is compatible with the written data type, the system will automatically convert the data type. The compatibility of each data type is shown in the following table: | Series Data Type | Supported Written Data Types | -|------------------|------------------------------| +| ---------------- | ---------------------------- | | BOOLEAN | BOOLEAN | | INT32 | INT32 | | INT64 | INT32 INT64 | @@ -74,12 +72,10 @@ The timestamp is the time point at which data is produced. 
It includes absolute Absolute timestamps in IoTDB are divided into two types: LONG and DATETIME (including DATETIME-INPUT and DATETIME-DISPLAY). When a user inputs a timestamp, he can use a LONG type timestamp or a DATETIME-INPUT type timestamp, and the supported formats of the DATETIME-INPUT type timestamp are shown in the table below: -
+::: center **Supported formats of DATETIME-INPUT type timestamp** - - | Format | | :--------------------------: | | yyyy-MM-dd HH:mm:ss | @@ -96,16 +92,14 @@ Absolute timestamps in IoTDB are divided into two types: LONG and DATETIME (incl | yyyy.MM.dd HH:mm:ss.SSSZZ | | ISO8601 standard time format | -
- +::: IoTDB can support LONG types and DATETIME-DISPLAY types when displaying timestamps. The DATETIME-DISPLAY type can support user-defined time formats. The syntax of the custom time format is shown in the table below: -
+::: center **The syntax of the custom time format** - | Symbol | Meaning | Presentation | Examples | | :----: | :-------------------------: | :----------: | :--------------------------------: | | G | era | era | era | @@ -138,25 +132,23 @@ IoTDB can support LONG types and DATETIME-DISPLAY types when displaying timestam | ' | escape for text | delimiter | | | '' | single quote | literal | ' | -
+::: ### Relative timestamp -Relative time refers to the time relative to the server time ```now()``` and ```DATETIME``` time. +Relative time refers to the time relative to the server time `now()` and `DATETIME` time. - Syntax: +Syntax: - ``` - Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ - RelativeTime = (now() | DATETIME) ((+|-) Duration)+ - - ``` +``` +Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ +RelativeTime = (now() | DATETIME) ((+|-) Duration)+ +``` -
+::: center **The syntax of the duration unit** - | Symbol | Meaning | Presentation | Examples | | :----: | :---------: | :----------------------: | :------: | | y | year | 1y=365 days | 1y | @@ -172,13 +164,13 @@ Relative time refers to the time relative to the server time ```now()``` and ``` | us | microsecond | 1us=1000 nanoseconds | 1us | | ns | nanosecond | 1ns=1 nanosecond | 1ns | -
+::: - eg: +eg: - ``` - now() - 1d2h //1 day and 2 hours earlier than the current server time - now() - 1w //1 week earlier than the current server time - ``` +``` +now() - 1d2h //1 day and 2 hours earlier than the current server time +now() - 1w //1 week earlier than the current server time +``` - > Note:There must be spaces on the left and right of '+' and '-'. +> Note:There must be spaces on the left and right of '+' and '-'. diff --git a/src/UserGuide/V2.0.1/Tree/Basic-Concept/Data-Model-and-Terminology.md b/src/UserGuide/V1.3.3/Basic-Concept/Data-Model-and-Terminology.md similarity index 99% rename from src/UserGuide/V2.0.1/Tree/Basic-Concept/Data-Model-and-Terminology.md rename to src/UserGuide/V1.3.3/Basic-Concept/Data-Model-and-Terminology.md index e1aeb3564..7a10118a9 100644 --- a/src/UserGuide/V2.0.1/Tree/Basic-Concept/Data-Model-and-Terminology.md +++ b/src/UserGuide/V1.3.3/Basic-Concept/Data-Model-and-Terminology.md @@ -94,6 +94,7 @@ The following are the constraints on the `nodeName`: If you need to use special characters in the path node name, you can use reverse quotation marks to reference the path node name. For specific usage, please refer to [Reverse Quotation Marks](../Reference/Syntax-Rule.md#reverse-quotation-marks). + ### Path Pattern In order to make it easier and faster to express multiple timeseries paths, IoTDB provides users with the path pattern. Users can construct a path pattern by using wildcard `*` and `**`. Wildcard can appear in any node of the path. 
diff --git a/src/UserGuide/V2.0.1/Tree/Basic-Concept/Navigating_Time_Series_Data.md b/src/UserGuide/V1.3.3/Basic-Concept/Navigating_Time_Series_Data.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Basic-Concept/Navigating_Time_Series_Data.md rename to src/UserGuide/V1.3.3/Basic-Concept/Navigating_Time_Series_Data.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata.md b/src/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata.md similarity index 99% rename from src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata.md rename to src/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata.md index e0ddf712e..4eb80c594 100644 --- a/src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata.md +++ b/src/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata.md @@ -20,4 +20,4 @@ redirectTo: Operate-Metadata_apache.html specific language governing permissions and limitations under the License. ---> \ No newline at end of file +--> diff --git a/src/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata_apache.md b/src/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata_apache.md rename to src/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata_timecho.md b/src/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata_timecho.md rename to src/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Basic-Concept/Query-Data.md b/src/UserGuide/V1.3.3/Basic-Concept/Query-Data.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Basic-Concept/Query-Data.md rename to src/UserGuide/V1.3.3/Basic-Concept/Query-Data.md diff --git a/src/UserGuide/V2.0.1/Tree/Basic-Concept/Write-Delete-Data.md b/src/UserGuide/V1.3.3/Basic-Concept/Write-Delete-Data.md 
similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Basic-Concept/Write-Delete-Data.md rename to src/UserGuide/V1.3.3/Basic-Concept/Write-Delete-Data.md diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/AINode_Deployment_timecho.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/AINode_Deployment_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/AINode_Deployment_timecho.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/AINode_Deployment_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Cluster-Deployment_apache.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Cluster-Deployment_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Cluster-Deployment_apache.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/Cluster-Deployment_apache.md diff --git a/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Cluster-Deployment_timecho.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Cluster-Deployment_timecho.md new file mode 100644 index 000000000..08579e8a7 --- /dev/null +++ b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Cluster-Deployment_timecho.md @@ -0,0 +1,384 @@ + +# Cluster Deployment + +This section describes how to manually deploy an instance that includes 3 ConfigNodes and 3 DataNodes, commonly known as a 3C3D cluster. + +
+ +
+ +## Note + +1. Before installation, ensure that the system is complete by referring to [System configuration](./Environment-Requirements.md) + +2. It is recommended to prioritize using `hostname` for IP configuration during deployment, which can avoid the problem of modifying the host IP in the later stage and causing the database to fail to start. To set the host name, you need to configure /etc/hosts on the target server. For example, if the local IP is 192.168.1.3 and the host name is iotdb-1, you can use the following command to set the server's host name and configure the `cn_internal_address` and `dn_internal_address` of IoTDB using the host name. + ``` shell + echo "192.168.1.3 iotdb-1" >> /etc/hosts + ``` + +3. Some parameters cannot be modified after the first startup. Please refer to the "Parameter Configuration" section below for settings. + +4. Whether in linux or windows, ensure that the IoTDB installation path does not contain Spaces and Chinese characters to avoid software exceptions. + +5. Please note that when installing and deploying IoTDB (including activating and using software), it is necessary to use the same user for operations. You can: +- Using root user (recommended): Using root user can avoid issues such as permissions. +- Using a fixed non root user: + - Using the same user operation: Ensure that the same user is used for start, activation, stop, and other operations, and do not switch users. + - Avoid using sudo: Try to avoid using sudo commands as they execute commands with root privileges, which may cause confusion or security issues. + +6. It is recommended to deploy a monitoring panel, which can monitor important operational indicators and keep track of database operation status at any time. The monitoring panel can be obtained by contacting the business department,The steps for deploying a monitoring panel can refer to:[Monitoring Panel Deployment](./Monitoring-panel-deployment.md) + +## Preparation Steps + +1. 
Prepare the IoTDB database installation package: iotdb enterprise- {version}-bin.zip(The installation package can be obtained from:[IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_timecho.md)) +2. Configure the operating system environment according to environmental requirements(The system environment configuration can be found in:[Environment Requirement](https://www.timecho.com/docs/UserGuide/latest/Deployment-and-Maintenance/Environment-Requirements.html)) + +## Installation Steps + +Assuming there are three Linux servers now, the IP addresses and service roles are assigned as follows: + +| Node IP | Host Name | Service | +| ----------- | --------- | -------------------- | +| 192.168.1.3 | iotdb-1 | ConfigNode、DataNode | +| 192.168.1.4 | iotdb-2 | ConfigNode、DataNode | +| 192.168.1.5 | iotdb-3 | ConfigNode、DataNode | + +### Set Host Name + +On three machines, configure the host names separately. To set the host names, configure `/etc/hosts` on the target server. Use the following command: + +```Bash +echo "192.168.1.3 iotdb-1" >> /etc/hosts +echo "192.168.1.4 iotdb-2" >> /etc/hosts +echo "192.168.1.5 iotdb-3" >> /etc/hosts +``` + +### Configuration + +Unzip the installation package and enter the installation directory + +```Plain +unzip iotdb-enterprise-{version}-bin.zip +cd iotdb-enterprise-{version}-bin +``` + +#### Environment script configuration + +- `./conf/confignode-env.sh` configuration + + | **Configuration** | **Description** | **Default** | **Recommended value** | **Note** | + | :---------------- | :----------------------------------------------------------- | :---------- | :----------------------------------------------------------- | :---------------------------------- | + | MEMORY_SIZE | The total amount of memory that IoTDB ConfigNode nodes can use | - | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect | + +- `./conf/datanode-env.sh` configuration + + | 
**Configuration** | **Description** | **Default** | **Recommended value** | **Note** | + | :---------------- | :----------------------------------------------------------- | :---------- | :----------------------------------------------------------- | :---------------------------------- | + | MEMORY_SIZE | The total amount of memory that IoTDB DataNode nodes can use | - | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect | + +#### General Configuration + +Open the general configuration file `./conf/iotdb-system.properties`,The following parameters can be set according to the deployment method: + +| **Configuration** | **Description** | 192.168.1.3 | 192.168.1.4 | 192.168.1.5 | +| ------------------------- | ------------------------------------------------------------ | -------------- | -------------- | -------------- | +| cluster_name | Cluster Name | defaultCluster | defaultCluster | defaultCluster | +| schema_replication_factor | The number of metadata replicas, the number of DataNodes should not be less than this number | 3 | 3 | 3 | +| data_replication_factor | The number of data replicas should not be less than this number of DataNodes | 2 | 2 | 2 | + +#### ConfigNode Configuration + +Open the ConfigNode configuration file `./conf/iotdb-system.properties`,Set the following parameters + +| **Configuration** | **Description** | **Default** | **Recommended value** | 192.168.1.3 | 192.168.1.4 | 192.168.1.5 | Note | +| ------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------------ | ------------- | ------------- | ------------- | ---------------------------------------- | +| cn_internal_address | The address used by ConfigNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | 
iotdb-1 | iotdb-2 | iotdb-3 | Cannot be modified after initial startup | +| cn_internal_port | The port used by ConfigNode for communication within the cluster | 10710 | 10710 | 10710 | 10710 | 10710 | Cannot be modified after initial startup | +| cn_consensus_port | The port used for ConfigNode replica group consensus protocol communication | 10720 | 10720 | 10720 | 10720 | 10720 | Cannot be modified after initial startup | +| cn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, `cn_internal_address:cn_internal_port` | 127.0.0.1:10710 | The first CongfigNode's `cn_internal-address: cn_internal_port` | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | Cannot be modified after initial startup | + +#### DataNode Configuration + +Open DataNode Configuration File `./conf/iotdb-system.properties`,Set the following parameters: + +| **Configuration** | **Description** | **Default** | **Recommended value** | 192.168.1.3 | 192.168.1.4 | 192.168.1.5 | Note | +| ------------------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------------ | ------------- | ------------- | ------------- | ---------------------------------------- | +| dn_rpc_address | The address of the client RPC service | 127.0.0.1 | Recommend using the **IPV4 address or hostname** of the server where it is located | iotdb-1 |iotdb-2 | iotdb-3 | Restarting the service takes effect | +| dn_rpc_port | The port of the client RPC service | 6667 | 6667 | 6667 | 6667 | 6667 | Restarting the service takes effect | +| dn_internal_address | The address used by DataNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | iotdb-1 | iotdb-2 | iotdb-3 | Cannot be modified after initial startup | +| dn_internal_port | The port used by DataNode for communication 
within the cluster | 10730 | 10730 | 10730 | 10730 | 10730 | Cannot be modified after initial startup | +| dn_mpp_data_exchange_port | The port used by DataNode to receive data streams | 10740 | 10740 | 10740 | 10740 | 10740 | Cannot be modified after initial startup | +| dn_data_region_consensus_port | The port used by DataNode for data replica consensus protocol communication | 10750 | 10750 | 10750 | 10750 | 10750 | Cannot be modified after initial startup | +| dn_schema_region_consensus_port | The port used by DataNode for metadata replica consensus protocol communication | 10760 | 10760 | 10760 | 10760 | 10760 | Cannot be modified after initial startup | +| dn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, i.e. `cn_internal-address: cn_internal_port` | 127.0.0.1:10710 | The first CongfigNode's cn_internal-address: cn_internal_port | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | Cannot be modified after initial startup | + +> ❗️Attention: Editors such as VSCode Remote do not have automatic configuration saving function. Please ensure that the modified files are saved persistently, otherwise the configuration items will not take effect + +### Start ConfigNode + +Start the first confignode of IoTDB-1 first, ensuring that the seed confignode node starts first, and then start the second and third confignode nodes in sequence + +```Bash +cd sbin +./start-confignode.sh -d #"- d" parameter will start in the background +``` + +If the startup fails, please refer to [Common Questions](#common-questions). 
+ + +### Activate Database + +#### Method 1: Activate file copy activation + +- After starting three confignode nodes in sequence, copy the `activation` folder of each machine and the `system_info` file of each machine to the Timecho staff; + +- The staff will return the license files for each ConfigNode node, where 3 license files will be returned; +- Put the three license files into the `activation` folder of the corresponding ConfigNode node; + +#### Method 2: Activate Script Activation + +- Obtain the machine codes of three machines in sequence, enter the `sbin` directory of the installation directory, and execute the activation script `start-activate.sh`: + + ```Bash + cd sbin + ./start-activate.sh + ``` + +- The following information is displayed, where the machine code of one machine is displayed: + + ```Bash + Please copy the system_info's content and send it to Timecho: + 01-KU5LDFFN-PNBEHDRH + Please enter license: + ``` + +- The other two nodes execute the activation script `start-activate.sh` in sequence, and then copy the machine codes of the three machines obtained to the Timecho staff +- The staff will return 3 activation codes, which normally correspond to the order of the provided 3 machine codes. Please paste each activation code into the previous command line prompt `Please enter license:`, as shown below: + + ```Bash + Please enter license: + Jw+MmF+Atxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx5bAOXNeob5l+HO5fEMgzrW8OJPh26Vl6ljKUpCvpTiw== + License has been stored to sbin/../activation/license + Import completed.
Please start cluster and excute 'show cluster' to verify activation status + ``` + +### Start DataNode + + Enter the `sbin` directory of iotdb and start three datanode nodes in sequence: + +```Bash +cd sbin +./start-datanode.sh -d #"- d" parameter will start in the background +``` + +### Verify Deployment + +Can be executed directly Cli startup script in `./sbin` directory: + +```Plain +./start-cli.sh -h ip(local IP or domain name) -p port(6667) +``` + + After successful startup, the following interface will appear displaying successful installation of IOTDB. + +![](https://alioss.timecho.com/docs/img/%E4%BC%81%E4%B8%9A%E7%89%88%E6%88%90%E5%8A%9F.png) + +After the installation success interface appears, continue to check if the activation is successful and use the `show cluster` command. + +When you see the display of `Activated` on the far right, it indicates successful activation. + +![](https://alioss.timecho.com/docs/img/%E4%BC%81%E4%B8%9A%E7%89%88%E6%BF%80%E6%B4%BB.png) + +> The appearance of `ACTIVATED (W)` indicates passive activation, which means that this Configurable Node does not have a license file (or has not issued the latest license file with a timestamp), and its activation depends on other Activated Configurable Nodes in the cluster. At this point, it is recommended to check if the license file has been placed in the license folder. If not, please place the license file. If a license file already exists, it may be due to inconsistency between the license file of this node and the information of other nodes. Please contact Timecho staff to reapply. 
+ +## Node Maintenance Steps + +### ConfigNode Node Maintenance + +ConfigNode node maintenance is divided into two types of operations: adding and removing ConfigNodes, with two common use cases: +- Cluster expansion: For example, when there is only one ConfigNode in the cluster, and you want to increase the high availability of ConfigNode nodes, you can add two ConfigNodes, making a total of three ConfigNodes in the cluster. +- Cluster failure recovery: When the machine where a ConfigNode is located fails, making the ConfigNode unable to run normally, you can remove this ConfigNode and then add a new ConfigNode to the cluster. + +> ❗️Note, after completing ConfigNode node maintenance, you need to ensure that there are 1 or 3 ConfigNodes running normally in the cluster. Two ConfigNodes do not have high availability, and more than three ConfigNodes will lead to performance loss. + +#### Adding ConfigNode Nodes + +Script command: +```shell +# Linux / MacOS +# First switch to the IoTDB root directory +sbin/start-confignode.sh + +# Windows +# First switch to the IoTDB root directory +sbin/start-confignode.bat +``` + +Parameter introduction: + +| Parameter | Description | Is it required | +| :--- | :--------------------------------------------- | :----------- | +| -v | Show version information | No | +| -f | Run the script in the foreground, do not put it in the background | No | +| -d | Start in daemon mode, i.e. 
run in the background | No | + | -p | Specify a file to store the process ID for process management | No | + | -c | Specify the path to the configuration file folder, the script will load the configuration file from here | No | + | -g | Print detailed garbage collection (GC) information | No | + | -H | Specify the path of the Java heap dump file, used when JVM memory overflows | No | + | -E | Specify the path of the JVM error log file | No | + | -D | Define system properties, in the format key=value | No | + | -X | Pass -XX parameters directly to the JVM | No | + | -h | Help instruction | No | + +#### Removing ConfigNode Nodes + +First connect to the cluster through the CLI and confirm the internal address and port number of the ConfigNode you want to remove by using `show confignodes`: + +```Bash +IoTDB> show confignodes ++------+-------+---------------+------------+--------+ +|NodeID| Status|InternalAddress|InternalPort| Role| ++------+-------+---------------+------------+--------+ +| 0|Running| 127.0.0.1| 10710| Leader| +| 1|Running| 127.0.0.1| 10711|Follower| +| 2|Running| 127.0.0.1| 10712|Follower| ++------+-------+---------------+------------+--------+ +Total line number = 3 +It costs 0.030s +``` + +Then use the script to remove the ConfigNode.
Script command: + +```Bash +# Linux / MacOS +sbin/remove-confignode.sh [confignode_id] + +#Windows +sbin/remove-confignode.bat [confignode_id] + +``` + +### DataNode Node Maintenance + +There are two common scenarios for DataNode node maintenance: + +- Cluster expansion: For the purpose of expanding cluster capabilities, add new DataNodes to the cluster +- Cluster failure recovery: When a machine where a DataNode is located fails, making the DataNode unable to run normally, you can remove this DataNode and add a new DataNode to the cluster + +> ❗️Note, in order for the cluster to work normally, during the process of DataNode node maintenance and after the maintenance is completed, the total number of DataNodes running normally should not be less than the number of data replicas (usually 2), nor less than the number of metadata replicas (usually 3). + +#### Adding DataNode Nodes + +Script command: + +```Bash +# Linux / MacOS +# First switch to the IoTDB root directory +sbin/start-datanode.sh + +# Windows +# First switch to the IoTDB root directory +sbin/start-datanode.bat +``` + +Parameter introduction: + +| Abbreviation | Description | Is it required | +| :--- | :--------------------------------------------- | :----------- | +| -v | Show version information | No | +| -f | Run the script in the foreground, do not put it in the background | No | +| -d | Start in daemon mode, i.e. 
run in the background | No | +| -p | Specify a file to store the process ID for process management | No | +| -c | Specify the path to the configuration file folder, the script will load the configuration file from here | No | +| -g | Print detailed garbage collection (GC) information | No | +| -H | Specify the path of the Java heap dump file, used when JVM memory overflows | No | +| -E | Specify the path of the JVM error log file | No | +| -D | Define system properties, in the format key=value | No | +| -X | Pass -XX parameters directly to the JVM | No | +| -h | Help instruction | No | + +Note: After adding a DataNode, as new writes arrive (and old data expires, if TTL is set), the cluster load will gradually balance towards the new DataNode, eventually achieving a balance of storage and computation resources on all nodes. + +#### Removing DataNode Nodes + +First connect to the cluster through the CLI and confirm the RPC address and port number of the DataNode you want to remove with `show datanodes`: + +```Bash +IoTDB> show datanodes ++------+-------+----------+-------+-------------+---------------+ +|NodeID| Status|RpcAddress|RpcPort|DataRegionNum|SchemaRegionNum| ++------+-------+----------+-------+-------------+---------------+ +| 1|Running| 0.0.0.0| 6667| 0| 0| +| 2|Running| 0.0.0.0| 6668| 1| 1| +| 3|Running| 0.0.0.0| 6669| 1| 0| ++------+-------+----------+-------+-------------+---------------+ +Total line number = 3 +It costs 0.110s +``` + +Then use the script to remove the DataNode. Script command: + +```Bash +# Linux / MacOS +sbin/remove-datanode.sh [datanode_id] + +#Windows +sbin/remove-datanode.bat [datanode_id] +``` + +## Common Questions +1. Multiple prompts indicating activation failure during deployment process + - Use the `ls -al` command: Use the `ls -al` command to check if the owner information of the installation package root directory is the current user. 
+ - Check activation directory: Check all files in the `./activation` directory and whether the owner information is the current user. + +2. Confignode failed to start + + Step 1: Please check the startup log to see if any parameters that cannot be changed after the first startup have been modified. + + Step 2: Please check the startup log for any other abnormalities. If there are any abnormal phenomena in the log, please contact Timecho Technical Support personnel for consultation on solutions. + + Step 3: If it is the first deployment or data can be deleted, you can also clean up the environment according to the following steps, redeploy, and restart. + + Step 4: Clean up the environment: + + a. Terminate all ConfigNode and DataNode processes. + ```Bash + # 1. Stop the ConfigNode and DataNode services + sbin/stop-standalone.sh + + # 2. Check for any remaining processes + jps + # Or + ps -ef|grep iotdb + + # 3. If there are any remaining processes, manually kill them + kill -9 + # If you are sure there is only one iotdb on the machine, you can use the following command to clean up residual processes + ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9 + ``` + b. Delete the data and logs directories. + + Explanation: Deleting the data directory is necessary, deleting the logs directory is for clean logs and is not mandatory.
+ + ```Bash + cd /data/iotdb + rm -rf data logs + ``` \ No newline at end of file diff --git a/src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Database-Resources.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Database-Resources.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Database-Resources.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/Database-Resources.md diff --git a/src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Docker-Deployment_apache.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Docker-Deployment_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Docker-Deployment_apache.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/Docker-Deployment_apache.md diff --git a/src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Docker-Deployment_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/Docker-Deployment_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Environment-Requirements.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Environment-Requirements.md similarity index 99% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Environment-Requirements.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/Environment-Requirements.md index e286154e1..887bda839 100644 --- 
a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Environment-Requirements.md +++ b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Environment-Requirements.md @@ -80,6 +80,7 @@ IoTDB supports operating systems such as Linux, Windows, and MacOS, while the en - The system disk needs only the space used by the operating system, and does not need to reserve space for the IoTDB. - Each disk group corresponds to only one partition. Data disks (with multiple disk groups, corresponding to raid) do not need additional partitions. All space is used by the IoTDB. The following table lists the recommended disk partitioning methods. + @@ -119,6 +120,7 @@ The following table lists the recommended disk partitioning methods.
+ ### Network Configuration 1. Disable the firewall diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/IoTDB-Package.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/IoTDB-Package.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package.md diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package_timecho.md diff --git a/src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Monitoring-panel-deployment.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/Monitoring-panel-deployment.md diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md diff --git a/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md 
b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md new file mode 100644 index 000000000..a4e3e3c59 --- /dev/null +++ b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md @@ -0,0 +1,220 @@ + +# Stand-Alone Deployment + +This chapter will introduce how to start an IoTDB standalone instance, which includes 1 ConfigNode and 1 DataNode (commonly known as 1C1D). + +## Matters Needing Attention + +1. Before installation, ensure that the system is complete by referring to [System configuration](./Environment-Requirements.md). + +2. It is recommended to prioritize using 'hostname' for IP configuration during deployment, which can avoid the problem of modifying the host IP in the later stage and causing the database to fail to start. To set the host name, you need to configure/etc/hosts on the target server. For example, if the local IP is 192.168.1.3 and the host name is iotdb-1, you can use the following command to set the server's host name and configure IoTDB's' cn_internal-address' using the host name dn_internal_address、dn_rpc_address。 + + ```shell + echo "192.168.1.3 iotdb-1" >> /etc/hosts + ``` + +3. Some parameters cannot be modified after the first startup. Please refer to the "Parameter Configuration" section below for settings. + +4. Whether in linux or windows, ensure that the IoTDB installation path does not contain Spaces and Chinese characters to avoid software exceptions. + +5. Please note that when installing and deploying IoTDB (including activating and using software), it is necessary to use the same user for operations. You can: +- Using root user (recommended): Using root user can avoid issues such as permissions. +- Using a fixed non root user: + - Using the same user operation: Ensure that the same user is used for start, activation, stop, and other operations, and do not switch users. 
+ - Avoid using sudo: Try to avoid using sudo commands as they execute commands with root privileges, which may cause confusion or security issues. + +6. It is recommended to deploy a monitoring panel, which can monitor important operational indicators and keep track of database operation status at any time. The monitoring panel can be obtained by contacting the business department, and the steps for deploying the monitoring panel can be referred to:[Monitoring Board Install and Deploy](./Monitoring-panel-deployment.md). + +## Installation Steps + +### 1、Unzip the installation package and enter the installation directory + +```shell +unzip iotdb-enterprise-{version}-bin.zip +cd iotdb-enterprise-{version}-bin +``` + +### 2、Parameter Configuration + +#### Environment Script Configuration + +- ./conf/confignode-env.sh (./conf/confignode-env.bat) configuration + +| **Configuration** | **Description** | **Default** | **Recommended value** | Note | +| :---------------: | :----------------------------------------------------------: | :---------: | :----------------------------------------------------------: | :---------------------------------: | +| MEMORY_SIZE | The total amount of memory that IoTDB ConfigNode nodes can use | empty | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect | + +- ./conf/datanode-env.sh (./conf/datanode-env.bat) configuration + +| **Configuration** | **Description** | **Default** | **Recommended value** | **Note** | +| :---------: | :----------------------------------: | :--------: | :----------------------------------------------: | :----------: | +| MEMORY_SIZE | The total amount of memory that IoTDB DataNode nodes can use | empty | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect | + +#### System General Configuration + +Open the general configuration file (./conf/iotdb-system. 
properties file) and set the following parameters: + +| **Configuration** | **Description** | **Default** | **Recommended value** | Note | +| :-----------------------: | :----------------------------------------------------------: | :------------: | :----------------------------------------------------------: | :---------------------------------------------------: | +| cluster_name | Cluster Name | defaultCluster | The cluster name can be set as needed, and if there are no special needs, the default can be kept | Cannot be modified after initial startup | +| schema_replication_factor | Number of metadata replicas, set to 1 for the standalone version here | 1 | 1 | Default 1, cannot be modified after the first startup | +| data_replication_factor | Number of data replicas, set to 1 for the standalone version here | 1 | 1 | Default 1, cannot be modified after the first startup | + +#### ConfigNode Configuration + +Open the ConfigNode configuration file (./conf/iotdb-system. properties file) and set the following parameters: + +| **Configuration** | **Description** | **Default** | **Recommended value** | Note | +| :-----------------: | :----------------------------------------------------------: | :-------------: | :----------------------------------------------------------: | :--------------------------------------: | +| cn_internal_address | The address used by ConfigNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Cannot be modified after initial startup | +| cn_internal_port | The port used by ConfigNode for communication within the cluster | 10710 | 10710 | Cannot be modified after initial startup | +| cn_consensus_port | The port used for ConfigNode replica group consensus protocol communication | 10720 | 10720 | Cannot be modified after initial startup | +| cn_seed_config_node | The address of the ConfigNode that the node connects to when registering 
to join the cluster, cn_internal_address:cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | Cannot be modified after initial startup | + +#### DataNode Configuration + +Open the DataNode configuration file (./conf/iotdb-system. properties file) and set the following parameters: + +| **Configuration** | **Description** | **Default** | **Recommended value** | **Note** | +| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------- | +| dn_rpc_address | The address of the client RPC service | 0.0.0.0 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Restarting the service takes effect | +| dn_rpc_port | The port of the client RPC service | 6667 | 6667 | Restarting the service takes effect | +| dn_internal_address | The address used by DataNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Cannot be modified after initial startup | +| dn_internal_port | The port used by DataNode for communication within the cluster | 10730 | 10730 | Cannot be modified after initial startup | +| dn_mpp_data_exchange_port | The port used by DataNode to receive data streams | 10740 | 10740 | Cannot be modified after initial startup | +| dn_data_region_consensus_port | The port used by DataNode for data replica consensus protocol communication | 10750 | 10750 | Cannot be modified after initial startup | +| dn_schema_region_consensus_port | The port used by DataNode for metadata replica consensus protocol communication | 10760 | 10760 | Cannot be modified after initial startup | +| dn_seed_config_node | The ConfigNode address that the node connects to when registering to join the cluster, i.e. 
cn_internal-address: cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | Cannot be modified after initial startup | + +> ❗️Attention: Editors such as VSCode Remote do not have automatic configuration saving function. Please ensure that the modified files are saved persistently, otherwise the configuration items will not take effect + +### 3、Start ConfigNode + +Enter the sbin directory of iotdb and start confignode + +```shell +./start-confignode.sh -d #The "- d" parameter will start in the background +``` +If the startup fails, please refer to [Common Questions](#common-questions). + +### 4、Activate Database + +#### Method 1: Activate file copy activation + +- After starting the confignode node, enter the activation folder and copy the systeminfo file to the Timecho staff +- Received the license file returned by the staff +- Place the license file in the activation folder of the corresponding node; + +#### Method 2: Activate Script Activation + +- Obtain the required machine code for activation, enter the sbin directory of the installation directory, and execute the activation script: + +```shell + cd sbin +./start-activate.sh +``` + +- The following information is displayed. Please copy the machine code (i.e. the string of characters) to the Timecho staff: + +```shell +Please copy the system_info's content and send it to Timecho: +01-KU5LDFFN-PNBEHDRH +Please enter license: +``` + +- Enter the activation code returned by the staff into the previous command line prompt 'Please enter license:', as shown below: + +```shell +Please enter license: +JJw+MmF+AtexsfgNGOFgTm83Bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxm6pF+APW1CiXLTSijK9Qh3nsLgzrW8OJPh26Vl6ljKUpCvpTiw== +License has been stored to sbin/../activation/license +Import completed. 
Please start cluster and excute 'show cluster' to verify activation status +``` + +### 5、Start DataNode + +Enter the sbin directory of iotdb and start datanode: + +```shell +cd sbin +./start-datanode.sh -d # The "-d" parameter will start in the background +``` + +### 6、Verify Deployment + +You can directly execute the Cli startup script in the sbin directory: + +```shell +./start-cli.sh -h ip(local IP or domain name) -p port(6667) +``` + +After successful startup, the following interface will appear displaying successful installation of IoTDB. + +![](https://alioss.timecho.com/docs/img/%E5%90%AF%E5%8A%A8%E6%88%90%E5%8A%9F.png) + +After the installation success interface appears, continue to check if the activation is successful and use the `show cluster` command + +When you see the display "Activated" on the far right, it indicates successful activation + +![](https://alioss.timecho.com/docs/img/show%20cluster.png) + + +> The appearance of 'Activated (W)' indicates passive activation, indicating that this Config Node does not have a license file (or has not issued the latest license file with a timestamp). At this point, it is recommended to check if the license file has been placed in the license folder. If not, please place the license file. If a license file already exists, it may be due to inconsistency between the license file of this node and the information of other nodes. Please contact Timecho staff to reapply. + +## Common Problem +1. Multiple prompts indicating activation failure during deployment process + - Use the `ls -al` command: Use the `ls -al` command to check if the owner information of the installation package root directory is the current user. + - Check activation directory: Check all files in the `./activation` directory and whether the owner information is the current user. + +2. Confignode failed to start + + Step 1: Please check the startup log to see if any parameters that cannot be changed after the first startup have been modified.
+ + Step 2: Please check the startup log for any other abnormalities. If there are any abnormal phenomena in the log, please contact Timecho Technical Support personnel for consultation on solutions. + + Step 3: If it is the first deployment or data can be deleted, you can also clean up the environment according to the following steps, redeploy, and restart. + + Step 4: Clean up the environment: + + a. Terminate all ConfigNode and DataNode processes. + ```Bash + # 1. Stop the ConfigNode and DataNode services + sbin/stop-standalone.sh + + # 2. Check for any remaining processes + jps + # Or + ps -ef|grep iotdb + + # 3. If there are any remaining processes, manually kill them + kill -9 <pid> + # If you are sure there is only one iotdb on the machine, you can use the following command to clean up residual processes + ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9 + ``` + b. Delete the data and logs directories. + + Explanation: Deleting the data directory is necessary, deleting the logs directory is for clean logs and is not mandatory.
+ + ```Bash + cd /data/iotdb + rm -rf data logs + ``` \ No newline at end of file diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/workbench-deployment_timecho.md b/src/UserGuide/V1.3.3/Deployment-and-Maintenance/workbench-deployment_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/workbench-deployment_timecho.md rename to src/UserGuide/V1.3.3/Deployment-and-Maintenance/workbench-deployment_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/DBeaver.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/DBeaver.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/DBeaver.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/DBeaver.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/DataEase.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/DataEase.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/DataEase.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/DataEase.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Flink-IoTDB.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Flink-IoTDB.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Flink-IoTDB.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Flink-IoTDB.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Flink-TsFile.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Flink-TsFile.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Flink-TsFile.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Flink-TsFile.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Grafana-Connector.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Grafana-Connector.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Grafana-Connector.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Grafana-Connector.md diff --git 
a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Grafana-Plugin.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Grafana-Plugin.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Grafana-Plugin.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Grafana-Plugin.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Hive-TsFile.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Hive-TsFile.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Hive-TsFile.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Hive-TsFile.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Ignition-IoTDB-plugin_timecho.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Ignition-IoTDB-plugin_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Ignition-IoTDB-plugin_timecho.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Ignition-IoTDB-plugin_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Kubernetes_apache.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Kubernetes_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Kubernetes_apache.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Kubernetes_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Kubernetes_timecho.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Kubernetes_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Kubernetes_timecho.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Kubernetes_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/NiFi-IoTDB.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/NiFi-IoTDB.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/NiFi-IoTDB.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/NiFi-IoTDB.md diff --git 
a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Spark-IoTDB.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Spark-IoTDB.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Spark-IoTDB.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Spark-IoTDB.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Spark-TsFile.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Spark-TsFile.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Spark-TsFile.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Spark-TsFile.md diff --git a/src/UserGuide/V1.3.3/Ecosystem-Integration/Telegraf.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Telegraf.md new file mode 100644 index 000000000..1c84d9bef --- /dev/null +++ b/src/UserGuide/V1.3.3/Ecosystem-Integration/Telegraf.md @@ -0,0 +1,191 @@ + +# Telegraf + +## 1、Product Overview + +### 1.1 Telegraf + +[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is an open-source proxy tool developed by InfluxData for collecting, processing, and transmitting metric data. + +Telegraf has the following characteristics: + +- Plugin architecture: The strength of Telegraf lies in its extensive plugin ecosystem. It supports multiple input, output, and processor plugins, and can seamlessly integrate with various data sources and targets. + - Data collection: Telegraf excels at collecting metric data from various sources, such as system metrics, logs, databases, etc. Its versatility makes it suitable for monitoring applications, infrastructure, and IoT devices. + - Output target: Once data is collected, it can be sent to various output targets, including popular databases such as InfluxDB. This flexibility allows Telegraf to adapt to different monitoring and analysis settings. +- Easy configuration: Telegraf is configured using TOML files. This simplicity enables users to easily define inputs, outputs, and processors, making customization simple and clear. 
+- Community and Support: As an open-source project, Telegraf benefits from an active community. Users can contribute plugins, report issues, and seek help through forums and documents. + +### 1.2 Telegraf-IoTDB Plugin + +The Telegraf IoTDB plugin can output and store monitoring information saved in Telegraf to IoTDB. The output plugin uses IoTDB sessions for connection and data writing. + +![](https://alioss.timecho.com/docs/img/telegraf-en.png) + +## 2、Installation Requirements + +Telegraf supports multiple operating systems, including Linux, Windows, and macOS. It is recommended to use 'root' administrator privileges to successfully install Telegraf. Please refer to the installation requirements for specific [Installation Requirements](https://docs.influxdata.com/telegraf/v1/install/) + +## 3、Installation Steps + +Please refer to [Installation Steps](https://docs.influxdata.com/telegraf/v1/install/) for specific installation steps + +- Note: This plugin is a built-in plugin for Telegraf and does not require secondary installation + +## 4、Instructions + +### 4.1 Set Input Source + +Find 'INPUT PLUGINS' in the' telegraf. conf 'configuration file to configure the input source. The specific configuration content is shown in the table below + +| Configuration | Description | Notes | +| ----------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| alias | Example of named plugin | | +| interval |Collect the frequency of this indicator. Ordinary plugins use a single global interval, but if the running frequency of a specific input should be lower or higher, you can configure it here` Interval can be increased to reduce data input rate limitations. | | +| precision | Overlay the settings of the 'precision' proxy. The collected indicators are rounded to the specified precision interval. When this value is set on the service input (e.g. 
`'statsd':`), the output database may merge multiple events that occur at the same timestamp. | | +| collection_jitter | Overlay the settings of the 'collectic_jitter' proxy. Collection jitter is used to perform random intervals` | | +| name_override | Custom time series path name used when outputting to IoTDB | The output path name must meet the "[Syntax Requirement](../Reference/Syntax-Rule.md)" requirement | +| name_prefix | Specify the prefix attached to the measurement name | | +| name_suffix | Specify the suffix attached to the measurement name | | + +![](https://alioss.timecho.com/docs/img/Telegraf_1.png) + +### 4.2 Set Output Source + +Find "outputs. iotdb" in the "telegraf. conf" configuration file to configure the output source. The specific configuration content is shown in the table below. For specific input source examples, please refer to [Output Source Example](https://docs.influxdata.com/telegraf/v1/configuration/#output-configuration-examples) + +| Configuration | Description | Before Modification | After Modification | Notes | +| ------------------- | -------------- | ----------------------------------- | ------------------------------------------------- | ------------------------------------------------------------ | +| host | Host of IoTDB | # host = "127.0.0.1" | host = "Deploy IoTDB host" | Default is 127.0.0.1 | +| port | The port number of IoTDB | # port = "6667" | port = "Port number for deploying IoTDB" | Default is 6667 | +| user | Username for IoTDB | # user = "root" | user = "Username for IoTDB" |Default as root | +| password | Password for IoTDB | # password = "root" | password= "Password for IoTDB" | Default as root | +| timestamp_precision | Timestamp accuracy | timestamp_precision = "millisecond" | timestamp_precision = "Same timestamp accuracy as IoTDB" | You can check the 'timestamp-precision' field in 'iotdb system. 
properties' | +| sanitize_tag | Database version | none | sanitize_tag = "0.13/1.0/1.1/1.2/1.3" | | + +![](https://alioss.timecho.com/docs/img/Telegraf_2.png) + +### 4.3 Start Telegraf Service + +```Bash +telegraf -config /path/to/telegraf.conf +``` + +## 5、Example Usage + +The following is an example of collecting CPU data using Telegraf and outputting it to IoTDB using Telegraf IoTDB. Generate configuration files using the telegraf command + +```Bash +telegraf --sample-config --input-filter cpu --output-filter iotdb > cpu_iotdb.conf +``` + +1. Modify the configuration of the input CPU plugin in cpu_iotdb. conf. Among them, the "name_ooverride" field is the custom time-series path name used when outputting to IoTDB + +```Bash +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states + report_active = false + ## If true and the info is available then add core_id and physical_id tags + core_tags = false + name_override = "root.demo.telgraf.cpu" +``` + +2. Modify the configuration of the output iotdb plugin in cpu_iotdb. 
conf + +| Configuration | Description | Before Modification | After Modification | Notes | +| ------------------- | -------------- | ----------------------------------- | ------------------------------------------------- | ------------------------------------------------------------ | +| host | Host of IoTDB | # host = "127.0.0.1" | host = "Deploy IoTDB host" | Default is 127.0.0.1 | +| port | The port number of IoTDB | # port = "6667" | port = "Port number for deploying IoTDB" | Default is 6667 | +| user | Username for IoTDB | # user = "root" | user = "Username for IoTDB" |Default as root | +| password | Password for IoTDB | # password = "root" | password= "Password for IoTDB" | Default as root | +| timestamp_precision | Timestamp accuracy | timestamp_precision = "millisecond" | timestamp_precision = "Same timestamp accuracy as IoTDB" | You can check the 'timestamp-precision' field in 'iotdb system. properties' | +| sanitize_tag | Database version | none | sanitize_tag = "0.13/1.0/1.1/1.2/1.3" | | + +```Bash +# Save metrics to an IoTDB Database +[[outputs.iotdb]] + ## Configuration of IoTDB server connection + host = "127.0.0.1" + # port = "6667" + + ## Configuration of authentication + # user = "root" + # password = "root" + + ## Timeout to open a new session. + ## A value of zero means no timeout. + # timeout = "5s" + + ## Configuration of type conversion for 64-bit unsigned int + ## IoTDB currently DOES NOT support unsigned integers (version 13.x). + ## 32-bit unsigned integers are safely converted into 64-bit signed integers by the plugin, + ## however, this is not true for 64-bit values in general as overflows may occur. + ## The following setting allows to specify the handling of 64-bit unsigned integers. 
+ ## Available values are: + ## - "int64" -- convert to 64-bit signed integers and accept overflows + ## - "int64_clip" -- convert to 64-bit signed integers and clip the values on overflow to 9,223,372,036,854,775,807 + ## - "text" -- convert to the string representation of the value + # uint64_conversion = "int64_clip" + + ## Configuration of TimeStamp + ## TimeStamp is always saved in 64bits int. timestamp_precision specifies the unit of timestamp. + ## Available value: + ## "second", "millisecond", "microsecond", "nanosecond"(default) + timestamp_precision = "millisecond" + + ## Handling of tags + ## Tags are not fully supported by IoTDB. + ## A guide with suggestions on how to handle tags can be found here: + ## https://iotdb.apache.org/UserGuide/Master/API/InfluxDB-Protocol.html + ## + ## Available values are: + ## - "fields" -- convert tags to fields in the measurement + ## - "device_id" -- attach tags to the device ID + ## + ## For Example, a metric named "root.sg.device" with the tags `tag1: "private"` and `tag2: "working"` and + ## fields `s1: 100` and `s2: "hello"` will result in the following representations in IoTDB + ## - "fields" -- root.sg.device, s1=100, s2="hello", tag1="private", tag2="working" + ## - "device_id" -- root.sg.device.private.working, s1=100, s2="hello" + # convert_tags_to = "device_id" + ## Handling of unsupported characters + ## Some characters in different versions of IoTDB are not supported in path name + ## A guide with suggetions on valid paths can be found here: + ## for iotdb 0.13.x -> https://iotdb.apache.org/UserGuide/V0.13.x/Reference/Syntax-Conventions.html#identifiers + ## for iotdb 1.x.x and above -> https://iotdb.apache.org/UserGuide/V1.3.x/User-Manual/Syntax-Rule.html#identifier + ## + ## Available values are: + ## - "1.0", "1.1", "1.2", "1.3" -- enclose in `` the world having forbidden character + ## such as @ $ # : [ ] { } ( ) space + ## - "0.13" -- enclose in `` the world having forbidden character + ## such as 
space + ## + ## Keep this section commented if you don't want to sanitize the path + sanitize_tag = "1.3" +``` + +3. Run Telegraf using the cpu_iotdb.exe configuration file: After running for a period of time, the data collected and reported by Telegraf can be queried in IoTDB \ No newline at end of file diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Thingsboard.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Thingsboard.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Thingsboard.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Thingsboard.md diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Zeppelin-IoTDB.md b/src/UserGuide/V1.3.3/Ecosystem-Integration/Zeppelin-IoTDB.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Zeppelin-IoTDB.md rename to src/UserGuide/V1.3.3/Ecosystem-Integration/Zeppelin-IoTDB.md diff --git a/src/UserGuide/V2.0.1/Tree/FAQ/Frequently-asked-questions.md b/src/UserGuide/V1.3.3/FAQ/Frequently-asked-questions.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/FAQ/Frequently-asked-questions.md rename to src/UserGuide/V1.3.3/FAQ/Frequently-asked-questions.md diff --git a/src/UserGuide/V2.0.1/Tree/IoTDB-Introduction/IoTDB-Introduction_apache.md b/src/UserGuide/V1.3.3/IoTDB-Introduction/IoTDB-Introduction_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/IoTDB-Introduction/IoTDB-Introduction_apache.md rename to src/UserGuide/V1.3.3/IoTDB-Introduction/IoTDB-Introduction_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/IoTDB-Introduction/IoTDB-Introduction_timecho.md b/src/UserGuide/V1.3.3/IoTDB-Introduction/IoTDB-Introduction_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/IoTDB-Introduction/IoTDB-Introduction_timecho.md rename to src/UserGuide/V1.3.3/IoTDB-Introduction/IoTDB-Introduction_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/IoTDB-Introduction/Scenario.md 
b/src/UserGuide/V1.3.3/IoTDB-Introduction/Scenario.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/IoTDB-Introduction/Scenario.md rename to src/UserGuide/V1.3.3/IoTDB-Introduction/Scenario.md diff --git a/src/UserGuide/V2.0.1/Table/QuickStart/QuickStart.md b/src/UserGuide/V1.3.3/QuickStart/QuickStart.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/QuickStart/QuickStart.md rename to src/UserGuide/V1.3.3/QuickStart/QuickStart.md diff --git a/src/UserGuide/V2.0.1/Tree/QuickStart/QuickStart_apache.md b/src/UserGuide/V1.3.3/QuickStart/QuickStart_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/QuickStart/QuickStart_apache.md rename to src/UserGuide/V1.3.3/QuickStart/QuickStart_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/QuickStart/QuickStart_timecho.md b/src/UserGuide/V1.3.3/QuickStart/QuickStart_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/QuickStart/QuickStart_timecho.md rename to src/UserGuide/V1.3.3/QuickStart/QuickStart_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Reference/Common-Config-Manual.md b/src/UserGuide/V1.3.3/Reference/Common-Config-Manual.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Reference/Common-Config-Manual.md rename to src/UserGuide/V1.3.3/Reference/Common-Config-Manual.md diff --git a/src/UserGuide/V2.0.1/Tree/Reference/ConfigNode-Config-Manual.md b/src/UserGuide/V1.3.3/Reference/ConfigNode-Config-Manual.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Reference/ConfigNode-Config-Manual.md rename to src/UserGuide/V1.3.3/Reference/ConfigNode-Config-Manual.md diff --git a/src/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual.md b/src/UserGuide/V1.3.3/Reference/DataNode-Config-Manual.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual.md rename to src/UserGuide/V1.3.3/Reference/DataNode-Config-Manual.md diff --git 
a/src/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual_apache.md b/src/UserGuide/V1.3.3/Reference/DataNode-Config-Manual_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual_apache.md rename to src/UserGuide/V1.3.3/Reference/DataNode-Config-Manual_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual_timecho.md b/src/UserGuide/V1.3.3/Reference/DataNode-Config-Manual_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual_timecho.md rename to src/UserGuide/V1.3.3/Reference/DataNode-Config-Manual_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Reference/Keywords.md b/src/UserGuide/V1.3.3/Reference/Keywords.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Reference/Keywords.md rename to src/UserGuide/V1.3.3/Reference/Keywords.md diff --git a/src/UserGuide/V2.0.1/Tree/Reference/Modify-Config-Manual.md b/src/UserGuide/V1.3.3/Reference/Modify-Config-Manual.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Reference/Modify-Config-Manual.md rename to src/UserGuide/V1.3.3/Reference/Modify-Config-Manual.md diff --git a/src/UserGuide/V2.0.1/Tree/Reference/Status-Codes.md b/src/UserGuide/V1.3.3/Reference/Status-Codes.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Reference/Status-Codes.md rename to src/UserGuide/V1.3.3/Reference/Status-Codes.md diff --git a/src/UserGuide/V2.0.1/Tree/Reference/Syntax-Rule.md b/src/UserGuide/V1.3.3/Reference/Syntax-Rule.md similarity index 99% rename from src/UserGuide/V2.0.1/Tree/Reference/Syntax-Rule.md rename to src/UserGuide/V1.3.3/Reference/Syntax-Rule.md index 38dffc6ac..40d858e28 100644 --- a/src/UserGuide/V2.0.1/Tree/Reference/Syntax-Rule.md +++ b/src/UserGuide/V1.3.3/Reference/Syntax-Rule.md @@ -172,6 +172,7 @@ Below are basic constraints of identifiers, specific identifiers may have other - [0-9 a-z A-Z _ ] (letters, digits and underscore) - ['\u2E80'..'\u9FFF'] 
(UNICODE Chinese characters) + ### Reverse quotation marks **If the following situations occur, the identifier needs to be quoted using reverse quotes:** @@ -278,4 +279,5 @@ create device template `t1't"t` ```sql `root.db.*` - ``` \ No newline at end of file + ``` + diff --git a/src/UserGuide/V2.0.1/Tree/SQL-Manual/Function-and-Expression.md b/src/UserGuide/V1.3.3/SQL-Manual/Function-and-Expression.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/SQL-Manual/Function-and-Expression.md rename to src/UserGuide/V1.3.3/SQL-Manual/Function-and-Expression.md diff --git a/src/UserGuide/V2.0.1/Tree/SQL-Manual/Operator-and-Expression.md b/src/UserGuide/V1.3.3/SQL-Manual/Operator-and-Expression.md similarity index 97% rename from src/UserGuide/V2.0.1/Tree/SQL-Manual/Operator-and-Expression.md rename to src/UserGuide/V1.3.3/SQL-Manual/Operator-and-Expression.md index 1b6fd667f..c6fec7f61 100644 --- a/src/UserGuide/V2.0.1/Tree/SQL-Manual/Operator-and-Expression.md +++ b/src/UserGuide/V1.3.3/SQL-Manual/Operator-and-Expression.md @@ -25,7 +25,7 @@ This chapter describes the operators and functions supported by IoTDB. IoTDB pro A list of all available functions, both built-in and custom, can be displayed with `SHOW FUNCTIONS` command. -See the documentation [Select-Expression](../SQL-Manual/Function-and-Expression.md#selector-functions) for the behavior of operators and functions in SQL. +See the documentation [Select-Expression](./Function-and-Expression.md#selector-functions) for the behavior of operators and functions in SQL. ## OPERATORS @@ -41,7 +41,7 @@ See the documentation [Select-Expression](../SQL-Manual/Function-and-Expression. | `+` | addition | | `-` | subtraction | -For details and examples, see the document [Arithmetic Operators and Functions](../SQL-Manual/Function-and-Expression.md#arithmetic-functions). +For details and examples, see the document [Arithmetic Operators and Functions](./Function-and-Expression.md#arithmetic-functions). 
### Comparison Operators @@ -64,7 +64,7 @@ For details and examples, see the document [Arithmetic Operators and Functions]( | `IN` / `CONTAINS` | is a value in the specified list | | `NOT IN` / `NOT CONTAINS` | is not a value in the specified list | -For details and examples, see the document [Comparison Operators and Functions](../SQL-Manual/Function-and-Expression.md#comparison-operators-and-functions). +For details and examples, see the document [Comparison Operators and Functions](./Function-and-Expression.md#comparison-operators-and-functions). ### Logical Operators @@ -74,7 +74,7 @@ For details and examples, see the document [Comparison Operators and Functions]( | `AND` / `&` / `&&` | logical AND | | `OR`/ | / || | logical OR | -For details and examples, see the document [Logical Operators](../SQL-Manual/Function-and-Expression.md#logical-operators). +For details and examples, see the document [Logical Operators](./Function-and-Expression.md#logical-operators). ### Operator Precedence @@ -123,7 +123,7 @@ The built-in functions can be used in IoTDB without registration, and the functi | MAX_BY | MAX_BY(x, y) returns the value of x corresponding to the maximum value of the input y. MAX_BY(time, x) returns the timestamp when x is at its maximum value. | The first input x can be of any type, while the second input y must be of type INT32, INT64, FLOAT, DOUBLE, STRING, TIMESTAMP or DATE. | / | Consistent with the data type of the first input x. | | MIN_BY | MIN_BY(x, y) returns the value of x corresponding to the minimum value of the input y. MIN_BY(time, x) returns the timestamp when x is at its minimum value. | The first input x can be of any type, while the second input y must be of type INT32, INT64, FLOAT, DOUBLE, STRING, TIMESTAMP or DATE. | / | Consistent with the data type of the first input x. | -For details and examples, see the document [Aggregate Functions](../SQL-Manual/Function-and-Expression.md#aggregate-functions). 
+For details and examples, see the document [Aggregate Functions](./Function-and-Expression.md#aggregate-functions). ### Arithmetic Functions @@ -150,7 +150,7 @@ For details and examples, see the document [Aggregate Functions](../SQL-Manual/F | LOG10 | INT32 / INT64 / FLOAT / DOUBLE | DOUBLE | / | Math#log10(double) | | SQRT | INT32 / INT64 / FLOAT / DOUBLE | DOUBLE | / | Math#sqrt(double) | -For details and examples, see the document [Arithmetic Operators and Functions](../SQL-Manual/Function-and-Expression.md#arithmetic-operators-and-functions). +For details and examples, see the document [Arithmetic Operators and Functions](./Function-and-Expression.md#arithmetic-operators-and-functions). ### Comparison Functions @@ -159,7 +159,7 @@ For details and examples, see the document [Arithmetic Operators and Functions]( | ON_OFF | INT32 / INT64 / FLOAT / DOUBLE | `threshold`: a double type variate | BOOLEAN | Return `ts_value >= threshold`. | | IN_RANGR | INT32 / INT64 / FLOAT / DOUBLE | `lower`: DOUBLE type `upper`: DOUBLE type | BOOLEAN | Return `ts_value >= lower && value <= upper`. | -For details and examples, see the document [Comparison Operators and Functions](../SQL-Manual/Function-and-Expression.md#comparison-operators-and-functions). +For details and examples, see the document [Comparison Operators and Functions](./Function-and-Expression.md#comparison-operators-and-functions). ### String Processing Functions @@ -179,7 +179,7 @@ For details and examples, see the document [Comparison Operators and Functions]( | TRIM | TEXT STRING | / | TEXT | Get the string whose value is same to input series, with all leading and trailing space removed. | | STRCMP | TEXT STRING | / | TEXT | Get the compare result of two input series. Returns `0` if series value are the same, a `negative integer` if value of series1 is smaller than series2,
a `positive integer` if value of series1 is more than series2. | -For details and examples, see the document [String Processing](../SQL-Manual/Function-and-Expression.md#string-processing). +For details and examples, see the document [String Processing](./Function-and-Expression.md#string-processing). ### Data Type Conversion Function @@ -187,7 +187,7 @@ For details and examples, see the document [String Processing](../SQL-Manual/Fun | ------------- | ------------------------------------------------------------ | ----------------------- | ------------------------------------------------------------ | | CAST | `type`: Output data type, INT32 / INT64 / FLOAT / DOUBLE / BOOLEAN / TEXT | determined by `type` | Convert the data to the type specified by the `type` parameter. | -For details and examples, see the document [Data Type Conversion Function](../SQL-Manual/Function-and-Expression.md#data-type-conversion-function). +For details and examples, see the document [Data Type Conversion Function](./Function-and-Expression.md#data-type-conversion-function). ### Constant Timeseries Generating Functions @@ -197,7 +197,7 @@ For details and examples, see the document [Data Type Conversion Function](../SQ | PI | None | DOUBLE | Data point value: a `double` value of `π`, the ratio of the circumference of a circle to its diameter, which is equals to `Math.PI` in the *Java Standard Library*. | | E | None | DOUBLE | Data point value: a `double` value of `e`, the base of the natural logarithms, which is equals to `Math.E` in the *Java Standard Library*. | -For details and examples, see the document [Constant Timeseries Generating Functions](../SQL-Manual/Function-and-Expression.md#constant-timeseries-generating-functions). +For details and examples, see the document [Constant Timeseries Generating Functions](./Function-and-Expression.md#constant-timeseries-generating-functions). 
### Selector Functions @@ -206,7 +206,7 @@ For details and examples, see the document [Constant Timeseries Generating Funct | TOP_K | INT32 / INT64 / FLOAT / DOUBLE / TEXT / STRING / DATE / TIEMSTAMP | `k`: the maximum number of selected data points, must be greater than 0 and less than or equal to 1000 | Same type as the input series | Returns `k` data points with the largest values in a time series. | | BOTTOM_K | INT32 / INT64 / FLOAT / DOUBLE / TEXT / STRING / DATE / TIEMSTAMP | `k`: the maximum number of selected data points, must be greater than 0 and less than or equal to 1000 | Same type as the input series | Returns `k` data points with the smallest values in a time series. | -For details and examples, see the document [Selector Functions](../SQL-Manual/Function-and-Expression.md#selector-functions). +For details and examples, see the document [Selector Functions](./Function-and-Expression.md#selector-functions). ### Continuous Interval Functions @@ -217,7 +217,7 @@ For details and examples, see the document [Selector Functions](../SQL-Manual/Fu | ZERO_COUNT | INT32/ INT64/ FLOAT/ DOUBLE/ BOOLEAN | `min`:Optional with default value `1L` `max`:Optional with default value `Long.MAX_VALUE` | Long | Return intervals' start times and the number of data points in the interval in which the value is always 0(false). Data points number `n` satisfy `n >= min && n <= max` | | NON_ZERO_COUNT | INT32/ INT64/ FLOAT/ DOUBLE/ BOOLEAN | `min`:Optional with default value `1L` `max`:Optional with default value `Long.MAX_VALUE` | Long | Return intervals' start times and the number of data points in the interval in which the value is always not 0(false). Data points number `n` satisfy `n >= min && n <= max` | -For details and examples, see the document [Continuous Interval Functions](../SQL-Manual/Function-and-Expression.md#continuous-interval-functions). 
+For details and examples, see the document [Continuous Interval Functions](./Function-and-Expression.md#continuous-interval-functions). ### Variation Trend Calculation Functions @@ -230,7 +230,7 @@ For details and examples, see the document [Continuous Interval Functions](../SQ | NON_NEGATIVE_DERIVATIVE | INT32 / INT64 / FLOAT / DOUBLE | / | DOUBLE | Calculates the absolute value of the rate of change of a data point compared to the previous data point, the result is equals to NON_NEGATIVE_DIFFERENCE / TIME_DIFFERENCE. There is no corresponding output for the first data point. | | DIFF | INT32 / INT64 / FLOAT / DOUBLE | `ignoreNull`:optional,default is true. If is true, the previous data point is ignored when it is null and continues to find the first non-null value forwardly. If the value is false, previous data point is not ignored when it is null, the result is also null because null is used for subtraction | DOUBLE | Calculates the difference between the value of a data point and the value of the previous data point. There is no corresponding output for the first data point, so output is null | -For details and examples, see the document [Variation Trend Calculation Functions](../SQL-Manual/Function-and-Expression.md#variation-trend-calculation-functions). +For details and examples, see the document [Variation Trend Calculation Functions](./Function-and-Expression.md#variation-trend-calculation-functions). ### Sample Functions @@ -250,7 +250,7 @@ For details and examples, see the document [Sample Functions](../SQL-Manual/Func | ------------- | ------------------------------- | ------------------- | ----------------------------- | ----------------------------------------------------------- | | CHANGE_POINTS | INT32 / INT64 / FLOAT / DOUBLE | / | Same type as the input series | Remove consecutive identical values from an input sequence. | -For details and examples, see the document [Time-Series](../SQL-Manual/Function-and-Expression.md#time-series-processing). 
+For details and examples, see the document [Time-Series](./Function-and-Expression.md#time-series-processing). ## LAMBDA EXPRESSION @@ -259,7 +259,7 @@ For details and examples, see the document [Time-Series](../SQL-Manual/Function- | ------------- | ----------------------------------------------- | ------------------------------------------------------------ | ----------------------------------------------- | ------------------------------------------------------------ | | JEXL | INT32 / INT64 / FLOAT / DOUBLE / TEXT / BOOLEAN | `expr` is a lambda expression that supports standard one or multi arguments in the form `x -> {...}` or `(x, y, z) -> {...}`, e.g. `x -> {x * 2}`, `(x, y, z) -> {x + y * z}` | INT32 / INT64 / FLOAT / DOUBLE / TEXT / BOOLEAN | Returns the input time series transformed by a lambda expression | -For details and examples, see the document [Lambda](../SQL-Manual/Function-and-Expression.md#lambda-expression). +For details and examples, see the document [Lambda](./Function-and-Expression.md#lambda-expression). ## CONDITIONAL EXPRESSION @@ -267,7 +267,7 @@ For details and examples, see the document [Lambda](../SQL-Manual/Function-and-E | --------------- | -------------------- | | `CASE` | similar to "if else" | -For details and examples, see the document [Conditional Expressions](../SQL-Manual/Function-and-Expression.md#conditional-expressions). +For details and examples, see the document [Conditional Expressions](./Function-and-Expression.md#conditional-expressions). ## SELECT EXPRESSION @@ -322,7 +322,7 @@ Aggregate functions are many-to-one functions. They perform aggregate calculatio > select a, count(a) from root.sg group by ([10,100),10ms) > ``` -For the aggregation functions supported by IoTDB, see the document [Aggregate Functions](../SQL-Manual/Function-and-Expression.md#aggregate-functions). +For the aggregation functions supported by IoTDB, see the document [Aggregate Functions](./Function-and-Expression.md#aggregate-functions). 
#### Time Series Generation Function diff --git a/src/UserGuide/V2.0.1/Tree/SQL-Manual/SQL-Manual.md b/src/UserGuide/V1.3.3/SQL-Manual/SQL-Manual.md similarity index 97% rename from src/UserGuide/V2.0.1/Tree/SQL-Manual/SQL-Manual.md rename to src/UserGuide/V1.3.3/SQL-Manual/SQL-Manual.md index 2a078041c..4ac977278 100644 --- a/src/UserGuide/V2.0.1/Tree/SQL-Manual/SQL-Manual.md +++ b/src/UserGuide/V1.3.3/SQL-Manual/SQL-Manual.md @@ -602,7 +602,7 @@ IoTDB > select avg(temperature), from root.ln.wf01.wt01; IoTDB > select avg(*), - (avg(*) + 1) * 3 / 2 -1 + (avg(*) + 1) * 3 / 2 -1 from root.sg1 IoTDB > select avg(temperature), @@ -1090,11 +1090,11 @@ select change_points(s1), change_points(s2), change_points(s3), change_points(s4 ## DATA QUALITY FUNCTION LIBRARY -For more details, see document [Operator-and-Expression](../SQL-Manual/UDF-Libraries.md). +For more details, see document [Operator-and-Expression](./UDF-Libraries.md). ### Data Quality -For details and examples, see the document [Data-Quality](../SQL-Manual/UDF-Libraries.md#data-quality). +For details and examples, see the document [Data-Quality](./UDF-Libraries.md#data-quality). ```sql # Completeness @@ -1119,7 +1119,7 @@ select Accuracy(t1,t2,t3,m1,m2,m3) from root.test ### Data Profiling -For details and examples, see the document [Data-Profiling](../SQL-Manual/UDF-Libraries.md#data-profiling). +For details and examples, see the document [Data-Profiling](./UDF-Libraries.md#data-profiling). ```sql # ACF @@ -1199,7 +1199,7 @@ select zscore(s1) from root.test ### Anomaly Detection -For details and examples, see the document [Anomaly-Detection](../SQL-Manual/UDF-Libraries.md#anomaly-detection). +For details and examples, see the document [Anomaly-Detection](./UDF-Libraries.md#anomaly-detection). 
```sql # IQR @@ -1234,7 +1234,7 @@ select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3 ### Frequency Domain -For details and examples, see the document [Frequency-Domain](../SQL-Manual/UDF-Libraries.md#frequency-domain-analysis). +For details and examples, see the document [Frequency-Domain](./UDF-Libraries.md#frequency-domain-analysis). ```sql # Conv @@ -1266,7 +1266,7 @@ select envelope(s1) from root.test.d1 ### Data Matching -For details and examples, see the document [Data-Matching](../SQL-Manual/UDF-Libraries.md#data-matching). +For details and examples, see the document [Data-Matching](./UDF-Libraries.md#data-matching). ```sql # Cov @@ -1287,7 +1287,7 @@ select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 ### Data Repairing -For details and examples, see the document [Data-Repairing](../SQL-Manual/UDF-Libraries.md#data-repairing). +For details and examples, see the document [Data-Repairing](./UDF-Libraries.md#data-repairing). ```sql # TimestampRepair @@ -1312,7 +1312,7 @@ select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 ### Series Discovery -For details and examples, see the document [Series-Discovery](../SQL-Manual/UDF-Libraries.md#series-discovery). +For details and examples, see the document [Series-Discovery](./UDF-Libraries.md#series-discovery). ```sql # ConsecutiveSequences @@ -1325,7 +1325,7 @@ select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 ### Machine Learning -For details and examples, see the document [Machine-Learning](../SQL-Manual/UDF-Libraries.md#machine-learning). +For details and examples, see the document [Machine-Learning](./UDF-Libraries.md#machine-learning). ```sql # AR @@ -1340,7 +1340,7 @@ select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0 ## LAMBDA EXPRESSION -For details and examples, see the document [Lambda](../SQL-Manual/UDF-Libraries.md#lambda-expression). 
+For details and examples, see the document [Lambda](./UDF-Libraries.md#lambda-expression). ```sql select jexl(temperature, 'expr'='x -> {x + x}') as jexl1, jexl(temperature, 'expr'='x -> {x * 3}') as jexl2, jexl(temperature, 'expr'='x -> {x * x}') as jexl3, jexl(temperature, 'expr'='x -> {multiply(x, 100)}') as jexl4, jexl(temperature, st, 'expr'='(x, y) -> {x + y}') as jexl5, jexl(temperature, st, str, 'expr'='(x, y, z) -> {x + y + z}') as jexl6 from root.ln.wf01.wt01;``` @@ -1348,7 +1348,7 @@ select jexl(temperature, 'expr'='x -> {x + x}') as jexl1, jexl(temperature, 'exp ## CONDITIONAL EXPRESSION -For details and examples, see the document [Conditional Expressions](../SQL-Manual/UDF-Libraries.md#conditional-expressions). +For details and examples, see the document [Conditional Expressions](./UDF-Libraries.md#conditional-expressions). ```sql select T, P, case @@ -1548,7 +1548,7 @@ CQs can't be altered once they're created. To change a CQ, you must `DROP` and r ## USER-DEFINED FUNCTION (UDF) -For more details, see document [Operator-and-Expression](../SQL-Manual/UDF-Libraries.md). +For more details, see document [Operator-and-Expression](./UDF-Libraries.md). 
### UDF Registration diff --git a/src/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries.md b/src/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries.md rename to src/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries.md diff --git a/src/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries_apache.md b/src/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries_apache.md similarity index 99% rename from src/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries_apache.md rename to src/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries_apache.md index c2a0dcd54..8bab853b8 100644 --- a/src/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries_apache.md +++ b/src/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries_apache.md @@ -37,7 +37,7 @@ Based on the ability of user-defined functions, IoTDB provides a series of funct | apache-UDF-1.3.2.zip | V1.0.0~V1.3.2 | Please contact Timecho for assistance| 2. Place the library-udf.jar file in the compressed file obtained in the directory `/ext/udf ` of all nodes in the IoTDB cluster -3. In the SQL operation interface of IoTDB's SQL command line terminal (CLI), execute the corresponding function registration statement as follows. +3. In the SQL operation interface of IoTDB's SQL command line terminal (CLI), execute the corresponding function registration statement as follows. 4. 
Batch registration: Two registration methods: registration script or SQL full statement - Register Script - Copy the registration script (register-UDF.sh or register-UDF.bat) from the compressed package to the `tools` directory of IoTDB as needed, and modify the parameters in the script (default is host=127.0.0.1, rpcPort=6667, user=root, pass=root); @@ -46,6 +46,7 @@ Based on the ability of user-defined functions, IoTDB provides a series of funct - All SQL statements - Open the SQl file in the compressed package, copy all SQL statements, and in the SQL operation interface of IoTDB's SQL command line terminal (CLI), execute all SQl statements to batch register UDFs + ## Data Quality ### Completeness diff --git a/src/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries_timecho.md b/src/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries_timecho.md rename to src/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Technical-Insider/Cluster-data-partitioning.md b/src/UserGuide/V1.3.3/Technical-Insider/Cluster-data-partitioning.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Technical-Insider/Cluster-data-partitioning.md rename to src/UserGuide/V1.3.3/Technical-Insider/Cluster-data-partitioning.md diff --git a/src/UserGuide/V2.0.1/Tree/Technical-Insider/Encoding-and-Compression.md b/src/UserGuide/V1.3.3/Technical-Insider/Encoding-and-Compression.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Technical-Insider/Encoding-and-Compression.md rename to src/UserGuide/V1.3.3/Technical-Insider/Encoding-and-Compression.md diff --git a/src/UserGuide/V2.0.1/Tree/Technical-Insider/Publication.md b/src/UserGuide/V1.3.3/Technical-Insider/Publication.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Technical-Insider/Publication.md rename to src/UserGuide/V1.3.3/Technical-Insider/Publication.md diff --git 
a/src/UserGuide/V2.0.1/Tree/Tools-System/Benchmark.md b/src/UserGuide/V1.3.3/Tools-System/Benchmark.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Tools-System/Benchmark.md rename to src/UserGuide/V1.3.3/Tools-System/Benchmark.md diff --git a/src/UserGuide/V2.0.1/Tree/Tools-System/CLI.md b/src/UserGuide/V1.3.3/Tools-System/CLI.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Tools-System/CLI.md rename to src/UserGuide/V1.3.3/Tools-System/CLI.md diff --git a/src/UserGuide/V2.0.1/Tree/Tools-System/Data-Export-Tool.md b/src/UserGuide/V1.3.3/Tools-System/Data-Export-Tool.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Tools-System/Data-Export-Tool.md rename to src/UserGuide/V1.3.3/Tools-System/Data-Export-Tool.md diff --git a/src/UserGuide/V2.0.1/Tree/Tools-System/Data-Import-Tool.md b/src/UserGuide/V1.3.3/Tools-System/Data-Import-Tool.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Tools-System/Data-Import-Tool.md rename to src/UserGuide/V1.3.3/Tools-System/Data-Import-Tool.md diff --git a/src/UserGuide/V2.0.1/Tree/Tools-System/Maintenance-Tool_apache.md b/src/UserGuide/V1.3.3/Tools-System/Maintenance-Tool_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Tools-System/Maintenance-Tool_apache.md rename to src/UserGuide/V1.3.3/Tools-System/Maintenance-Tool_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/Tools-System/Maintenance-Tool_timecho.md b/src/UserGuide/V1.3.3/Tools-System/Maintenance-Tool_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Tools-System/Maintenance-Tool_timecho.md rename to src/UserGuide/V1.3.3/Tools-System/Maintenance-Tool_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Tools-System/Monitor-Tool_apache.md b/src/UserGuide/V1.3.3/Tools-System/Monitor-Tool_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Tools-System/Monitor-Tool_apache.md rename to src/UserGuide/V1.3.3/Tools-System/Monitor-Tool_apache.md diff --git 
a/src/UserGuide/V2.0.1/Tree/Tools-System/Monitor-Tool_timecho.md b/src/UserGuide/V1.3.3/Tools-System/Monitor-Tool_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Tools-System/Monitor-Tool_timecho.md rename to src/UserGuide/V1.3.3/Tools-System/Monitor-Tool_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Tools-System/Workbench_timecho.md b/src/UserGuide/V1.3.3/Tools-System/Workbench_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Tools-System/Workbench_timecho.md rename to src/UserGuide/V1.3.3/Tools-System/Workbench_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/AINode_timecho.md b/src/UserGuide/V1.3.3/User-Manual/AINode_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/AINode_timecho.md rename to src/UserGuide/V1.3.3/User-Manual/AINode_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Audit-Log_timecho.md b/src/UserGuide/V1.3.3/User-Manual/Audit-Log_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Audit-Log_timecho.md rename to src/UserGuide/V1.3.3/User-Manual/Audit-Log_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Authority-Management.md b/src/UserGuide/V1.3.3/User-Manual/Authority-Management.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Authority-Management.md rename to src/UserGuide/V1.3.3/User-Manual/Authority-Management.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Data-Recovery.md b/src/UserGuide/V1.3.3/User-Manual/Data-Recovery.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Data-Recovery.md rename to src/UserGuide/V1.3.3/User-Manual/Data-Recovery.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Data-Sync_apache.md b/src/UserGuide/V1.3.3/User-Manual/Data-Sync_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Data-Sync_apache.md rename to src/UserGuide/V1.3.3/User-Manual/Data-Sync_apache.md diff --git 
a/src/UserGuide/V2.0.1/Tree/User-Manual/Data-Sync_timecho.md b/src/UserGuide/V1.3.3/User-Manual/Data-Sync_timecho.md similarity index 99% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Data-Sync_timecho.md rename to src/UserGuide/V1.3.3/User-Manual/Data-Sync_timecho.md index 425bf0118..4669d357f 100644 --- a/src/UserGuide/V2.0.1/Tree/User-Manual/Data-Sync_timecho.md +++ b/src/UserGuide/V1.3.3/User-Manual/Data-Sync_timecho.md @@ -457,7 +457,6 @@ with sink ( | Unknown | GAP‌‌ XL—GAP | No Limit | No Limit | - ### Compression Synchronization (V1.3.3+) IoTDB supports specifying data compression methods during synchronization. Real time compression and transmission of data can be achieved by configuring the `compressor` parameter. `Compressor` currently supports 5 optional algorithms: snappy/gzip/lz4/zstd/lzma2, and can choose multiple compression algorithm combinations to compress in the order of configuration `rate-limit-bytes-per-second`(supported in V1.3.3 and later versions) is the maximum number of bytes allowed to be transmitted per second, calculated as compressed bytes. If it is less than 0, there is no limit. 
diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Data-subscription.md b/src/UserGuide/V1.3.3/User-Manual/Data-subscription.md similarity index 99% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Data-subscription.md rename to src/UserGuide/V1.3.3/User-Manual/Data-subscription.md index 250115e36..3eefc3f8f 100644 --- a/src/UserGuide/V2.0.1/Tree/User-Manual/Data-subscription.md +++ b/src/UserGuide/V1.3.3/User-Manual/Data-subscription.md @@ -80,7 +80,6 @@ WITH ( 'start-time' = '2023-01-01', 'end-time' = '2023-12-31' ); -``` #### 3.1.2 Delete Topic diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Database-Programming.md b/src/UserGuide/V1.3.3/User-Manual/Database-Programming.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Database-Programming.md rename to src/UserGuide/V1.3.3/User-Manual/Database-Programming.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/IoTDB-View_timecho.md b/src/UserGuide/V1.3.3/User-Manual/IoTDB-View_timecho.md similarity index 99% rename from src/UserGuide/V2.0.1/Tree/User-Manual/IoTDB-View_timecho.md rename to src/UserGuide/V1.3.3/User-Manual/IoTDB-View_timecho.md index 195847395..b84bfef7a 100644 --- a/src/UserGuide/V2.0.1/Tree/User-Manual/IoTDB-View_timecho.md +++ b/src/UserGuide/V1.3.3/User-Manual/IoTDB-View_timecho.md @@ -434,6 +434,8 @@ DELETE VIEW root.view.device.avg_temperatue ### View Synchronisation + + #### If the dependent original sequence is deleted When the sequence view is queried (when the sequence is parsed), **the empty result set** is returned if the dependent time series does not exist. 
diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Load-Balance.md b/src/UserGuide/V1.3.3/User-Manual/Load-Balance.md similarity index 99% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Load-Balance.md rename to src/UserGuide/V1.3.3/User-Manual/Load-Balance.md index 45ae3299b..3453ea107 100644 --- a/src/UserGuide/V2.0.1/Tree/User-Manual/Load-Balance.md +++ b/src/UserGuide/V1.3.3/User-Manual/Load-Balance.md @@ -101,4 +101,4 @@ Here is a schematic diagram of the region migration process : ```plain IoTDB> set configuration "wal_throttle_threshold_in_byte"="536870912000" Msg: The statement is executed successfully. - ``` \ No newline at end of file + ``` diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Query-Performance-Analysis.md b/src/UserGuide/V1.3.3/User-Manual/Query-Performance-Analysis.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Query-Performance-Analysis.md rename to src/UserGuide/V1.3.3/User-Manual/Query-Performance-Analysis.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Streaming_apache.md b/src/UserGuide/V1.3.3/User-Manual/Streaming_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Streaming_apache.md rename to src/UserGuide/V1.3.3/User-Manual/Streaming_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Streaming_timecho.md b/src/UserGuide/V1.3.3/User-Manual/Streaming_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Streaming_timecho.md rename to src/UserGuide/V1.3.3/User-Manual/Streaming_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Tiered-Storage_timecho.md b/src/UserGuide/V1.3.3/User-Manual/Tiered-Storage_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/Tiered-Storage_timecho.md rename to src/UserGuide/V1.3.3/User-Manual/Tiered-Storage_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/Trigger.md b/src/UserGuide/V1.3.3/User-Manual/Trigger.md similarity index 100% rename from 
src/UserGuide/V2.0.1/Tree/User-Manual/Trigger.md rename to src/UserGuide/V1.3.3/User-Manual/Trigger.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/UDF-development.md b/src/UserGuide/V1.3.3/User-Manual/UDF-development.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/UDF-development.md rename to src/UserGuide/V1.3.3/User-Manual/UDF-development.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/User-defined-function_apache.md b/src/UserGuide/V1.3.3/User-Manual/User-defined-function_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/User-defined-function_apache.md rename to src/UserGuide/V1.3.3/User-Manual/User-defined-function_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/User-defined-function_timecho.md b/src/UserGuide/V1.3.3/User-Manual/User-defined-function_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/User-defined-function_timecho.md rename to src/UserGuide/V1.3.3/User-Manual/User-defined-function_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/User-Manual/White-List_timecho.md b/src/UserGuide/V1.3.3/User-Manual/White-List_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/User-Manual/White-List_timecho.md rename to src/UserGuide/V1.3.3/User-Manual/White-List_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/UserGuideReadme.md b/src/UserGuide/V1.3.3/UserGuideReadme.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/UserGuideReadme.md rename to src/UserGuide/V1.3.3/UserGuideReadme.md diff --git a/src/UserGuide/V2.0.1/Table/Basic-Concept/Sample-Data.md b/src/UserGuide/V2.0.1-Table/Basic-Concept/Sample-Data.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Basic-Concept/Sample-Data.md rename to src/UserGuide/V2.0.1-Table/Basic-Concept/Sample-Data.md diff --git a/src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md 
b/src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md rename to src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Database-Resources.md b/src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Database-Resources.md similarity index 99% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Database-Resources.md rename to src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Database-Resources.md index d6210318a..374b03e2f 100644 --- a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Database-Resources.md +++ b/src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Database-Resources.md @@ -188,7 +188,6 @@ Calculation formula: Number of measurement points * Sampling frequency (Hz) * Si Example: 1000 devices, each with 100 measurement points, a total of 100000 sequences, INT32 type. Sampling frequency 1Hz (once per second), storage for 1 year, 3 copies. 
- Complete calculation formula: 1000 devices * 100 measurement points * 12 bytes per data point * 86400 seconds per day * 365 days per year * 3 copies / 10 compression ratio / 1024 / 1024 / 1024 / 1024 =11T - Simplified calculation formula: 1000 * 100 * 12 * 86400 * 365 * 3 / 10 / 1024 / 1024 / 1024 / 1024 =11T - ### Storage Configuration If the number of nodes is over 10000000 or the query load is high, it is recommended to configure SSD ## Network (Network card) diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Docker-Deployment_apache.md b/src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Docker-Deployment_apache.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Docker-Deployment_apache.md rename to src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Docker-Deployment_apache.md diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Docker-Deployment_timecho.md b/src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Docker-Deployment_timecho.md rename to src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md diff --git a/src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md b/src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md rename to src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md diff --git a/src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Environment-Requirements.md b/src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Environment-Requirements.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Environment-Requirements.md rename to 
src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Environment-Requirements.md diff --git a/src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md b/src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md rename to src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Monitoring-panel-deployment.md b/src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Monitoring-panel-deployment.md rename to src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md diff --git a/src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md b/src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md rename to src/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md diff --git a/src/UserGuide/V2.0.1/Tree/QuickStart/QuickStart.md b/src/UserGuide/V2.0.1-Table/QuickStart/QuickStart.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/QuickStart/QuickStart.md rename to src/UserGuide/V2.0.1-Table/QuickStart/QuickStart.md diff --git a/src/UserGuide/V2.0.1/Table/QuickStart/QuickStart_timecho.md b/src/UserGuide/V2.0.1-Table/QuickStart/QuickStart_timecho.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/QuickStart/QuickStart_timecho.md rename to src/UserGuide/V2.0.1-Table/QuickStart/QuickStart_timecho.md diff --git a/src/UserGuide/V2.0.1/Table/Reference/System-Config-Manual.md b/src/UserGuide/V2.0.1-Table/Reference/System-Config-Manual.md similarity index 100% rename from 
src/UserGuide/V2.0.1/Table/Reference/System-Config-Manual.md rename to src/UserGuide/V2.0.1-Table/Reference/System-Config-Manual.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/Fill-Clause.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/Fill-Clause.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/SQL-Manual/Fill-Clause.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/Fill-Clause.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/From-Join-Clause.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/From-Join-Clause.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/SQL-Manual/From-Join-Clause.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/From-Join-Clause.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/GroupBy-Clause.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/GroupBy-Clause.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/SQL-Manual/GroupBy-Clause.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/GroupBy-Clause.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/Having-Clause.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/Having-Clause.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/SQL-Manual/Having-Clause.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/Having-Clause.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/Identifier.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/Identifier.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/SQL-Manual/Identifier.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/Identifier.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/Keywords.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/Keywords.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/SQL-Manual/Keywords.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/Keywords.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/Limit-Offset-Clause.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/Limit-Offset-Clause.md similarity index 100% rename from 
src/UserGuide/V2.0.1/Table/SQL-Manual/Limit-Offset-Clause.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/Limit-Offset-Clause.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/OrderBy-Clause.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/OrderBy-Clause.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/SQL-Manual/OrderBy-Clause.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/OrderBy-Clause.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/Select-Clause.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/Select-Clause.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/SQL-Manual/Select-Clause.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/Select-Clause.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/Where-Clause.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/Where-Clause.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/SQL-Manual/Where-Clause.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/Where-Clause.md diff --git a/src/UserGuide/V2.0.1/Table/SQL-Manual/overview.md b/src/UserGuide/V2.0.1-Table/SQL-Manual/overview.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/SQL-Manual/overview.md rename to src/UserGuide/V2.0.1-Table/SQL-Manual/overview.md diff --git a/src/UserGuide/V2.0.1/Table/Tools-System/CLI.md b/src/UserGuide/V2.0.1-Table/Tools-System/CLI.md similarity index 100% rename from src/UserGuide/V2.0.1/Table/Tools-System/CLI.md rename to src/UserGuide/V2.0.1-Table/Tools-System/CLI.md diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-Go-Native-API.md b/src/UserGuide/V2.0.1/Tree/API/Programming-Go-Native-API.md deleted file mode 100644 index b227ed672..000000000 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-Go-Native-API.md +++ /dev/null @@ -1,64 +0,0 @@ - - -# Go Native API - -The Git repository for the Go Native API client is located [here](https://github.com/apache/iotdb-client-go/) - -## Dependencies - - * golang >= 1.13 - * make >= 3.0 - * curl >= 7.1.1 - * thrift 0.15.0 - * Linux、Macos 
or other unix-like systems - * Windows+bash (WSL、cygwin、Git Bash) - -## Installation - - * go mod - -```sh -export GO111MODULE=on -export GOPROXY=https://goproxy.io - -mkdir session_example && cd session_example - -curl -o session_example.go -L https://github.com/apache/iotdb-client-go/raw/main/example/session_example.go - -go mod init session_example -go run session_example.go -``` - -* GOPATH - -```sh -# get thrift 0.15.0 -go get github.com/apache/thrift -cd $GOPATH/src/github.com/apache/thrift -git checkout 0.15.0 - -mkdir -p $GOPATH/src/iotdb-client-go-example/session_example -cd $GOPATH/src/iotdb-client-go-example/session_example -curl -o session_example.go -L https://github.com/apache/iotdb-client-go/raw/main/example/session_example.go -go run session_example.go -``` - diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-Kafka.md b/src/UserGuide/V2.0.1/Tree/API/Programming-Kafka.md deleted file mode 100644 index 0a041448f..000000000 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-Kafka.md +++ /dev/null @@ -1,118 +0,0 @@ - - -# Kafka - -[Apache Kafka](https://kafka.apache.org/) is an open-source distributed event streaming platform used by thousands of companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications. 
- -## Coding Example - -### kafka Producer Producing Data Java Code Example - -```java - Properties props = new Properties(); - props.put("bootstrap.servers", "127.0.0.1:9092"); - props.put("key.serializer", StringSerializer.class); - props.put("value.serializer", StringSerializer.class); - KafkaProducer producer = new KafkaProducer<>(props); - producer.send( - new ProducerRecord<>( - "Kafka-Test", "key", "root.kafka," + System.currentTimeMillis() + ",value,INT32,100")); - producer.close(); -``` - -### kafka Consumer Receiving Data Java Code Example - -```java - Properties props = new Properties(); - props.put("bootstrap.servers", "127.0.0.1:9092"); - props.put("key.deserializer", StringDeserializer.class); - props.put("value.deserializer", StringDeserializer.class); - props.put("auto.offset.reset", "earliest"); - props.put("group.id", "Kafka-Test"); - KafkaConsumer kafkaConsumer = new KafkaConsumer<>(props); - kafkaConsumer.subscribe(Collections.singleton("Kafka-Test")); - ConsumerRecords records = kafkaConsumer.poll(Duration.ofSeconds(1)); - ``` - -### Example of Java Code Stored in IoTDB Server - -```java - SessionPool pool = - new SessionPool.Builder() - .host("127.0.0.1") - .port(6667) - .user("root") - .password("root") - .maxSize(3) - .build(); - List datas = new ArrayList<>(records.count()); - for (ConsumerRecord record : records) { - datas.add(record.value()); - } - int size = datas.size(); - List deviceIds = new ArrayList<>(size); - List times = new ArrayList<>(size); - List> measurementsList = new ArrayList<>(size); - List> typesList = new ArrayList<>(size); - List> valuesList = new ArrayList<>(size); - for (String data : datas) { - String[] dataArray = data.split(","); - String device = dataArray[0]; - long time = Long.parseLong(dataArray[1]); - List measurements = Arrays.asList(dataArray[2].split(":")); - List types = new ArrayList<>(); - for (String type : dataArray[3].split(":")) { - types.add(TSDataType.valueOf(type)); - } - List values = new 
ArrayList<>(); - String[] valuesStr = dataArray[4].split(":"); - for (int i = 0; i < valuesStr.length; i++) { - switch (types.get(i)) { - case INT64: - values.add(Long.parseLong(valuesStr[i])); - break; - case DOUBLE: - values.add(Double.parseDouble(valuesStr[i])); - break; - case INT32: - values.add(Integer.parseInt(valuesStr[i])); - break; - case TEXT: - values.add(valuesStr[i]); - break; - case FLOAT: - values.add(Float.parseFloat(valuesStr[i])); - break; - case BOOLEAN: - values.add(Boolean.parseBoolean(valuesStr[i])); - break; - } - } - deviceIds.add(device); - times.add(time); - measurementsList.add(measurements); - typesList.add(types); - valuesList.add(values); - } - pool.insertRecords(deviceIds, times, measurementsList, typesList, valuesList); - ``` - diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-MQTT.md b/src/UserGuide/V2.0.1/Tree/API/Programming-MQTT.md deleted file mode 100644 index 5bbb610cf..000000000 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-MQTT.md +++ /dev/null @@ -1,183 +0,0 @@ - -# MQTT Protocol - -[MQTT](http://mqtt.org/) is a machine-to-machine (M2M)/"Internet of Things" connectivity protocol. -It was designed as an extremely lightweight publish/subscribe messaging transport. -It is useful for connections with remote locations where a small code footprint is required and/or network bandwidth is at a premium. - -IoTDB supports the MQTT v3.1(an OASIS Standard) protocol. -IoTDB server includes a built-in MQTT service that allows remote devices send messages into IoTDB server directly. - - - - -## Built-in MQTT Service -The Built-in MQTT Service provide the ability of direct connection to IoTDB through MQTT. It listen the publish messages from MQTT clients - and then write the data into storage immediately. -The MQTT topic corresponds to IoTDB timeseries. -The messages payload can be format to events by `PayloadFormatter` which loaded by java SPI, and the default implementation is `JSONPayloadFormatter`. 
-The default `json` formatter support two json format and its json array. The following is an MQTT message payload example: - -```json - { - "device":"root.sg.d1", - "timestamp":1586076045524, - "measurements":["s1","s2"], - "values":[0.530635,0.530635] - } -``` -or -```json - { - "device":"root.sg.d1", - "timestamps":[1586076045524,1586076065526], - "measurements":["s1","s2"], - "values":[[0.530635,0.530635], [0.530655,0.530695]] - } -``` -or json array of the above two. - - - -## MQTT Configurations -The IoTDB MQTT service load configurations from `${IOTDB_HOME}/${IOTDB_CONF}/iotdb-system.properties` by default. - -Configurations are as follows: - -| NAME | DESCRIPTION | DEFAULT | -| ------------- |:-------------:|:------:| -| enable_mqtt_service | whether to enable the mqtt service | false | -| mqtt_host | the mqtt service binding host | 127.0.0.1 | -| mqtt_port | the mqtt service binding port | 1883 | -| mqtt_handler_pool_size | the handler pool size for handing the mqtt messages | 1 | -| mqtt_payload_formatter | the mqtt message payload formatter | json | -| mqtt_max_message_size | the max mqtt message size in byte| 1048576 | - - -## Coding Examples -The following is an example which a mqtt client send messages to IoTDB server. 
- -```java -MQTT mqtt = new MQTT(); -mqtt.setHost("127.0.0.1", 1883); -mqtt.setUserName("root"); -mqtt.setPassword("root"); - -BlockingConnection connection = mqtt.blockingConnection(); -connection.connect(); - -Random random = new Random(); -for (int i = 0; i < 10; i++) { - String payload = String.format("{\n" + - "\"device\":\"root.sg.d1\",\n" + - "\"timestamp\":%d,\n" + - "\"measurements\":[\"s1\"],\n" + - "\"values\":[%f]\n" + - "}", System.currentTimeMillis(), random.nextDouble()); - - connection.publish("root.sg.d1.s1", payload.getBytes(), QoS.AT_LEAST_ONCE, false); -} - -connection.disconnect(); - -``` - -## Customize your MQTT Message Format - -If you do not like the above Json format, you can customize your MQTT Message format by just writing several lines -of codes. An example can be found in `example/mqtt-customize` project. - -Steps: -1. Create a java project, and add dependency: -```xml - - org.apache.iotdb - iotdb-server - 1.1.0-SNAPSHOT - -``` -2. Define your implementation which implements `org.apache.iotdb.db.protocol.mqtt.PayloadFormatter` -e.g., - -```java -package org.apache.iotdb.mqtt.server; - -import io.netty.buffer.ByteBuf; -import org.apache.iotdb.db.protocol.mqtt.Message; -import org.apache.iotdb.db.protocol.mqtt.PayloadFormatter; - -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -public class CustomizedJsonPayloadFormatter implements PayloadFormatter { - - @Override - public List format(ByteBuf payload) { - // Suppose the payload is a json format - if (payload == null) { - return null; - } - - String json = payload.toString(StandardCharsets.UTF_8); - // parse data from the json and generate Messages and put them into List ret - List ret = new ArrayList<>(); - // this is just an example, so we just generate some Messages directly - for (int i = 0; i < 2; i++) { - long ts = i; - Message message = new Message(); - message.setDevice("d" + i); - 
message.setTimestamp(ts); - message.setMeasurements(Arrays.asList("s1", "s2")); - message.setValues(Arrays.asList("4.0" + i, "5.0" + i)); - ret.add(message); - } - return ret; - } - - @Override - public String getName() { - // set the value of mqtt_payload_formatter in iotdb-system.properties as the following string: - return "CustomizedJson"; - } -} -``` -3. modify the file in `src/main/resources/META-INF/services/org.apache.iotdb.db.protocol.mqtt.PayloadFormatter`: - clean the file and put your implementation class name into the file. - In this example, the content is: `org.apache.iotdb.mqtt.server.CustomizedJsonPayloadFormatter` -4. compile your implementation as a jar file: `mvn package -DskipTests` - - -Then, in your server: -1. Create ${IOTDB_HOME}/ext/mqtt/ folder, and put the jar into this folder. -2. Update configuration to enable MQTT service. (`enable_mqtt_service=true` in `conf/iotdb-system.properties`) -3. Set the value of `mqtt_payload_formatter` in `conf/iotdb-system.properties` as the value of getName() in your implementation - , in this example, the value is `CustomizedJson` -4. Launch the IoTDB server. -5. Now IoTDB will use your implementation to parse the MQTT message. - -More: the message format can be anything you want. For example, if it is a binary format, -just use `payload.forEachByte()` or `payload.array` to get bytes content. - - - diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-ODBC.md b/src/UserGuide/V2.0.1/Tree/API/Programming-ODBC.md deleted file mode 100644 index 8e0d74852..000000000 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-ODBC.md +++ /dev/null @@ -1,146 +0,0 @@ - - -# ODBC -With IoTDB JDBC, IoTDB can be accessed using the ODBC-JDBC bridge. - -## Dependencies -* IoTDB-JDBC's jar-with-dependency package -* ODBC-JDBC bridge (e.g. 
ZappySys JDBC Bridge) - -## Deployment -### Preparing JDBC package -Download the source code of IoTDB, and execute the following command in root directory: -```shell -mvn clean package -pl iotdb-client/jdbc -am -DskipTests -P get-jar-with-dependencies -``` -Then, you can see the output `iotdb-jdbc-1.3.2-SNAPSHOT-jar-with-dependencies.jar` under `iotdb-client/jdbc/target` directory. - -### Preparing ODBC-JDBC Bridge -*Note: Here we only provide one kind of ODBC-JDBC bridge as the instance. Readers can use other ODBC-JDBC bridges to access IoTDB with the IOTDB-JDBC.* -1. **Download Zappy-Sys ODBC-JDBC Bridge**: - Enter the https://zappysys.com/products/odbc-powerpack/odbc-jdbc-bridge-driver/ website, and click "download". - - ![ZappySys_website.jpg](https://alioss.timecho.com/upload/ZappySys_website.jpg) - -2. **Prepare IoTDB**: Set up IoTDB cluster, and write a row of data arbitrarily. - ```sql - IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) - ``` - -3. **Deploy and Test the Bridge**: - 1. Open ODBC Data Sources(32/64 bit), depending on the bits of Windows. One possible position is `C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Administrative Tools`. - - ![ODBC_ADD_EN.jpg](https://alioss.timecho.com/upload/ODBC_ADD_EN.jpg) - - 2. Click on "add" and select ZappySys JDBC Bridge. - - ![ODBC_CREATE_EN.jpg](https://alioss.timecho.com/upload/ODBC_CREATE_EN.jpg) - - 3. 
Fill in the following settings: - - | Property | Content | Example | - |---------------------|-----------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------| - | Connection String | jdbc:iotdb://\:\/ | jdbc:iotdb://127.0.0.1:6667/ | - | Driver Class | org.apache.iotdb.jdbc.IoTDBDriver | org.apache.iotdb.jdbc.IoTDBDriver | - | JDBC driver file(s) | The path of IoTDB JDBC jar-with-dependencies | C:\Users\13361\Documents\GitHub\iotdb\iotdb-client\jdbc\target\iotdb-jdbc-1.3.2-SNAPSHOT-jar-with-dependencies.jar | - | User name | IoTDB's user name | root | - | User password | IoTDB's password | root | - - ![ODBC_CONNECTION.png](https://alioss.timecho.com/upload/ODBC_CONNECTION.png) - - 4. Click on "Test Connection" button, and a "Test Connection: SUCCESSFUL" should appear. - - ![ODBC_CONFIG_EN.jpg](https://alioss.timecho.com/upload/ODBC_CONFIG_EN.jpg) - - 5. Click the "Preview" button above, and replace the original query text with `select * from root.**`, then click "Preview Data", and the query result should correctly. - - ![ODBC_TEST.jpg](https://alioss.timecho.com/upload/ODBC_TEST.jpg) - -4. **Operate IoTDB's data with ODBC**: After correct deployment, you can use Microsoft's ODBC library to operate IoTDB's data. 
Here's an example written in C#: - ```C# - using System.Data.Odbc; - - // Get a connection - var dbConnection = new OdbcConnection("DSN=ZappySys JDBC Bridge"); - dbConnection.Open(); - - // Execute the write commands to prepare data - var dbCommand = dbConnection.CreateCommand(); - dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s1) values(1715670861634, 1)"; - dbCommand.ExecuteNonQuery(); - dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s2) values(1715670861634, true)"; - dbCommand.ExecuteNonQuery(); - dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s3) values(1715670861634, 3.1)"; - dbCommand.ExecuteNonQuery(); - - // Execute the read command - dbCommand.CommandText = "SELECT * FROM root.Keller.Flur.Energieversorgung"; - var dbReader = dbCommand.ExecuteReader(); - - // Write the output header - var fCount = dbReader.FieldCount; - Console.Write(":"); - for(var i = 0; i < fCount; i++) - { - var fName = dbReader.GetName(i); - Console.Write(fName + ":"); - } - Console.WriteLine(); - - // Output the content - while (dbReader.Read()) - { - Console.Write(":"); - for(var i = 0; i < fCount; i++) - { - var fieldType = dbReader.GetFieldType(i); - switch (fieldType.Name) - { - case "DateTime": - var dateTime = dbReader.GetInt64(i); - Console.Write(dateTime + ":"); - break; - case "Double": - if (dbReader.IsDBNull(i)) - { - Console.Write("null:"); - } - else - { - var fValue = dbReader.GetDouble(i); - Console.Write(fValue + ":"); - } - break; - default: - Console.Write(fieldType.Name + ":"); - break; - } - } - Console.WriteLine(); - } - - // Shut down gracefully - dbReader.Close(); - dbCommand.Dispose(); - dbConnection.Close(); - ``` - This program can write data into IoTDB, and query the data we have just written. 
diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-OPC-UA_timecho.md b/src/UserGuide/V2.0.1/Tree/API/Programming-OPC-UA_timecho.md deleted file mode 100644 index 703b47c68..000000000 --- a/src/UserGuide/V2.0.1/Tree/API/Programming-OPC-UA_timecho.md +++ /dev/null @@ -1,262 +0,0 @@ - - -# OPC UA Protocol - -## OPC UA - -OPC UA is a technical specification used in the automation field for communication between different devices and systems, enabling cross platform, cross language, and cross network operations, providing a reliable and secure data exchange foundation for the Industrial Internet of Things. IoTDB supports OPC UA protocol, and IoTDB OPC Server supports both Client/Server and Pub/Sub communication modes. - -### OPC UA Client/Server Mode - -- **Client/Server Mode**:In this mode, IoTDB's stream processing engine establishes a connection with the OPC UA Server via an OPC UA Sink. The OPC UA Server maintains data within its Address Space, from which IoTDB can request and retrieve data. Additionally, other OPC UA Clients can access the data on the server. - -
- -
- - -- Features: - - - OPC UA will organize the device information received from Sink into folders under the Objects folder according to a tree model. - - - Each measurement point is recorded as a variable node and the latest value in the current database is recorded. - -### OPC UA Pub/Sub Mode - -- **Pub/Sub Mode**: In this mode, IoTDB's stream processing engine sends data change events to the OPC UA Server through an OPC UA Sink. These events are published to the server's message queue and managed through Event Nodes. Other OPC UA Clients can subscribe to these Event Nodes to receive notifications upon data changes. - -
- -
- -- Features: - - - Each measurement point is wrapped as an Event Node in OPC UA. - - - - The relevant fields and their meanings are as follows: - - | Field | Meaning | Type (Milo) | Example | - | :--------- | :--------------- | :------------ | :-------------------- | - | Time | Timestamp | DateTime | 1698907326198 | - | SourceName | Full path of the measurement point | String | root.test.opc.sensor0 | - | SourceNode | Data type of the measurement point | NodeId | Int32 | - | Message | Data | LocalizedText | 3.0 | - - - Events are only sent to clients that are already listening; if a client is not connected, the Event will be ignored. - - -## IoTDB OPC Server Startup method - -### Syntax - -The syntax for creating the Sink is as follows: - - -```SQL -create pipe p1 - with source (...) - with processor (...) - with sink ('sink' = 'opc-ua-sink', - 'sink.opcua.tcp.port' = '12686', - 'sink.opcua.https.port' = '8443', - 'sink.user' = 'root', - 'sink.password' = 'root', - 'sink.opcua.security.dir' = '...' - ) -``` - -### Parameters - -| key | value | value range | required or not | default value | -| :------------------------------ | :----------------------------------------------------------- | :------------------------------------- | :------- | :------------- | -| sink | OPC UA SINK | String: opc-ua-sink | Required | | -| sink.opcua.model | OPC UA model used | String: client-server / pub-sub | Optional | client-server | -| sink.opcua.tcp.port | OPC UA's TCP port | Integer: [0, 65536] | Optional | 12686 | -| sink.opcua.https.port | OPC UA's HTTPS port | Integer: [0, 65536] | Optional | 8443 | -| sink.opcua.security.dir | Directory for OPC UA's keys and certificates | String: Path, supports absolute and relative directories | Optional | Opc_security folder/in the conf directory of the DataNode related to iotdb
If there is no conf directory for iotdb (such as launching DataNode in IDEA), it will be the iotdb_opc_Security folder/in the user's home directory | -| sink.opcua.enable-anonymous-access | Whether OPC UA allows anonymous access | Boolean | Optional | true | -| sink.user | User for OPC UA, specified in the configuration | String | Optional | root | -| sink.password | Password for OPC UA, specified in the configuration | String | Optional | root | - -### 示例 - -```Bash -create pipe p1 - with sink ('sink' = 'opc-ua-sink', - 'sink.user' = 'root', - 'sink.password' = 'root'); -start pipe p1; -``` - -### Usage Limitations - -1. **DataRegion Requirement**: The OPC UA server will only start if there is a DataRegion in IoTDB. For an empty IoTDB, a data entry is necessary for the OPC UA server to become effective. - -2. **Data Availability**: Clients subscribing to the server will not receive data written to IoTDB before their connection. - -3. **Multiple DataNodes may have scattered sending/conflict issues**: - - - For IoTDB clusters with multiple dataRegions and scattered across different DataNode IPs, data will be sent in a dispersed manner on the leaders of the dataRegions. The client needs to listen to the configuration ports of the DataNode IP separately.。 - - - Suggest using this OPC UA server under 1C1D. - -4. **Does not support deleting data and modifying measurement point types:** In Client Server mode, OPC UA cannot delete data or change data type settings. In Pub Sub mode, if data is deleted, information cannot be pushed to the client. - -## IoTDB OPC Server Example - -### Client / Server Mode - -#### Preparation Work - -1. Take UAExpert client as an example, download the UAExpert client: https://www.unified-automation.com/downloads/opc-ua-clients.html - -2. Install UAExpert and fill in your own certificate information. - -#### Quick Start - -1. Use the following SQL to create and start the OPC UA Sink in client-server mode. 
For detailed syntax, please refer to: [IoTDB OPC Server Syntax](#syntax) - -```SQL -create pipe p1 with sink ('sink'='opc-ua-sink'); -``` - -2. Write some data. - -```SQL -insert into root.test.db(time, s2) values(now(), 2) -``` - -​ The metadata is automatically created and enabled here. - -3. Configure the connection to IoTDB in UAExpert, where the password should be set to the one defined in the sink.password parameter (using the default password "root" as an example): - -
- -
- -
- -
- -4. After trusting the server's certificate, you can see the written data in the Objects folder on the left. - -
- -
- -
- -
- -5. You can drag the node on the left to the center and display the latest value of that node: - -
- -
- -### Pub / Sub Mode - -#### Preparation Work - -The code is located in the [opc-ua-sink 文件夹](https://github.com/apache/iotdb/tree/master/example/pipe-opc-ua-sink/src/main/java/org/apache/iotdb/opcua) under the iotdb-example package. - -The code includes: - -- The main class (ClientTest) -- Client certificate-related logic(IoTDBKeyStoreLoaderClient) -- Client configuration and startup logic(ClientExampleRunner) -- The parent class of ClientTest(ClientExample) - -### Quick Start - -The steps are as follows: - -1. Start IoTDB and write some data. - -```SQL -insert into root.a.b(time, c, d) values(now(), 1, 2); -``` - -​ The metadata is automatically created and enabled here. - -2. Use the following SQL to create and start the OPC UA Sink in Pub-Sub mode. For detailed syntax, please refer to: [IoTDB OPC Server Syntax](#syntax) - -```SQL -create pipe p1 with sink ('sink'='opc-ua-sink', - 'sink.opcua.model'='pub-sub'); -start pipe p1; -``` - -​ At this point, you can see that the opc certificate-related directory has been created under the server's conf directory. - -
- -
- -3. Run the Client connection directly; the Client's certificate will be rejected by the server. - -
- -
- -4. Go to the server's sink.opcua.security.dir directory, then to the pki's rejected directory, where the Client's certificate should have been generated. - -
- -
- -5. Move (not copy) the client's certificate into (not into a subdirectory of) the trusted directory's certs folder in the same directory. - -
- -
- -6. Open the Client connection again; the server's certificate should now be rejected by the Client. - -
- -
- -7. Go to the client's /client/security directory, then to the pki's rejected directory, and move the server's certificate into (not into a subdirectory of) the trusted directory. - -
- -
- -8. Open the Client, and now the two-way trust is successful, and the Client can connect to the server. - -9. Write data to the server, and the Client will print out the received data. - -
- -
- - -### Notes - -1. **stand alone and cluster:**It is recommended to use a 1C1D (one coordinator and one data node) single machine version. If there are multiple DataNodes in the cluster, data may be sent in a scattered manner across various DataNodes, and it may not be possible to listen to all the data. - -2. **No Need to Operate Root Directory Certificates:** During the certificate operation process, there is no need to operate the `iotdb-server.pfx` certificate under the IoTDB security root directory and the `example-client.pfx` directory under the client security directory. When the Client and Server connect bidirectionally, they will send the root directory certificate to each other. If it is the first time the other party sees this certificate, it will be placed in the reject dir. If the certificate is in the trusted/certs, then the other party can trust it. - -3. **It is Recommended to Use Java 17+:** -In JVM 8 versions, there may be a key length restriction, resulting in an "Illegal key size" error. For specific versions (such as jdk.1.8u151+), you can add `Security.`*`setProperty`*`("crypto.policy", "unlimited");`; in the create client of ClientExampleRunner to solve this, or you can download the unlimited package `local_policy.jar` and `US_export_policy ` to replace the packages in the `JDK/jre/lib/security `. Download link:https://www.oracle.com/java/technologies/javase-jce8-downloads.html。 diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Cluster-Deployment_timecho.md b/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Cluster-Deployment_timecho.md deleted file mode 100644 index cf9658ff9..000000000 --- a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Cluster-Deployment_timecho.md +++ /dev/null @@ -1,412 +0,0 @@ - -# Cluster Deployment - -This guide describes how to manually deploy a cluster instance consisting of 3 ConfigNodes and 3 DataNodes (commonly referred to as a 3C3D cluster). - -
- -
- - - -## Prerequisites - -1. [System configuration](./Environment-Requirements.md):Ensure the system has been configured according to the preparation guidelines. - -2. **IP Configuration**: It is recommended to use hostnames for IP configuration to prevent issues caused by IP address changes. Configure the `/etc/hosts` file on each server. For example, if the local IP is `11.101.17.224` and the hostname is `iotdb-1`, use the following command to set the hostname: - - ``` shell - echo "192.168.1.3 iotdb-1" >> /etc/hosts - ``` - - Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration. - -3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the Parameter Configuration section. - -4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues. - -5. **User Permissions**: Choose one of the following permissions during installation and deployment: - - - **Root User (Recommended)**: This avoids permission-related issues. - - **Non-Root User**: - - Use the same user for all operations, including starting, activating, and stopping services. - - Avoid using `sudo`, which can cause permission conflicts. - -6. **Monitoring Panel**: Deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access and refer to the "[Monitoring Panel Deployment](./Monitoring-panel-deployment.md)" guide. - -## Preparation - -1. Obtain the TimechoDB installation package: `timechodb-{version}-bin.zip` following[IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_timecho.md) -2. 
Configure the operating system environment according to [Environment Requirement](./Environment-Requirements.md) - -## Installation Steps - -Taking a cluster with three Linux servers with the following information as example: - -| Node IP | Host Name | Service | -| ------------- | --------- | -------------------- | -| 11.101.17.224 | iotdb-1 | ConfigNode、DataNode | -| 11.101.17.225 | iotdb-2 | ConfigNode、DataNode | -| 11.101.17.226 | iotdb-3 | ConfigNode、DataNode | - -### 1.Configure Hostnames - -On all three servers, configure the hostnames by editing the `/etc/hosts` file. Use the following commands: - -```Bash -echo "11.101.17.224 iotdb-1" >> /etc/hosts -echo "11.101.17.225 iotdb-2" >> /etc/hosts -echo "11.101.17.226 iotdb-3" >> /etc/hosts -``` - -### 2. Extract Installation Package - -Unzip the installation package and enter the installation directory: - -```Plain -unzip timechodb-{version}-bin.zip -cd timechodb-{version}-bin -``` - -### 3. Parameters Configuration - -- #### Memory Configuration - - Edit the following files for memory allocation: - - - **ConfigNode**: `./conf/confignode-env.sh` (or `.bat` for Windows) - - **DataNode**: `./conf/datanode-env.sh` (or `.bat` for Windows) - - | **Parameter** | **Description** | **Default** | **Recommended** | **Notes** | - | :------------ | :--------------------------------- | :---------- | :-------------- | :-------------------------------------- | - | MEMORY_SIZE | Total memory allocated to the node | Empty | As needed | Effective after restarting the service. | - -**General Configuration** - -Set the following parameters in `./conf/iotdb-system.properties`. Refer to `./conf/iotdb-system.properties.template` for a complete list. 
- -**Cluster-Level Parameters**: - -| **Parameter** | **Description** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | -| :------------------------ | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | -| cluster_name | Name of the cluster | defaultCluster | defaultCluster | defaultCluster | -| schema_replication_factor | Metadata replication factor; DataNode count shall not be fewer than this value | 3 | 3 | 3 | -| data_replication_factor | Data replication factor; DataNode count shall not be fewer than this value | 2 | 2 | 2 | - -#### ConfigNode Parameters - -| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** | -| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- | -| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. | -| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | 10710 | 10710 | 10710 | This parameter cannot be modified after the first startup. | -| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | 10720 | 10720 | 10720 | This parameter cannot be modified after the first startup. | -| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. 
(e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address and port of the seed ConfigNode (e.g., `cn_internal_address:cn_internal_port`) | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. | - -#### DataNode Parameters - -| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** | -| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- | -| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. | -| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | 6667 | 6667 | 6667 | Effective after restarting the service. | -| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. | -| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | 10730 | 10730 | 10730 | This parameter cannot be modified after the first startup. | -| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | 10740 | 10740 | 10740 | This parameter cannot be modified after the first startup. | -| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | 10750 | 10750 | 10750 | This parameter cannot be modified after the first startup. 
| -| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | 10760 | 10760 | 10760 | This parameter cannot be modified after the first startup. | -| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster.(e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address of the first ConfigNode | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. | - -**Note:** Ensure files are saved after editing. Tools like VSCode Remote do not save changes automatically. - -### 4. Start ConfigNode Instances - -1. Start the first ConfigNode (`iotdb-1`) as the seed node - -```Bash -cd sbin -./start-confignode.sh -d #"- d" parameter will start in the background -``` - -2. Start the remaining ConfigNodes (`iotdb-2` and `iotdb-3`) in sequence. - - If the startup fails, refer to the [Common Questions](#common-questions) section below for troubleshooting. - -### 5.Start DataNode Instances - -On each server, navigate to the `sbin` directory and start the DataNode: - -```Go -cd sbin -./start-datanode.sh -d #"- d" parameter will start in the background -``` - -### 6.Activate Database - -#### Option 1: File-Based Activation - -1. Start all ConfigNodes and DataNodes. -2. Copy the `system_info` file from the `activation` directory on each server and send them to the Timecho team. -3. Place the license files provided by the Timecho team into the corresponding `activation` folder for each node. - -#### Option 2: Command-Based Activation - -1. Enter the IoTDB CLI for each node: - -- **For Table Model**: - - ```SQL - # For Linux or macOS - ./start-cli.sh -sql_dialect table - - # For Windows - ./start-cli.bat -sql_dialect table - ``` - -- **For Tree Model**: - - ```SQL - # For Linux or macOS - ./start-cli.sh - - # For Windows - ./start-cli.bat - ``` - -2. 
Run the following command to retrieve the machine code required for activation: - - ```Bash - show system info - ``` - - **Note**: Activation is currently supported only in the Tree Model. - -3. Copy the returned machine code of each server (displayed as a green string) and send it to the Timecho team: - - ```Bash - +--------------------------------------------------------------+ - | SystemInfo| - +--------------------------------------------------------------+ - |01-TE5NLES4-UDDWCMYE,01-GG5NLES4-XXDWCMYE,01-FF5NLES4-WWWWCMYE| - +--------------------------------------------------------------+ - Total line number = 1 - It costs 0.030s - ``` - -4. Enter the activation codes provided by the Timecho team in the CLI in sequence using the following format. Wrap the activation code in single quotes ('): - - ```Bash - IoTDB> activate '01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===' - ``` - -### 7.Verify Activation - -Check the `ClusterActivationStatus` field. If it shows `ACTIVATED`, the database has been successfully activated. - -![](https://alioss.timecho.com/docs/img/%E9%9B%86%E7%BE%A4-%E9%AA%8C%E8%AF%81.png) - -## Maintenance - -### ConfigNode Maintenance - -ConfigNode maintenance includes adding and removing ConfigNodes. 
Common use cases include: - -- **Cluster Expansion:** If the cluster contains only 1 ConfigNode, adding 2 more ConfigNodes enhances high availability, resulting in a total of 3 ConfigNodes. -- **Cluster Fault Recovery:** If a ConfigNode's machine fails and it cannot function normally, remove the faulty ConfigNode and add a new one to the cluster. - -**Note:** After completing ConfigNode maintenance, ensure that the cluster contains either 1 or 3 active ConfigNodes. Two ConfigNodes do not provide high availability, and more than three ConfigNodes can degrade performance. - -#### Adding a ConfigNode - -**Linux /** **MacOS**: - -```Plain -sbin/start-confignode.sh -``` - -**Windows:** - -```Plain -sbin/start-confignode.bat -``` - -#### Removing a ConfigNode - -1. Connect to the cluster using the CLI and confirm the internal address and port of the ConfigNode to be removed: - - ```Plain - show confignodes; - ``` - -Example output: - -```Plain -IoTDB> show confignodes -+------+-------+---------------+------------+--------+ -|NodeID| Status|InternalAddress|InternalPort| Role| -+------+-------+---------------+------------+--------+ -| 0|Running| 127.0.0.1| 10710| Leader| -| 1|Running| 127.0.0.1| 10711|Follower| -| 2|Running| 127.0.0.1| 10712|Follower| -+------+-------+---------------+------------+--------+ -Total line number = 3 -It costs 0.030s -``` - -2. Remove the ConfigNode using the script: - -**Linux /** **MacOS**: - -```Bash -sbin/remove-confignode.sh [confignode_id] -# Or: -sbin/remove-confignode.sh [cn_internal_address:cn_internal_port] -``` - -**Windows:** - -```Bash -sbin/remove-confignode.bat [confignode_id] -# Or: -sbin/remove-confignode.bat [cn_internal_address:cn_internal_port] -``` - -### DataNode Maintenance - -DataNode maintenance includes adding and removing DataNodes. Common use cases include: - -- **Cluster Expansion:** Add new DataNodes to increase cluster capacity. 
-- **Cluster Fault Recovery:** If a DataNode's machine fails and it cannot function normally, remove the faulty DataNode and add a new one to the cluster. - -**Note:** During and after DataNode maintenance, ensure that the number of active DataNodes is not fewer than the data replication factor (usually 2) or the schema replication factor (usually 3). - -#### Adding a DataNode - -**Linux /** **MacOS**: - -```Plain -sbin/start-datanode.sh -``` - -**Windows:** - -```Plain -sbin/start-datanode.bat -``` - -**Note:** After adding a DataNode, the cluster load will gradually balance across all nodes as new writes arrive and old data expires (if TTL is set). - -#### Removing a DataNode - -1. Connect to the cluster using the CLI and confirm the RPC address and port of the DataNode to be removed: - -```Plain -show datanodes; -``` - -Example output: - -```Plain -IoTDB> show datanodes -+------+-------+----------+-------+-------------+---------------+ -|NodeID| Status|RpcAddress|RpcPort|DataRegionNum|SchemaRegionNum| -+------+-------+----------+-------+-------------+---------------+ -| 1|Running| 0.0.0.0| 6667| 0| 0| -| 2|Running| 0.0.0.0| 6668| 1| 1| -| 3|Running| 0.0.0.0| 6669| 1| 0| -+------+-------+----------+-------+-------------+---------------+ -Total line number = 3 -It costs 0.110s -``` - -2. Remove the DataNode using the script: - -**Linux / MacOS:** - -```Bash -sbin/remove-datanode.sh [dn_rpc_address:dn_rpc_port] -``` - -**Windows:** - -```Bash -sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port] -``` - -## Common Questions - -1. Activation Fails Repeatedly - - Use the `ls -al` command to verify that the ownership of the installation directory matches the current user. - - Check the ownership of all files in the `./activation` directory to ensure they belong to the current user. -2. ConfigNode Fails to Start - - Review the startup logs to check if any parameters, which cannot be modified after the first startup, were changed. 
- - Check the logs for any other errors. If unresolved, contact technical support for assistance. - - If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps: - **Clean the Environment** - - - Stop all ConfigNode and DataNode processes: - ```Bash - sbin/stop-standalone.sh - ``` - - - Check for any remaining processes: - ```Bash - jps - # or - ps -ef | grep iotdb - ``` - - - If processes remain, terminate them manually: - ```Bash - kill -9 - - #For systems with a single IoTDB instance, you can clean up residual processes with: - ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9 - ``` - - - Delete the `data` and `logs` directories: - ```Bash - cd /data/iotdb - rm -rf data logs - ``` - -## Appendix - -### ConfigNode Parameters - -| Parameter | Description | Is it required | -| :-------- | :---------------------------------------------------------- | :------------- | -| -d | Starts the process in daemon mode (runs in the background). | No | - -### DataNode Parameters - -| Parameter | Description | Required | -| :-------- | :----------------------------------------------------------- | :------- | -| -v | Displays version information. | No | -| -f | Runs the script in the foreground without backgrounding it. | No | -| -d | Starts the process in daemon mode (runs in the background). | No | -| -p | Specifies a file to store the process ID for process management. | No | -| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No | -| -g | Prints detailed garbage collection (GC) information. | No | -| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No | -| -E | Specifies the file for JVM error logs. | No | -| -D | Defines system properties in the format `key=value`. | No | -| -X | Passes `-XX` options directly to the JVM. | No | -| -h | Displays the help instructions. 
| No | \ No newline at end of file diff --git a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md b/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md deleted file mode 100644 index cf3ed80a6..000000000 --- a/src/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md +++ /dev/null @@ -1,254 +0,0 @@ - -# Stand-Alone Deployment - -This guide introduces how to set up a standalone TimechoDB instance, which includes one ConfigNode and one DataNode (commonly referred to as 1C1D). - -## Prerequisites - -1. [System configuration](./Environment-Requirements.md): Ensure the system has been configured according to the preparation guidelines. - -2. **IP Configuration**: It is recommended to use hostnames for IP configuration to prevent issues caused by IP address changes. Set the hostname by editing the `/etc/hosts` file. For example, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, run: - - ```shell - echo "192.168.1.3 iotdb-1" >> /etc/hosts - ``` - - Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration. - -3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the Parameter Configuration section. - -4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues. - -5. - **User Permissions**: Choose one of the following permissions during installation and deployment: - - **Root User (Recommended)**: This avoids permission-related issues. - - **Non-Root User**: - - Use the same user for all operations, including starting, activating, and stopping services. - - Avoid using `sudo`, which can cause permission conflicts. - -6. **Monitoring Panel**: Deploy a monitoring panel to track key performance metrics. 
Contact the Timecho team for access and refer to the "[Monitoring Board Install and Deploy](./Monitoring-panel-deployment.md)" guide. - -## Installation Steps - -### 1、Extract Installation Package - -Unzip the installation package and navigate to the directory: - -```Plain -unzip timechodb-{version}-bin.zip -cd timechodb-{version}-bin -``` - -### 2、Parameter Configuration - -#### Memory Configuration - -Edit the following files for memory allocation: - -- **ConfigNode**: `conf/confignode-env.sh` (or `.bat` for Windows) -- **DataNode**: `conf/datanode-env.sh` (or `.bat` for Windows) - -| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** | -| :------------ | :---------------------------------- | :---------- | :-------------- | :---------------------- | -| MEMORY_SIZE | Total memory allocated for the node | Empty | As needed | Effective after restart | - -#### General Configuration - -Set the following parameters in `conf/iotdb-system.properties`. Refer to `conf/iotdb-system.properties.template` for a complete list. - -**Cluster-Level Parameters**: - -| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** | -| :------------------------ | :-------------------------- | :------------- | :-------------- | :----------------------------------------------------------- | -| cluster_name | Name of the cluster | defaultCluster | Customizable | If there is no specific requirement, keep the default value. | -| schema_replication_factor | Number of metadata replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. | -| data_replication_factor | Number of data replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. 
| - -**ConfigNode Parameters**: - -| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** | -| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- | -| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. | -| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | This parameter cannot be modified after the first startup. | -| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | This parameter cannot be modified after the first startup. | -| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. | - -**DataNode** **Parameters**: - -| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** | -| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- | -| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. | -| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | Effective after restarting the service. | -| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. 
| This parameter cannot be modified after the first startup. | -| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | This parameter cannot be modified after the first startup. | -| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | This parameter cannot be modified after the first startup. | -| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | This parameter cannot be modified after the first startup. | -| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | This parameter cannot be modified after the first startup. | -| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. | - -### 3、Start ConfigNode - -Navigate to the `sbin` directory and start ConfigNode: - -```Bash -./sbin/start-confignode.sh -d # The "-d" flag starts the process in the background. -``` - - If the startup fails, refer to the [**Common Problem**](#Common Problem) section below for troubleshooting. - -### 4、Start DataNode - -Navigate to the `sbin` directory of IoTDB and start the DataNode: - -````shell -./sbin/start-datanode.sh -d # The "-d" flag starts the process in the background. -```` - -### 5、Activate Database - -#### Option 1: File-Based Activation - -1. Start both the ConfigNode and DataNode. -2. Navigate to the `activation` folder and copy the `system_info` file. -3. Send the `system_info` file to the Timecho team. -4. Place the license file provided by the Timecho team into the corresponding `activation` folder for each node. - -#### Option 2: Command-Based Activation - -1. Enter the IoTDB CLI. 
- -- **For Table Model**: - -```SQL -# For Linux or macOS -./start-cli.sh -sql_dialect table - -# For Windows -./start-cli.bat -sql_dialect table -``` - -- **For Tree Model**: - -```SQL -# For Linux or macOS -./start-cli.sh - -# For Windows -./start-cli.bat -``` - -2. Run the following command to retrieve the machine code required for activation: - - ```Bash - show system info - ``` - - **Note**: Activation is currently supported only in the Tree Model. - -3. Copy the returned machine code (displayed as a green string) and send it to the Timecho team: - -```Bash -+--------------------------------------------------------------+ -| SystemInfo| -+--------------------------------------------------------------+ -| 01-TE5NLES4-UDDWCMYE| -+--------------------------------------------------------------+ -Total line number = 1 -It costs 0.030s -``` - -4. Enter the activation code provided by the Timecho team in the CLI using the following format. Wrap the activation code in single quotes ('): - -```Bash -IoTDB> activate '01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===' -``` - -### 6、Verify Activation - -Check the `ClusterActivationStatus` field. If it shows `ACTIVATED`, the database has been successfully activated. - -![](https://alioss.timecho.com/docs/img/%E5%8D%95%E6%9C%BA-%E9%AA%8C%E8%AF%81.png) - -## Common Problem - -1. Activation Fails Repeatedly - 1. Use the `ls -al` command to verify that the ownership of the installation directory matches the current user. - 2. Check the ownership of all files in the `./activation` directory to ensure they belong to the current user. -2. ConfigNode Fails to Start - 1. Review the startup logs to check if any parameters, which cannot be modified after the first startup, were changed. - 2. Check the logs for any other errors. 
If unresolved, contact technical support for assistance. - 3. If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps: - - **Clean the Environment** - -1. Stop all ConfigNode and DataNode processes: - -```Bash -sbin/stop-standalone.sh -``` - -2. Check for any remaining processes: - -```Bash -jps -# or -ps -ef | grep iotdb -``` - -3. If processes remain, terminate them manually: - -```Bash -kill -9 - -#For systems with a single IoTDB instance, you can clean up residual processes with: -ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9 -``` - -4. Delete the `data` and `logs` directories: - -```Bash -cd /data/iotdb -rm -rf data logs -``` - -## Appendix - -### ConfigNode Parameters - -| Parameter | Description | **Is it required** | -| :-------- | :---------------------------------------------------------- | :----------------- | -| -d | Starts the process in daemon mode (runs in the background). | No | - -### DataNode Parameters - -| Parameter | Description | Required | -| :-------- | :----------------------------------------------------------- | :------- | -| -v | Displays version information. | No | -| -f | Runs the script in the foreground without backgrounding it. | No | -| -d | Starts the process in daemon mode (runs in the background). | No | -| -p | Specifies a file to store the process ID for process management. | No | -| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No | -| -g | Prints detailed garbage collection (GC) information. | No | -| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No | -| -E | Specifies the file for JVM error logs. | No | -| -D | Defines system properties in the format `key=value`. | No | -| -X | Passes `-XX` options directly to the JVM. | No | -| -h | Displays the help instructions. 
| No | \ No newline at end of file diff --git a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Telegraf.md b/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Telegraf.md deleted file mode 100644 index f09fb025d..000000000 --- a/src/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Telegraf.md +++ /dev/null @@ -1,22 +0,0 @@ - - -Comming Soon \ No newline at end of file diff --git a/src/UserGuide/latest/API/Programming-CSharp-Native-API.md b/src/UserGuide/latest/API/Programming-CSharp-Native-API.md index 06f403f42..12d431a3a 100644 --- a/src/UserGuide/latest/API/Programming-CSharp-Native-API.md +++ b/src/UserGuide/latest/API/Programming-CSharp-Native-API.md @@ -1,19 +1,22 @@ # C# Native API @@ -32,31 +35,33 @@ Note that the `Apache.IoTDB` package only supports versions greater than `.net f ## Prerequisites -- .NET SDK Version >= 5.0 -- .NET Framework >= 4.6.1 + .NET SDK Version >= 5.0 + .NET Framework >= 4.6.1 ## How to Use the Client (Quick Start) Users can quickly get started by referring to the use cases under the Apache-IoTDB-Client-CSharp-UserCase directory. These use cases serve as a useful resource for getting familiar with the client's functionality and capabilities. -For those who wish to delve deeper into the client's usage and explore more advanced features, the samples directory contains additional code samples. +For those who wish to delve deeper into the client's usage and explore more advanced features, the samples directory contains additional code samples. 
## Developer environment requirements for iotdb-client-csharp -- .NET SDK Version >= 5.0 -- .NET Framework >= 4.6.1 -- ApacheThrift >= 0.14.1 -- NLog >= 4.7.9 +``` +.NET SDK Version >= 5.0 +.NET Framework >= 4.6.1 +ApacheThrift >= 0.14.1 +NLog >= 4.7.9 +``` ### OS -- Linux, Macos or other unix-like OS -- Windows+bash(WSL, cygwin, Git Bash) +* Linux, Macos or other unix-like OS +* Windows+bash(WSL, cygwin, Git Bash) ### Command Line Tools -- dotnet CLI -- Thrift +* dotnet CLI +* Thrift ## Basic interface description @@ -74,7 +79,7 @@ var session_pool = new SessionPool(host, port, pool_size); // Open Session await session_pool.Open(false); -// Create TimeSeries +// Create TimeSeries await session_pool.CreateTimeSeries("root.test_group.test_device.ts1", TSDataType.TEXT, TSEncoding.PLAIN, Compressor.UNCOMPRESSED); await session_pool.CreateTimeSeries("root.test_group.test_device.ts2", TSDataType.BOOLEAN, TSEncoding.PLAIN, Compressor.UNCOMPRESSED); await session_pool.CreateTimeSeries("root.test_group.test_device.ts3", TSDataType.INT32, TSEncoding.PLAIN, Compressor.UNCOMPRESSED); @@ -108,7 +113,7 @@ await session_pool.Close(); - Construction: ```csharp -var rowRecord = +var rowRecord = new RowRecord(long timestamps, List values, List measurements); ``` @@ -126,10 +131,12 @@ var rowRecord = - Construction: ```csharp -var tablet = +var tablet = Tablet(string deviceId, List measurements, List> values, List timestamps); ``` + + ## **API** ### **Basic API** @@ -146,43 +153,43 @@ var tablet = ### **Record API** -| api name | parameters | notes | use example | -| ----------------------------------- | --------------------------------- | ----------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | -| InsertRecordAsync | string, RowRecord | insert single record | session_pool.InsertRecordAsync("root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE", new 
RowRecord(1, values, measures)); | -| InsertRecordsAsync | List\, List\ | insert records | session_pool.InsertRecordsAsync(device_id, rowRecords) | -| InsertRecordsOfOneDeviceAsync | string, List\ | insert records of one device | session_pool.InsertRecordsOfOneDeviceAsync(device_id, rowRecords) | -| InsertRecordsOfOneDeviceSortedAsync | string, List\ | insert sorted records of one device | InsertRecordsOfOneDeviceSortedAsync(deviceId, sortedRowRecords); | -| TestInsertRecordAsync | string, RowRecord | test insert record | session_pool.TestInsertRecordAsync("root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE", rowRecord) | -| TestInsertRecordsAsync | List\, List\ | test insert record | session_pool.TestInsertRecordsAsync(device_id, rowRecords) | +| api name | parameters | notes | use example | +| ----------------------------------- | ----------------------------- | ----------------------------------- | ------------------------------------------------------------ | +| InsertRecordAsync | string, RowRecord | insert single record | session_pool.InsertRecordAsync("root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE", new RowRecord(1, values, measures)); | +| InsertRecordsAsync | List\, List\ | insert records | session_pool.InsertRecordsAsync(device_id, rowRecords) | +| InsertRecordsOfOneDeviceAsync | string, List\ | insert records of one device | session_pool.InsertRecordsOfOneDeviceAsync(device_id, rowRecords) | +| InsertRecordsOfOneDeviceSortedAsync | string, List\ | insert sorted records of one device | InsertRecordsOfOneDeviceSortedAsync(deviceId, sortedRowRecords); | +| TestInsertRecordAsync | string, RowRecord | test insert record | session_pool.TestInsertRecordAsync("root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE", rowRecord) | +| TestInsertRecordsAsync | List\, List\ | test insert record | session_pool.TestInsertRecordsAsync(device_id, rowRecords) | ### **Tablet API** -| api name | parameters | notes | use example | -| 
---------------------- | -------------- | -------------------- | -------------------------------------------- | -| InsertTabletAsync | Tablet | insert single tablet | session_pool.InsertTabletAsync(tablet) | +| api name | parameters | notes | use example | +| ---------------------- | ------------ | -------------------- | -------------------------------------------- | +| InsertTabletAsync | Tablet | insert single tablet | session_pool.InsertTabletAsync(tablet) | | InsertTabletsAsync | List\ | insert tablets | session_pool.InsertTabletsAsync(tablets) | -| TestInsertTabletAsync | Tablet | test insert tablet | session_pool.TestInsertTabletAsync(tablet) | +| TestInsertTabletAsync | Tablet | test insert tablet | session_pool.TestInsertTabletAsync(tablet) | | TestInsertTabletsAsync | List\ | test insert tablets | session_pool.TestInsertTabletsAsync(tablets) | ### **SQL API** -| api name | parameters | notes | use example | -| ----------------------------- | ---------- | ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| ExecuteQueryStatementAsync | string | execute sql query statement | session_pool.ExecuteQueryStatementAsync("select \* from root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE where time<15"); | +| api name | parameters | notes | use example | +| ----------------------------- | ---------- | ------------------------------ | ------------------------------------------------------------ | +| ExecuteQueryStatementAsync | string | execute sql query statement | session_pool.ExecuteQueryStatementAsync("select * from root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE where time<15"); | | ExecuteNonQueryStatementAsync | string | execute sql nonquery statement | session_pool.ExecuteNonQueryStatementAsync( "create timeseries root.97209_TEST_CSHARP_CLIENT_GROUP.TEST_CSHARP_CLIENT_DEVICE.status 
with datatype=BOOLEAN,encoding=PLAIN") | ### **Scheam API** -| api name | parameters | notes | use example | -| -------------------------- | ---------------------------------------------------------------------------- | --------------------------- | -------------------------------------------------------------------------------------------------- | -| SetStorageGroup | string | set storage group | session_pool.SetStorageGroup("root.97209_TEST_CSHARP_CLIENT_GROUP_01") | -| CreateTimeSeries | string, TSDataType, TSEncoding, Compressor | create time series | session_pool.InsertTabletsAsync(tablets) | -| DeleteStorageGroupAsync | string | delete single storage group | session_pool.DeleteStorageGroupAsync("root.97209_TEST_CSHARP_CLIENT_GROUP_01") | -| DeleteStorageGroupsAsync | List\ | delete storage group | session_pool.DeleteStorageGroupAsync("root.97209_TEST_CSHARP_CLIENT_GROUP") | +| api name | parameters | notes | use example | +| -------------------------- | ------------------------------------------------------------ | --------------------------- | ------------------------------------------------------------ | +| SetStorageGroup | string | set storage group | session_pool.SetStorageGroup("root.97209_TEST_CSHARP_CLIENT_GROUP_01") | +| CreateTimeSeries | string, TSDataType, TSEncoding, Compressor | create time series | session_pool.InsertTabletsAsync(tablets) | +| DeleteStorageGroupAsync | string | delete single storage group | session_pool.DeleteStorageGroupAsync("root.97209_TEST_CSHARP_CLIENT_GROUP_01") | +| DeleteStorageGroupsAsync | List\ | delete storage group | session_pool.DeleteStorageGroupAsync("root.97209_TEST_CSHARP_CLIENT_GROUP") | | CreateMultiTimeSeriesAsync | List\, List\ , List\ , List\ | create multi time series | session_pool.CreateMultiTimeSeriesAsync(ts_path_lst, data_type_lst, encoding_lst, compressor_lst); | -| DeleteTimeSeriesAsync | List\ | delete time series | | -| DeleteTimeSeriesAsync | string | delete time series | | -| DeleteDataAsync | 
List\, long, long | delete data | session_pool.DeleteDataAsync(ts_path_lst, 2, 3) | +| DeleteTimeSeriesAsync | List\ | delete time series | | +| DeleteTimeSeriesAsync | string | delete time series | | +| DeleteDataAsync | List\, long, long | delete data | session_pool.DeleteDataAsync(ts_path_lst, 2, 3) | ### **Other API** @@ -190,6 +197,8 @@ var tablet = | -------------------------- | ---------- | --------------------------- | ---------------------------------------------------- | | CheckTimeSeriesExistsAsync | string | check if time series exists | session_pool.CheckTimeSeriesExistsAsync(time series) | + + [e.g.](https://github.com/apache/iotdb-client-csharp/tree/main/samples/Apache.IoTDB.Samples) ## SessionPool @@ -201,3 +210,4 @@ We use the `ConcurrentQueue` data structure to encapsulate a client queue to mai When a request occurs, it will try to find an idle client connection from the Connection pool. If there is no idle connection, the program will need to wait until there is an idle connection When a connection is used up, it will automatically return to the pool and wait for the next time it is used up + diff --git a/src/UserGuide/latest/API/Programming-Cpp-Native-API.md b/src/UserGuide/latest/API/Programming-Cpp-Native-API.md index 0d2267ff1..b462983d2 100644 --- a/src/UserGuide/latest/API/Programming-Cpp-Native-API.md +++ b/src/UserGuide/latest/API/Programming-Cpp-Native-API.md @@ -1,19 +1,22 @@ # C++ Native API @@ -32,75 +35,68 @@ ### Install Required Dependencies - **MAC** + 1. Install Bison: - 1. Install Bison: - - Use the following brew command to install the Bison version: + Use the following brew command to install the Bison version: + ```shell + brew install bison + ``` - ```shell - brew install bison - ``` + 2. Install Boost: Make sure to install the latest version of Boost. - 2. Install Boost: Make sure to install the latest version of Boost. + ```shell + brew install boost + ``` - ```shell - brew install boost - ``` + 3. 
Check OpenSSL: Make sure the OpenSSL library is installed. The default OpenSSL header file path is "/usr/local/opt/openssl/include". - 3. Check OpenSSL: Make sure the OpenSSL library is installed. The default OpenSSL header file path is "/usr/local/opt/openssl/include". - - If you encounter errors related to OpenSSL not being found during compilation, try adding `-Dopenssl.include.dir=""`. + If you encounter errors related to OpenSSL not being found during compilation, try adding `-Dopenssl.include.dir=""`. - **Ubuntu 16.04+ or Other Debian-based Systems** Use the following commands to install dependencies: - ```shell - sudo apt-get update - sudo apt-get install gcc g++ bison flex libboost-all-dev libssl-dev - ``` + ```shell + sudo apt-get update + sudo apt-get install gcc g++ bison flex libboost-all-dev libssl-dev + ``` - **CentOS 7.7+/Fedora/Rocky Linux or Other Red Hat-based Systems** Use the yum command to install dependencies: - ```shell - sudo yum update - sudo yum install gcc gcc-c++ boost-devel bison flex openssl-devel - ``` + ```shell + sudo yum update + sudo yum install gcc gcc-c++ boost-devel bison flex openssl-devel + ``` - **Windows** - 1. Set Up the Build Environment - - - Install MS Visual Studio (version 2019+ recommended): Make sure to select Visual Studio C/C++ IDE and compiler (supporting CMake, Clang, MinGW) during installation. - - Download and install [CMake](https://cmake.org/download/). - - 2. Download and Install Flex, Bison + 1. Set Up the Build Environment + - Install MS Visual Studio (version 2019+ recommended): Make sure to select Visual Studio C/C++ IDE and compiler (supporting CMake, Clang, MinGW) during installation. + - Download and install [CMake](https://cmake.org/download/). - - Download [Win_Flex_Bison](https://sourceforge.net/projects/winflexbison/). 
- - After downloading, rename the executables to flex.exe and bison.exe to ensure they can be found during compilation, and add the directory of these executables to the PATH environment variable. + 2. Download and Install Flex, Bison + - Download [Win_Flex_Bison](https://sourceforge.net/projects/winflexbison/). + - After downloading, rename the executables to flex.exe and bison.exe to ensure they can be found during compilation, and add the directory of these executables to the PATH environment variable. - 3. Install Boost Library + 3. Install Boost Library + - Download [Boost](https://www.boost.org/users/download/). + - Compile Boost locally: Run `bootstrap.bat` and `b2.exe` in sequence. + - Add the Boost installation directory to the PATH environment variable, e.g., `C:\Program Files (x86)\boost_1_78_0`. - - Download [Boost](https://www.boost.org/users/download/). - - Compile Boost locally: Run `bootstrap.bat` and `b2.exe` in sequence. - - Add the Boost installation directory to the PATH environment variable, e.g., `C:\Program Files (x86)\boost_1_78_0`. - - 4. Install OpenSSL - - Download and install [OpenSSL](http://slproweb.com/products/Win32OpenSSL.html). - - Add the include directory under the installation directory to the PATH environment variable. + 4. Install OpenSSL + - Download and install [OpenSSL](http://slproweb.com/products/Win32OpenSSL.html). + - Add the include directory under the installation directory to the PATH environment variable. ### Compilation Clone the source code from git: - ```shell git clone https://github.com/apache/iotdb.git ``` The default main branch is the master branch. 
If you want to use a specific release version, switch to that branch (e.g., version 1.3.2): - ```shell git checkout rc/1.3.2 ``` @@ -108,36 +104,30 @@ git checkout rc/1.3.2 Run Maven to compile in the IoTDB root directory: - Mac or Linux with glibc version >= 2.32 - - ```shell - ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp - ``` + ```shell + ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp + ``` - Linux with glibc version >= 2.31 - - ```shell - ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Diotdb-tools-thrift.version=0.14.1.1-old-glibc-SNAPSHOT - ``` + ```shell + ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Diotdb-tools-thrift.version=0.14.1.1-old-glibc-SNAPSHOT + ``` - Linux with glibc version >= 2.17 - - ```shell - ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Diotdb-tools-thrift.version=0.14.1.1-glibc223-SNAPSHOT - ``` + ```shell + ./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Diotdb-tools-thrift.version=0.14.1.1-glibc223-SNAPSHOT + ``` - Windows using Visual Studio 2022 - - ```batch - .\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp - ``` + ```batch + .\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp + ``` - Windows using Visual Studio 2019 - - ```batch - .\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Dcmake.generator="Visual Studio 16 2019" -Diotdb-tools-thrift.version=0.14.1.1-msvc142-SNAPSHOT - ``` - - - If you haven't added the Boost library path to the PATH environment variable, you need to add the relevant parameters to the compile command, e.g., `-DboostIncludeDir="C:\Program Files (x86)\boost_1_78_0" -DboostLibraryDir="C:\Program Files (x86)\boost_1_78_0\stage\lib"`. 
+ ```batch + .\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Dcmake.generator="Visual Studio 16 2019" -Diotdb-tools-thrift.version=0.14.1.1-msvc142-SNAPSHOT + ``` + - If you haven't added the Boost library path to the PATH environment variable, you need to add the relevant parameters to the compile command, e.g., `-DboostIncludeDir="C:\Program Files (x86)\boost_1_78_0" -DboostLibraryDir="C:\Program Files (x86)\boost_1_78_0\stage\lib"`. After successful compilation, the packaged library files will be located in `iotdb-client/client-cpp/target`, and you can find the compiled example program under `example/client-cpp-example/target`. @@ -146,29 +136,27 @@ After successful compilation, the packaged library files will be located in `iot Q: What are the requirements for the environment on Linux? A: - - The known minimum version requirement for glibc (x86_64 version) is 2.17, and the minimum version for GCC is 5.5. - The known minimum version requirement for glibc (ARM version) is 2.31, and the minimum version for GCC is 10.2. - If the above requirements are not met, you can try compiling Thrift locally: - - Download the code from . - - Run `./mvnw clean install`. - - Go back to the IoTDB code directory and run `./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp`. + - Download the code from https://github.com/apache/iotdb-bin-resources/tree/iotdb-tools-thrift-v0.14.1.0/iotdb-tools-thrift. + - Run `./mvnw clean install`. + - Go back to the IoTDB code directory and run `./mvnw clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp`. Q: How to resolve the `undefined reference to '_libc_single_thread'` error during Linux compilation? A: - - This issue is caused by the precompiled Thrift dependencies requiring a higher version of glibc. 
- You can try adding `-Diotdb-tools-thrift.version=0.14.1.1-glibc223-SNAPSHOT` or `-Diotdb-tools-thrift.version=0.14.1.1-old-glibc-SNAPSHOT` to the Maven compile command. Q: What if I need to compile using Visual Studio 2017 or earlier on Windows? A: - - You can try compiling Thrift locally before compiling the client: - - Download the code from . - - Run `.\mvnw.cmd clean install`. - - Go back to the IoTDB code directory and run `.\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Dcmake.generator="Visual Studio 15 2017"`. + - Download the code from https://github.com/apache/iotdb-bin-resources/tree/iotdb-tools-thrift-v0.14.1.0/iotdb-tools-thrift. + - Run `.\mvnw.cmd clean install`. + - Go back to the IoTDB code directory and run `.\mvnw.cmd clean package -pl example/client-cpp-example -am -DskipTests -P with-cpp -Dcmake.generator="Visual Studio 15 2017"`. + ## Native APIs @@ -177,23 +165,19 @@ Here we show the commonly used interfaces and their parameters in the Native API ### Initialization - Open a Session - ```cpp -void open(); +void open(); ``` - Open a session, with a parameter to specify whether to enable RPC compression - ```cpp -void open(bool enableRPCCompression); +void open(bool enableRPCCompression); ``` - Notice: this RPC compression status of client must comply with that of IoTDB server - Close a Session - ```cpp -void close(); +void close(); ``` ### Data Definition Interface (DDL) @@ -201,13 +185,11 @@ void close(); #### Database Management - CREATE DATABASE - ```cpp void setStorageGroup(const std::string &storageGroupId); ``` - Delete one or several databases - ```cpp void deleteStorageGroup(const std::string &storageGroup); void deleteStorageGroups(const std::vector &storageGroups); @@ -216,11 +198,10 @@ void deleteStorageGroups(const std::vector &storageGroups); #### Timeseries Management - Create one or multiple timeseries - ```cpp void createTimeseries(const std::string &path, TSDataType::TSDataType dataType, 
TSEncoding::TSEncoding encoding, CompressionType::CompressionType compressor); - + void createMultiTimeseries(const std::vector &paths, const std::vector &dataTypes, const std::vector &encodings, @@ -232,7 +213,6 @@ void createMultiTimeseries(const std::vector &paths, ``` - Create aligned timeseries - ```cpp void createAlignedTimeseries(const std::string &deviceId, const std::vector &measurements, @@ -242,14 +222,12 @@ void createAlignedTimeseries(const std::string &deviceId, ``` - Delete one or several timeseries - ```cpp void deleteTimeseries(const std::string &path); void deleteTimeseries(const std::vector &paths); ``` - Check whether the specific timeseries exists. - ```cpp bool checkTimeseriesExists(const std::string &path); ``` @@ -257,25 +235,21 @@ bool checkTimeseriesExists(const std::string &path); #### Schema Template - Create a schema template - ```cpp void createSchemaTemplate(const Template &templ); ``` - Set the schema template named `templateName` at path `prefixPath`. - ```cpp void setSchemaTemplate(const std::string &template_name, const std::string &prefix_path); ``` - Unset the schema template - ```cpp void unsetSchemaTemplate(const std::string &prefix_path, const std::string &template_name); ``` - After measurement template created, you can edit the template with belowed APIs. 
- ```cpp // Add aligned measurements to a template void addAlignedMeasurementsInTemplate(const std::string &template_name, @@ -310,7 +284,6 @@ void deleteNodeInTemplate(const std::string &template_name, const std::string &p ``` - You can query measurement templates with these APIS: - ```cpp // Return the amount of measurements inside a template int countMeasurementsInTemplate(const std::string &template_name); @@ -328,6 +301,7 @@ std::vector showMeasurementsInTemplate(const std::string &template_ std::vector showMeasurementsInTemplate(const std::string &template_name, const std::string &pattern); ``` + ### Data Manipulation Interface (DML) #### Insert @@ -335,28 +309,24 @@ std::vector showMeasurementsInTemplate(const std::string &template_ > It is recommended to use insertTablet to help improve write efficiency. - Insert a Tablet,which is multiple rows of a device, each row has the same measurements - - Better Write Performance - - Support null values: fill the null value with any value, and then mark the null value via BitMap - + - Better Write Performance + - Support null values: fill the null value with any value, and then mark the null value via BitMap ```cpp void insertTablet(Tablet &tablet); ``` - Insert multiple Tablets - ```cpp void insertTablets(std::unordered_map &tablets); ``` - Insert a Record, which contains multiple measurement value of a device at a timestamp - ```cpp void insertRecord(const std::string &deviceId, int64_t time, const std::vector &measurements, const std::vector &types, const std::vector &values); ``` - Insert multiple Records - ```cpp void insertRecords(const std::vector &deviceIds, const std::vector ×, @@ -366,7 +336,6 @@ void insertRecords(const std::vector &deviceIds, ``` - Insert multiple Records that belong to the same device. 
With type info the server has no need to do type inference, which leads a better performance - ```cpp void insertRecordsOfOneDevice(const std::string &deviceId, std::vector ×, @@ -409,7 +378,6 @@ The Insert of aligned timeseries uses interfaces like `insertAlignedXXX`, and ot #### Delete - Delete data in a time range of one or several timeseries - ```cpp void deleteData(const std::string &path, int64_t endTime); void deleteData(const std::vector &paths, int64_t endTime); @@ -419,17 +387,16 @@ void deleteData(const std::vector &paths, int64_t startTime, int64_ ### IoTDB-SQL Interface - Execute query statement - ```cpp unique_ptr executeQueryStatement(const std::string &sql); ``` - Execute non query statement - ```cpp void executeNonQueryStatement(const std::string &sql); ``` + ## Examples The sample code of using these interfaces is in: @@ -445,18 +412,17 @@ If the compilation finishes successfully, the example project will be placed und If errors occur when compiling thrift source code, try to downgrade your xcode-commandline from 12 to 11.5 -see +see https://stackoverflow.com/questions/63592445/ld-unsupported-tapi-file-type-tapi-tbd-in-yaml-file/65518087#65518087 + ### on Windows When Building Thrift and downloading packages via "wget", a possible annoying issue may occur with error message looks like: - ```shell Failed to delete cached file C:\Users\Administrator\.m2\repository\.cache\download-maven-plugin\index.ser ``` - Possible fixes: +- Try to delete the ".m2\repository\\.cache\" directory and try again. +- Add "\true\" configuration to the download-maven-plugin maven phase that complains this error. -- Try to delete the `.m2\repository\.cache`" directory and try again. -- Add `true` configuration to the download-maven-plugin maven phase that complains this error. 
diff --git a/src/UserGuide/latest/API/Programming-Go-Native-API.md b/src/UserGuide/latest/API/Programming-Go-Native-API.md index ca8ce541a..b227ed672 100644 --- a/src/UserGuide/latest/API/Programming-Go-Native-API.md +++ b/src/UserGuide/latest/API/Programming-Go-Native-API.md @@ -1,19 +1,22 @@ # Go Native API @@ -22,39 +25,40 @@ The Git repository for the Go Native API client is located [here](https://github ## Dependencies -- golang >= 1.13 -- make >= 3.0 -- curl >= 7.1.1 -- thrift 0.15.0 -- Linux、Macos or other unix-like systems -- Windows+bash (WSL、cygwin、Git Bash) + * golang >= 1.13 + * make >= 3.0 + * curl >= 7.1.1 + * thrift 0.15.0 + * Linux、Macos or other unix-like systems + * Windows+bash (WSL、cygwin、Git Bash) ## Installation -- go mod + * go mod + +```sh +export GO111MODULE=on +export GOPROXY=https://goproxy.io - ```sh - export GO111MODULE=on - export GOPROXY=https://goproxy.io +mkdir session_example && cd session_example - mkdir session_example && cd session_example +curl -o session_example.go -L https://github.com/apache/iotdb-client-go/raw/main/example/session_example.go - curl -o session_example.go -L https://github.com/apache/iotdb-client-go/raw/main/example/session_example.go +go mod init session_example +go run session_example.go +``` - go mod init session_example - go run session_example.go - ``` +* GOPATH -- GOPATH +```sh +# get thrift 0.15.0 +go get github.com/apache/thrift +cd $GOPATH/src/github.com/apache/thrift +git checkout 0.15.0 - ```sh - # get thrift 0.15.0 - go get github.com/apache/thrift - cd $GOPATH/src/github.com/apache/thrift - git checkout 0.15.0 +mkdir -p $GOPATH/src/iotdb-client-go-example/session_example +cd $GOPATH/src/iotdb-client-go-example/session_example +curl -o session_example.go -L https://github.com/apache/iotdb-client-go/raw/main/example/session_example.go +go run session_example.go +``` - mkdir -p $GOPATH/src/iotdb-client-go-example/session_example - cd $GOPATH/src/iotdb-client-go-example/session_example - curl -o 
session_example.go -L https://github.com/apache/iotdb-client-go/raw/main/example/session_example.go - go run session_example.go - ``` diff --git a/src/UserGuide/latest/API/Programming-JDBC.md b/src/UserGuide/latest/API/Programming-JDBC.md index fa9fc3cc0..0251e469c 100644 --- a/src/UserGuide/latest/API/Programming-JDBC.md +++ b/src/UserGuide/latest/API/Programming-JDBC.md @@ -1,35 +1,34 @@ -# JDBC (Not Recommend) + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. -::: warning +--> -NOTICE: CURRENTLY, JDBC IS USED FOR CONNECTING SOME THIRD-PART TOOLS. -IT CAN NOT PROVIDE HIGH THROUGHPUT FOR WRITE OPERATIONS. -PLEASE USE [Java Native API](./Programming-Java-Native-API.md) INSTEAD +# JDBC (Not Recommend) -::: +*NOTICE: CURRENTLY, JDBC IS USED FOR CONNECTING SOME THIRD-PART TOOLS. +IT CAN NOT PROVIDE HIGH THROUGHPUT FOR WRITE OPERATIONS. 
+PLEASE USE [Java Native API](./Programming-Java-Native-API.md) INSTEAD* ## Dependencies -- JDK >= 1.8+ -- Maven >= 3.9+ +* JDK >= 1.8+ +* Maven >= 3.9+ ## Installation @@ -111,7 +110,7 @@ public class JDBCExample { //Count timeseries group by each node at the given level statement.execute("COUNT TIMESERIES root GROUP BY LEVEL=3"); outputResult(statement.getResultSet()); - + //Execute insert statements in batch statement.addBatch("INSERT INTO root.demo(timestamp,s0) VALUES(1,1);"); @@ -207,37 +206,27 @@ public class JDBCExample { ``` The parameter `version` can be used in the url: - -```java +````java String url = "jdbc:iotdb://127.0.0.1:6667?version=V_1_0"; -``` - -The parameter `version` represents the SQL semantic version used by the client, which is used in order to be compatible with the SQL semantics of `0.12` when upgrading to `0.13`. +```` +The parameter `version` represents the SQL semantic version used by the client, which is used in order to be compatible with the SQL semantics of `0.12` when upgrading to `0.13`. The possible values are: `V_0_12`, `V_0_13`, `V_1_0`. In addition, IoTDB provides additional interfaces in JDBC for users to read and write the database using different character sets (e.g., GB18030) in the connection. The default character set for IoTDB is UTF-8. When users want to use a character set other than UTF-8, they need to specify the charset property in the JDBC connection. For example: - 1. Create a connection using the GB18030 charset: - ```java DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667?charset=GB18030", "root", "root"); ``` - 2. When executing SQL with the `IoTDBStatement` interface, the SQL can be provided as a `byte[]` array, and it will be parsed into a string according to the specified charset. - ```java public boolean execute(byte[] sql) throws SQLException; ``` - 3. 
When outputting query results, the `getBytes` method of `ResultSet` can be used to get `byte[]`, which will be encoded using the charset specified in the connection. - ```java System.out.print(resultSet.getString(i) + " (" + new String(resultSet.getBytes(i), charset) + ")"); ``` - Here is a complete example: - ```java public class JDBCCharsetExample { @@ -304,4 +293,4 @@ public class JDBCCharsetExample { } } } -``` +``` \ No newline at end of file diff --git a/src/UserGuide/latest/API/Programming-Kafka.md b/src/UserGuide/latest/API/Programming-Kafka.md index 22ad13100..0a041448f 100644 --- a/src/UserGuide/latest/API/Programming-Kafka.md +++ b/src/UserGuide/latest/API/Programming-Kafka.md @@ -1,19 +1,22 @@ # Kafka @@ -25,90 +28,91 @@ ### kafka Producer Producing Data Java Code Example ```java -Properties props = new Properties(); -props.put("bootstrap.servers", "127.0.0.1:9092"); -props.put("key.serializer", StringSerializer.class); -props.put("value.serializer", StringSerializer.class); -KafkaProducer producer = new KafkaProducer<>(props); -producer.send( - new ProducerRecord<>( - "Kafka-Test", "key", "root.kafka," + System.currentTimeMillis() + ",value,INT32,100")); -producer.close(); + Properties props = new Properties(); + props.put("bootstrap.servers", "127.0.0.1:9092"); + props.put("key.serializer", StringSerializer.class); + props.put("value.serializer", StringSerializer.class); + KafkaProducer producer = new KafkaProducer<>(props); + producer.send( + new ProducerRecord<>( + "Kafka-Test", "key", "root.kafka," + System.currentTimeMillis() + ",value,INT32,100")); + producer.close(); ``` ### kafka Consumer Receiving Data Java Code Example ```java -Properties props = new Properties(); -props.put("bootstrap.servers", "127.0.0.1:9092"); -props.put("key.deserializer", StringDeserializer.class); -props.put("value.deserializer", StringDeserializer.class); -props.put("auto.offset.reset", "earliest"); -props.put("group.id", "Kafka-Test"); -KafkaConsumer kafkaConsumer = 
new KafkaConsumer<>(props); -kafkaConsumer.subscribe(Collections.singleton("Kafka-Test")); -ConsumerRecords records = kafkaConsumer.poll(Duration.ofSeconds(1)); -``` + Properties props = new Properties(); + props.put("bootstrap.servers", "127.0.0.1:9092"); + props.put("key.deserializer", StringDeserializer.class); + props.put("value.deserializer", StringDeserializer.class); + props.put("auto.offset.reset", "earliest"); + props.put("group.id", "Kafka-Test"); + KafkaConsumer kafkaConsumer = new KafkaConsumer<>(props); + kafkaConsumer.subscribe(Collections.singleton("Kafka-Test")); + ConsumerRecords records = kafkaConsumer.poll(Duration.ofSeconds(1)); + ``` ### Example of Java Code Stored in IoTDB Server ```java -SessionPool pool = - new SessionPool.Builder() - .host("127.0.0.1") - .port(6667) - .user("root") - .password("root") - .maxSize(3) - .build(); -List datas = new ArrayList<>(records.count()); -for (ConsumerRecord record : records) { - datas.add(record.value()); -} -int size = datas.size(); -List deviceIds = new ArrayList<>(size); -List times = new ArrayList<>(size); -List> measurementsList = new ArrayList<>(size); -List> typesList = new ArrayList<>(size); -List> valuesList = new ArrayList<>(size); -for (String data : datas) { - String[] dataArray = data.split(","); - String device = dataArray[0]; - long time = Long.parseLong(dataArray[1]); - List measurements = Arrays.asList(dataArray[2].split(":")); - List types = new ArrayList<>(); - for (String type : dataArray[3].split(":")) { - types.add(TSDataType.valueOf(type)); - } - List values = new ArrayList<>(); - String[] valuesStr = dataArray[4].split(":"); - for (int i = 0; i < valuesStr.length; i++) { - switch (types.get(i)) { - case INT64: - values.add(Long.parseLong(valuesStr[i])); - break; - case DOUBLE: - values.add(Double.parseDouble(valuesStr[i])); - break; - case INT32: - values.add(Integer.parseInt(valuesStr[i])); - break; - case TEXT: - values.add(valuesStr[i]); - break; - case FLOAT: - 
values.add(Float.parseFloat(valuesStr[i])); - break; - case BOOLEAN: - values.add(Boolean.parseBoolean(valuesStr[i])); - break; + SessionPool pool = + new SessionPool.Builder() + .host("127.0.0.1") + .port(6667) + .user("root") + .password("root") + .maxSize(3) + .build(); + List datas = new ArrayList<>(records.count()); + for (ConsumerRecord record : records) { + datas.add(record.value()); } - } - deviceIds.add(device); - times.add(time); - measurementsList.add(measurements); - typesList.add(types); - valuesList.add(values); -} -pool.insertRecords(deviceIds, times, measurementsList, typesList, valuesList); -``` + int size = datas.size(); + List deviceIds = new ArrayList<>(size); + List times = new ArrayList<>(size); + List> measurementsList = new ArrayList<>(size); + List> typesList = new ArrayList<>(size); + List> valuesList = new ArrayList<>(size); + for (String data : datas) { + String[] dataArray = data.split(","); + String device = dataArray[0]; + long time = Long.parseLong(dataArray[1]); + List measurements = Arrays.asList(dataArray[2].split(":")); + List types = new ArrayList<>(); + for (String type : dataArray[3].split(":")) { + types.add(TSDataType.valueOf(type)); + } + List values = new ArrayList<>(); + String[] valuesStr = dataArray[4].split(":"); + for (int i = 0; i < valuesStr.length; i++) { + switch (types.get(i)) { + case INT64: + values.add(Long.parseLong(valuesStr[i])); + break; + case DOUBLE: + values.add(Double.parseDouble(valuesStr[i])); + break; + case INT32: + values.add(Integer.parseInt(valuesStr[i])); + break; + case TEXT: + values.add(valuesStr[i]); + break; + case FLOAT: + values.add(Float.parseFloat(valuesStr[i])); + break; + case BOOLEAN: + values.add(Boolean.parseBoolean(valuesStr[i])); + break; + } + } + deviceIds.add(device); + times.add(time); + measurementsList.add(measurements); + typesList.add(types); + valuesList.add(values); + } + pool.insertRecords(deviceIds, times, measurementsList, typesList, valuesList); + ``` + diff --git 
a/src/UserGuide/latest/API/Programming-MQTT.md b/src/UserGuide/latest/API/Programming-MQTT.md index d33270105..5bbb610cf 100644 --- a/src/UserGuide/latest/API/Programming-MQTT.md +++ b/src/UserGuide/latest/API/Programming-MQTT.md @@ -1,21 +1,23 @@ + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +--> # MQTT Protocol [MQTT](http://mqtt.org/) is a machine-to-machine (M2M)/"Internet of Things" connectivity protocol. @@ -25,60 +27,53 @@ It is useful for connections with remote locations where a small code footprint IoTDB supports the MQTT v3.1(an OASIS Standard) protocol. IoTDB server includes a built-in MQTT service that allows remote devices send messages into IoTDB server directly. -![](https://alioss.timecho.com/docs/img/github/78357432-0c71cf80-75e4-11ea-98aa-c43a54d469ce.png) + -## Built-in MQTT Service +## Built-in MQTT Service The Built-in MQTT Service provide the ability of direct connection to IoTDB through MQTT. It listen the publish messages from MQTT clients -and then write the data into storage immediately. -The MQTT topic corresponds to IoTDB timeseries. + and then write the data into storage immediately. +The MQTT topic corresponds to IoTDB timeseries. 
The messages payload can be format to events by `PayloadFormatter` which loaded by java SPI, and the default implementation is `JSONPayloadFormatter`. The default `json` formatter support two json format and its json array. The following is an MQTT message payload example: ```json -{ - "device": "root.sg.d1", - "timestamp": 1586076045524, - "measurements": ["s1", "s2"], - "values": [0.530635, 0.530635] -} + { + "device":"root.sg.d1", + "timestamp":1586076045524, + "measurements":["s1","s2"], + "values":[0.530635,0.530635] + } ``` - or - ```json -{ - "device": "root.sg.d1", - "timestamps": [1586076045524, 1586076065526], - "measurements": ["s1", "s2"], - "values": [ - [0.530635, 0.530635], - [0.530655, 0.530695] - ] -} + { + "device":"root.sg.d1", + "timestamps":[1586076045524,1586076065526], + "measurements":["s1","s2"], + "values":[[0.530635,0.530635], [0.530655,0.530695]] + } ``` - or json array of the above two. ## MQTT Configurations - The IoTDB MQTT service load configurations from `${IOTDB_HOME}/${IOTDB_CONF}/iotdb-system.properties` by default. 
Configurations are as follows: -| NAME | DESCRIPTION | DEFAULT | -| ---------------------- | :-------------------------------------------------: | :-------: | -| enable_mqtt_service | whether to enable the mqtt service | false | -| mqtt_host | the mqtt service binding host | 127.0.0.1 | -| mqtt_port | the mqtt service binding port | 1883 | -| mqtt_handler_pool_size | the handler pool size for handing the mqtt messages | 1 | -| mqtt_payload_formatter | the mqtt message payload formatter | json | -| mqtt_max_message_size | the max mqtt message size in byte | 1048576 | +| NAME | DESCRIPTION | DEFAULT | +| ------------- |:-------------:|:------:| +| enable_mqtt_service | whether to enable the mqtt service | false | +| mqtt_host | the mqtt service binding host | 127.0.0.1 | +| mqtt_port | the mqtt service binding port | 1883 | +| mqtt_handler_pool_size | the handler pool size for handing the mqtt messages | 1 | +| mqtt_payload_formatter | the mqtt message payload formatter | json | +| mqtt_max_message_size | the max mqtt message size in byte| 1048576 | -## Coding Examples +## Coding Examples The following is an example which a mqtt client send messages to IoTDB server. ```java @@ -108,82 +103,81 @@ connection.disconnect(); ## Customize your MQTT Message Format -If you do not like the above Json format, you can customize your MQTT Message format by just writing several lines +If you do not like the above Json format, you can customize your MQTT Message format by just writing several lines of codes. An example can be found in `example/mqtt-customize` project. Steps: - 1. Create a java project, and add dependency: - - ```xml - - org.apache.iotdb - iotdb-server - 1.1.0-SNAPSHOT - - ``` - +```xml + + org.apache.iotdb + iotdb-server + 1.1.0-SNAPSHOT + +``` 2. 
Define your implementation which implements `org.apache.iotdb.db.protocol.mqtt.PayloadFormatter` - e.g., - - ```java - package org.apache.iotdb.mqtt.server; - - import io.netty.buffer.ByteBuf; - import org.apache.iotdb.db.protocol.mqtt.Message; - import org.apache.iotdb.db.protocol.mqtt.PayloadFormatter; - - import java.nio.charset.StandardCharsets; - import java.util.ArrayList; - import java.util.Arrays; - import java.util.List; - - public class CustomizedJsonPayloadFormatter implements PayloadFormatter { - - @Override - public List format(ByteBuf payload) { - // Suppose the payload is a json format - if (payload == null) { - return null; - } - - String json = payload.toString(StandardCharsets.UTF_8); - // parse data from the json and generate Messages and put them into List ret - List ret = new ArrayList<>(); - // this is just an example, so we just generate some Messages directly - for (int i = 0; i < 2; i++) { - long ts = i; - Message message = new Message(); - message.setDevice("d" + i); - message.setTimestamp(ts); - message.setMeasurements(Arrays.asList("s1", "s2")); - message.setValues(Arrays.asList("4.0" + i, "5.0" + i)); - ret.add(message); - } - return ret; - } - - @Override - public String getName() { - // set the value of mqtt_payload_formatter in iotdb-system.properties as the following string: - return "CustomizedJson"; - } - } - ``` +e.g., +```java +package org.apache.iotdb.mqtt.server; + +import io.netty.buffer.ByteBuf; +import org.apache.iotdb.db.protocol.mqtt.Message; +import org.apache.iotdb.db.protocol.mqtt.PayloadFormatter; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class CustomizedJsonPayloadFormatter implements PayloadFormatter { + + @Override + public List format(ByteBuf payload) { + // Suppose the payload is a json format + if (payload == null) { + return null; + } + + String json = payload.toString(StandardCharsets.UTF_8); + // parse data from the 
json and generate Messages and put them into List ret + List ret = new ArrayList<>(); + // this is just an example, so we just generate some Messages directly + for (int i = 0; i < 2; i++) { + long ts = i; + Message message = new Message(); + message.setDevice("d" + i); + message.setTimestamp(ts); + message.setMeasurements(Arrays.asList("s1", "s2")); + message.setValues(Arrays.asList("4.0" + i, "5.0" + i)); + ret.add(message); + } + return ret; + } + + @Override + public String getName() { + // set the value of mqtt_payload_formatter in iotdb-system.properties as the following string: + return "CustomizedJson"; + } +} +``` 3. modify the file in `src/main/resources/META-INF/services/org.apache.iotdb.db.protocol.mqtt.PayloadFormatter`: - clean the file and put your implementation class name into the file. - In this example, the content is: `org.apache.iotdb.mqtt.server.CustomizedJsonPayloadFormatter` + clean the file and put your implementation class name into the file. + In this example, the content is: `org.apache.iotdb.mqtt.server.CustomizedJsonPayloadFormatter` 4. compile your implementation as a jar file: `mvn package -DskipTests` -Then, in your server: +Then, in your server: 1. Create ${IOTDB_HOME}/ext/mqtt/ folder, and put the jar into this folder. 2. Update configuration to enable MQTT service. (`enable_mqtt_service=true` in `conf/iotdb-system.properties`) 3. Set the value of `mqtt_payload_formatter` in `conf/iotdb-system.properties` as the value of getName() in your implementation - , in this example, the value is `CustomizedJson` + , in this example, the value is `CustomizedJson` 4. Launch the IoTDB server. 5. Now IoTDB will use your implementation to parse the MQTT message. -More: the message format can be anything you want. For example, if it is a binary format, -just use `payload.forEachByte()` or `payload.array` to get bytes content. +More: the message format can be anything you want. 
For example, if it is a binary format, +just use `payload.forEachByte()` or `payload.array` to get bytes content. + + + diff --git a/src/UserGuide/latest/API/Programming-NodeJS-Native-API.md b/src/UserGuide/latest/API/Programming-NodeJS-Native-API.md index e67f1f0d8..35c7964cd 100644 --- a/src/UserGuide/latest/API/Programming-NodeJS-Native-API.md +++ b/src/UserGuide/latest/API/Programming-NodeJS-Native-API.md @@ -1,72 +1,71 @@ # Node.js Native API -Apache IoTDB uses Thrift as a cross-language RPC-framework so access to IoTDB can be achieved through the interfaces provided by Thrift. +Apache IoTDB uses Thrift as a cross-language RPC-framework so access to IoTDB can be achieved through the interfaces provided by Thrift. This document will introduce how to generate a native Node.js interface that can be used to access IoTDB. ## Dependents -- JDK >= 1.8 -- Node.js >= 16.0.0 -- Linux、Macos or like unix -- Windows+bash + * JDK >= 1.8 + * Node.js >= 16.0.0 + * Linux、Macos or like unix + * Windows+bash ## Generate the Node.js native interface 1. Find the `pom.xml` file in the root directory of the IoTDB source code folder. 2. Open the `pom.xml` file and find the following content: - ```xml - - generate-thrift-sources-python - generate-sources - - compile - - - py - ${project.build.directory}/generated-sources-python/ - - + + generate-thrift-sources-python + generate-sources + + compile + + + py + ${project.build.directory}/generated-sources-python/ + + ``` - 3. Duplicate this block and change the `id`, `generator` and `outputDirectory` to this: - ```xml - - generate-thrift-sources-nodejs - generate-sources - - compile - - - js:node - ${project.build.directory}/generated-sources-nodejs/ - - + + generate-thrift-sources-nodejs + generate-sources + + compile + + + js:node + ${project.build.directory}/generated-sources-nodejs/ + + ``` - 4. In the root directory of the IoTDB source code folder,run `mvn clean generate-sources`. 
- This command will automatically delete the files in `iotdb/iotdb-protocol/thrift/target` and `iotdb/iotdb-protocol/thrift-commons/target`, and repopulate the folder with the newly generated files. - The newly generated JavaScript sources will be located in `iotdb/iotdb-protocol/thrift/target/generated-sources-nodejs` in the various modules of the `iotdb-protocol` module. +This command will automatically delete the files in `iotdb/iotdb-protocol/thrift/target` and `iotdb/iotdb-protocol/thrift-commons/target`, and repopulate the folder with the newly generated files. +The newly generated JavaScript sources will be located in `iotdb/iotdb-protocol/thrift/target/generated-sources-nodejs` in the various modules of the `iotdb-protocol` module. ## Using the Node.js native interface @@ -74,7 +73,7 @@ Simply copy the files in `iotdb/iotdb-protocol/thrift/target/generated-sources-n ## rpc interface -```cpp +``` // open a session TSOpenSessionResp openSession(1:TSOpenSessionReq req); @@ -90,7 +89,7 @@ TSStatus executeBatchStatement(1:TSExecuteBatchStatementReq req); // execute query SQL statement TSExecuteStatementResp executeQueryStatement(1:TSExecuteStatementReq req); -// execute insert, delete and update SQL statement +// execute insert, delete and update SQL statement TSExecuteStatementResp executeUpdateStatement(1:TSExecuteStatementReq req); // fetch next query result @@ -99,7 +98,7 @@ TSFetchResultsResp fetchResults(1:TSFetchResultsReq req) // fetch meta data TSFetchMetadataResp fetchMetadata(1:TSFetchMetadataReq req) -// cancel a query +// cancel a query TSStatus cancelOperation(1:TSCancelOperationReq req); // close a query dataset @@ -179,4 +178,4 @@ TSExecuteStatementResp executeRawDataQuery(1:TSRawDataQueryReq req); // request a statement id from server i64 requestStatementId(1:i64 sessionId); -``` +``` \ No newline at end of file diff --git a/src/UserGuide/latest/API/Programming-ODBC.md b/src/UserGuide/latest/API/Programming-ODBC.md index 51ac098ba..8e0d74852 
100644 --- a/src/UserGuide/latest/API/Programming-ODBC.md +++ b/src/UserGuide/latest/API/Programming-ODBC.md @@ -1,155 +1,146 @@ # ODBC - With IoTDB JDBC, IoTDB can be accessed using the ODBC-JDBC bridge. ## Dependencies - -- IoTDB-JDBC's jar-with-dependency package -- ODBC-JDBC bridge (e.g. ZappySys JDBC Bridge) +* IoTDB-JDBC's jar-with-dependency package +* ODBC-JDBC bridge (e.g. ZappySys JDBC Bridge) ## Deployment - ### Preparing JDBC package - Download the source code of IoTDB, and execute the following command in root directory: - ```shell mvn clean package -pl iotdb-client/jdbc -am -DskipTests -P get-jar-with-dependencies ``` - Then, you can see the output `iotdb-jdbc-1.3.2-SNAPSHOT-jar-with-dependencies.jar` under `iotdb-client/jdbc/target` directory. ### Preparing ODBC-JDBC Bridge +*Note: Here we only provide one kind of ODBC-JDBC bridge as the instance. Readers can use other ODBC-JDBC bridges to access IoTDB with the IOTDB-JDBC.* +1. **Download Zappy-Sys ODBC-JDBC Bridge**: + Enter the https://zappysys.com/products/odbc-powerpack/odbc-jdbc-bridge-driver/ website, and click "download". -_Note: Here we only provide one kind of ODBC-JDBC bridge as the instance. Readers can use other ODBC-JDBC bridges to access IoTDB with the IOTDB-JDBC._ - -1. **Download Zappy-Sys ODBC-JDBC Bridge**: - Enter the website, and click "download". - - ![ZappySys_website.jpg](https://alioss.timecho.com/upload/ZappySys_website.jpg) + ![ZappySys_website.jpg](https://alioss.timecho.com/upload/ZappySys_website.jpg) 2. **Prepare IoTDB**: Set up IoTDB cluster, and write a row of data arbitrarily. - - ```sql - IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) - ``` + ```sql + IoTDB > insert into root.ln.wf02.wt02(timestamp,status) values(1,true) + ``` 3. **Deploy and Test the Bridge**: + 1. Open ODBC Data Sources(32/64 bit), depending on the bits of Windows. One possible position is `C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Administrative Tools`. - 1. 
Open ODBC Data Sources(32/64 bit), depending on the bits of Windows. One possible position is `C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Administrative Tools`. + ![ODBC_ADD_EN.jpg](https://alioss.timecho.com/upload/ODBC_ADD_EN.jpg) - ![ODBC_ADD_EN.jpg](https://alioss.timecho.com/upload/ODBC_ADD_EN.jpg) + 2. Click on "add" and select ZappySys JDBC Bridge. - 2. Click on "add" and select ZappySys JDBC Bridge. + ![ODBC_CREATE_EN.jpg](https://alioss.timecho.com/upload/ODBC_CREATE_EN.jpg) - ![ODBC_CREATE_EN.jpg](https://alioss.timecho.com/upload/ODBC_CREATE_EN.jpg) + 3. Fill in the following settings: - 3. Fill in the following settings: + | Property | Content | Example | + |---------------------|-----------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------| + | Connection String | jdbc:iotdb://\:\/ | jdbc:iotdb://127.0.0.1:6667/ | + | Driver Class | org.apache.iotdb.jdbc.IoTDBDriver | org.apache.iotdb.jdbc.IoTDBDriver | + | JDBC driver file(s) | The path of IoTDB JDBC jar-with-dependencies | C:\Users\13361\Documents\GitHub\iotdb\iotdb-client\jdbc\target\iotdb-jdbc-1.3.2-SNAPSHOT-jar-with-dependencies.jar | + | User name | IoTDB's user name | root | + | User password | IoTDB's password | root | - | Property | Content | Example | - | ------------------- | --------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | - | Connection String | jdbc:iotdb://\:\/ | jdbc:iotdb://127.0.0.1:6667/ | - | Driver Class | org.apache.iotdb.jdbc.IoTDBDriver | org.apache.iotdb.jdbc.IoTDBDriver | - | JDBC driver file(s) | The path of IoTDB JDBC jar-with-dependencies | C:\Users\13361\Documents\GitHub\iotdb\iotdb-client\jdbc\target\iotdb-jdbc-1.3.2-SNAPSHOT-jar-with-dependencies.jar | - | User name | IoTDB's user name | root | - | User password | IoTDB's 
password | root | + ![ODBC_CONNECTION.png](https://alioss.timecho.com/upload/ODBC_CONNECTION.png) - ![ODBC_CONNECTION.png](https://alioss.timecho.com/upload/ODBC_CONNECTION.png) + 4. Click on "Test Connection" button, and a "Test Connection: SUCCESSFUL" should appear. - 4. Click on "Test Connection" button, and a "Test Connection: SUCCESSFUL" should appear. + ![ODBC_CONFIG_EN.jpg](https://alioss.timecho.com/upload/ODBC_CONFIG_EN.jpg) - ![ODBC_CONFIG_EN.jpg](https://alioss.timecho.com/upload/ODBC_CONFIG_EN.jpg) + 5. Click the "Preview" button above, and replace the original query text with `select * from root.**`, then click "Preview Data", and the query result should display correctly. - 5. Click the "Preview" button above, and replace the original query text with `select * from root.**`, then click "Preview Data", and the query result should correctly. - - ![ODBC_TEST.jpg](https://alioss.timecho.com/upload/ODBC_TEST.jpg) + ![ODBC_TEST.jpg](https://alioss.timecho.com/upload/ODBC_TEST.jpg) 4. **Operate IoTDB's data with ODBC**: After correct deployment, you can use Microsoft's ODBC library to operate IoTDB's data.
Here's an example written in C#: - - ```C# - using System.Data.Odbc; - - // Get a connection - var dbConnection = new OdbcConnection("DSN=ZappySys JDBC Bridge"); - dbConnection.Open(); - - // Execute the write commands to prepare data - var dbCommand = dbConnection.CreateCommand(); - dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s1) values(1715670861634, 1)"; - dbCommand.ExecuteNonQuery(); - dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s2) values(1715670861634, true)"; - dbCommand.ExecuteNonQuery(); - dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s3) values(1715670861634, 3.1)"; - dbCommand.ExecuteNonQuery(); - - // Execute the read command - dbCommand.CommandText = "SELECT * FROM root.Keller.Flur.Energieversorgung"; - var dbReader = dbCommand.ExecuteReader(); - - // Write the output header - var fCount = dbReader.FieldCount; - Console.Write(":"); - for(var i = 0; i < fCount; i++) - { - var fName = dbReader.GetName(i); - Console.Write(fName + ":"); - } - Console.WriteLine(); - - // Output the content - while (dbReader.Read()) - { - Console.Write(":"); - for(var i = 0; i < fCount; i++) - { - var fieldType = dbReader.GetFieldType(i); - switch (fieldType.Name) - { - case "DateTime": - var dateTime = dbReader.GetInt64(i); - Console.Write(dateTime + ":"); - break; - case "Double": - if (dbReader.IsDBNull(i)) - { - Console.Write("null:"); - } - else - { - var fValue = dbReader.GetDouble(i); - Console.Write(fValue + ":"); - } - break; - default: - Console.Write(fieldType.Name + ":"); - break; - } - } - Console.WriteLine(); - } - - // Shut down gracefully - dbReader.Close(); - dbCommand.Dispose(); - dbConnection.Close(); - ``` - + ```C# + using System.Data.Odbc; + + // Get a connection + var dbConnection = new OdbcConnection("DSN=ZappySys JDBC Bridge"); + dbConnection.Open(); + + // Execute the write commands to prepare data + var dbCommand = dbConnection.CreateCommand(); + 
dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s1) values(1715670861634, 1)"; + dbCommand.ExecuteNonQuery(); + dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s2) values(1715670861634, true)"; + dbCommand.ExecuteNonQuery(); + dbCommand.CommandText = "insert into root.Keller.Flur.Energieversorgung(time, s3) values(1715670861634, 3.1)"; + dbCommand.ExecuteNonQuery(); + + // Execute the read command + dbCommand.CommandText = "SELECT * FROM root.Keller.Flur.Energieversorgung"; + var dbReader = dbCommand.ExecuteReader(); + + // Write the output header + var fCount = dbReader.FieldCount; + Console.Write(":"); + for(var i = 0; i < fCount; i++) + { + var fName = dbReader.GetName(i); + Console.Write(fName + ":"); + } + Console.WriteLine(); + + // Output the content + while (dbReader.Read()) + { + Console.Write(":"); + for(var i = 0; i < fCount; i++) + { + var fieldType = dbReader.GetFieldType(i); + switch (fieldType.Name) + { + case "DateTime": + var dateTime = dbReader.GetInt64(i); + Console.Write(dateTime + ":"); + break; + case "Double": + if (dbReader.IsDBNull(i)) + { + Console.Write("null:"); + } + else + { + var fValue = dbReader.GetDouble(i); + Console.Write(fValue + ":"); + } + break; + default: + Console.Write(fieldType.Name + ":"); + break; + } + } + Console.WriteLine(); + } + + // Shut down gracefully + dbReader.Close(); + dbCommand.Dispose(); + dbConnection.Close(); + ``` This program can write data into IoTDB, and query the data we have just written. 
diff --git a/src/UserGuide/latest/API/Programming-OPC-UA_timecho.md b/src/UserGuide/latest/API/Programming-OPC-UA_timecho.md index 7459c19b7..703b47c68 100644 --- a/src/UserGuide/latest/API/Programming-OPC-UA_timecho.md +++ b/src/UserGuide/latest/API/Programming-OPC-UA_timecho.md @@ -1,19 +1,22 @@ # OPC UA Protocol @@ -26,81 +29,81 @@ OPC UA is a technical specification used in the automation field for communicati - **Client/Server Mode**:In this mode, IoTDB's stream processing engine establishes a connection with the OPC UA Server via an OPC UA Sink. The OPC UA Server maintains data within its Address Space, from which IoTDB can request and retrieve data. Additionally, other OPC UA Clients can access the data on the server. -::: center - - +
+ +
-::: - Features: - - OPC UA will organize the device information received from Sink into folders under the Objects folder according to a tree model. + - OPC UA will organize the device information received from Sink into folders under the Objects folder according to a tree model. - - Each measurement point is recorded as a variable node and the latest value in the current database is recorded. + - Each measurement point is recorded as a variable node and the latest value in the current database is recorded. ### OPC UA Pub/Sub Mode - **Pub/Sub Mode**: In this mode, IoTDB's stream processing engine sends data change events to the OPC UA Server through an OPC UA Sink. These events are published to the server's message queue and managed through Event Nodes. Other OPC UA Clients can subscribe to these Event Nodes to receive notifications upon data changes. -::: center - - - -::: +
+ +
- Features: - + - Each measurement point is wrapped as an Event Node in OPC UA. + - The relevant fields and their meanings are as follows: - | Field | Meaning | Type (Milo) | Example | - | :--------- | :--------------------------------- | :------------ | :-------------------- | - | Time | Timestamp | DateTime | 1698907326198 | - | SourceName | Full path of the measurement point | String | root.test.opc.sensor0 | - | SourceNode | Data type of the measurement point | NodeId | Int32 | - | Message | Data | LocalizedText | 3.0 | + | Field | Meaning | Type (Milo) | Example | + | :--------- | :--------------- | :------------ | :-------------------- | + | Time | Timestamp | DateTime | 1698907326198 | + | SourceName | Full path of the measurement point | String | root.test.opc.sensor0 | + | SourceNode | Data type of the measurement point | NodeId | Int32 | + | Message | Data | LocalizedText | 3.0 | - Events are only sent to clients that are already listening; if a client is not connected, the Event will be ignored. + ## IoTDB OPC Server Startup method ### Syntax The syntax for creating the Sink is as follows: -```sql -create pipe p1 - with source (...) - with processor (...) - with sink ('sink' = 'opc-ua-sink', - 'sink.opcua.tcp.port' = '12686', - 'sink.opcua.https.port' = '8443', - 'sink.user' = 'root', - 'sink.password' = 'root', + +```SQL +create pipe p1 + with source (...) + with processor (...) + with sink ('sink' = 'opc-ua-sink', + 'sink.opcua.tcp.port' = '12686', + 'sink.opcua.https.port' = '8443', + 'sink.user' = 'root', + 'sink.password' = 'root', 'sink.opcua.security.dir' = '...' 
) ``` ### Parameters -| key | value | value range | required or not | default value | -| :--------------------------------- | :-------------------------------------------------- | :------------------------------------------------------- | :-------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| sink | OPC UA SINK | String: opc-ua-sink | Required | | -| sink.opcua.model | OPC UA model used | String: client-server / pub-sub | Optional | client-server | -| sink.opcua.tcp.port | OPC UA's TCP port | Integer: \[0, 65536] | Optional | 12686 | -| sink.opcua.https.port | OPC UA's HTTPS port | Integer: \[0, 65536] | Optional | 8443 | -| sink.opcua.security.dir | Directory for OPC UA's keys and certificates | String: Path, supports absolute and relative directories | Optional | Opc_security folder/in the conf directory of the DataNode related to iotdb
If there is no conf directory for iotdb (such as launching DataNode in IDEA), it will be the iotdb_opc_Security folder/\in the user's home directory | -| sink.opcua.enable-anonymous-access | Whether OPC UA allows anonymous access | Boolean | Optional | true | -| sink.user | User for OPC UA, specified in the configuration | String | Optional | root | -| sink.password | Password for OPC UA, specified in the configuration | String | Optional | root | +| key | value | value range | required or not | default value | +| :------------------------------ | :----------------------------------------------------------- | :------------------------------------- | :------- | :------------- | +| sink | OPC UA SINK | String: opc-ua-sink | Required | | +| sink.opcua.model | OPC UA model used | String: client-server / pub-sub | Optional | client-server | +| sink.opcua.tcp.port | OPC UA's TCP port | Integer: [0, 65536] | Optional | 12686 | +| sink.opcua.https.port | OPC UA's HTTPS port | Integer: [0, 65536] | Optional | 8443 | +| sink.opcua.security.dir | Directory for OPC UA's keys and certificates | String: Path, supports absolute and relative directories | Optional | Opc_security folder/in the conf directory of the DataNode related to iotdb
If there is no conf directory for iotdb (such as launching DataNode in IDEA), it will be the iotdb_opc_Security folder/in the user's home directory | +| sink.opcua.enable-anonymous-access | Whether OPC UA allows anonymous access | Boolean | Optional | true | +| sink.user | User for OPC UA, specified in the configuration | String | Optional | root | +| sink.password | Password for OPC UA, specified in the configuration | String | Optional | root | ### 示例 ```Bash -create pipe p1 +create pipe p1 with sink ('sink' = 'opc-ua-sink', - 'sink.user' = 'root', + 'sink.user' = 'root', 'sink.password' = 'root'); start pipe p1; ``` @@ -113,9 +116,9 @@ start pipe p1; 3. **Multiple DataNodes may have scattered sending/conflict issues**: - - For IoTDB clusters with multiple dataRegions and scattered across different DataNode IPs, data will be sent in a dispersed manner on the leaders of the dataRegions. The client needs to listen to the configuration ports of the DataNode IP separately.。 + - For IoTDB clusters with multiple dataRegions and scattered across different DataNode IPs, data will be sent in a dispersed manner on the leaders of the dataRegions. The client needs to listen to the configuration ports of the DataNode IP separately.。 - - Suggest using this OPC UA server under 1C1D. + - Suggest using this OPC UA server under 1C1D. 4. **Does not support deleting data and modifying measurement point types:** In Client Server mode, OPC UA cannot delete data or change data type settings. In Pub Sub mode, if data is deleted, information cannot be pushed to the client. @@ -125,7 +128,7 @@ start pipe p1; #### Preparation Work -1. Take UAExpert client as an example, download the UAExpert client: +1. Take UAExpert client as an example, download the UAExpert client: https://www.unified-automation.com/downloads/opc-ua-clients.html 2. Install UAExpert and fill in your own certificate information. @@ -133,53 +136,43 @@ start pipe p1; 1. 
Use the following SQL to create and start the OPC UA Sink in client-server mode. For detailed syntax, please refer to: [IoTDB OPC Server Syntax](#syntax) - ```sql - create pipe p1 with sink ('sink'='opc-ua-sink'); - ``` +```SQL +create pipe p1 with sink ('sink'='opc-ua-sink'); +``` 2. Write some data. - ```sql - insert into root.test.db(time, s2) values(now(), 2) - ``` +```SQL +insert into root.test.db(time, s2) values(now(), 2) +``` - ​The metadata is automatically created and enabled here. +​ The metadata is automatically created and enabled here. 3. Configure the connection to IoTDB in UAExpert, where the password should be set to the one defined in the sink.password parameter (using the default password "root" as an example): - ::: center - - - - ::: - - ::: center - - +
+ +
- ::: +
+ +
4. After trusting the server's certificate, you can see the written data in the Objects folder on the left. - ::: center - - - - ::: - - ::: center - - +
+ +
- ::: +
+ +
5. You can drag the node on the left to the center and display the latest value of that node: - ::: center - - - - ::: +
+ +
### Pub / Sub Mode @@ -200,77 +193,64 @@ The steps are as follows: 1. Start IoTDB and write some data. - ```sql - insert into root.a.b(time, c, d) values(now(), 1, 2); - ``` +```SQL +insert into root.a.b(time, c, d) values(now(), 1, 2); +``` - ​The metadata is automatically created and enabled here. +​ The metadata is automatically created and enabled here. 2. Use the following SQL to create and start the OPC UA Sink in Pub-Sub mode. For detailed syntax, please refer to: [IoTDB OPC Server Syntax](#syntax) - ```sql - create pipe p1 with sink ('sink'='opc-ua-sink', - 'sink.opcua.model'='pub-sub'); - start pipe p1; - ``` - - ​ At this point, you can see that the opc certificate-related directory has been created under the server's conf directory. - - ::: center +```SQL +create pipe p1 with sink ('sink'='opc-ua-sink', + 'sink.opcua.model'='pub-sub'); +start pipe p1; +``` - +​ At this point, you can see that the opc certificate-related directory has been created under the server's conf directory. - ::: +
+ +
3. Run the Client connection directly; the Client's certificate will be rejected by the server. - ::: center - - - - ::: +
+ +
4. Go to the server's sink.opcua.security.dir directory, then to the pki's rejected directory, where the Client's certificate should have been generated. - ::: center - - - - ::: +
+ +
5. Move (not copy) the client's certificate into (not into a subdirectory of) the trusted directory's certs folder in the same directory. - ::: center - - - - ::: +
+ +
6. Open the Client connection again; the server's certificate should now be rejected by the Client. - ::: center - - - - ::: +
+ +
7. Go to the client's /client/security directory, then to the pki's rejected directory, and move the server's certificate into (not into a subdirectory of) the trusted directory. - ::: center - - - - ::: +
+ +
8. Open the Client, and now the two-way trust is successful, and the Client can connect to the server. 9. Write data to the server, and the Client will print out the received data. - ::: center - - +
+ +
- ::: ### Notes @@ -279,4 +259,4 @@ The steps are as follows: 2. **No Need to Operate Root Directory Certificates:** During the certificate operation process, there is no need to operate the `iotdb-server.pfx` certificate under the IoTDB security root directory and the `example-client.pfx` directory under the client security directory. When the Client and Server connect bidirectionally, they will send the root directory certificate to each other. If it is the first time the other party sees this certificate, it will be placed in the reject dir. If the certificate is in the trusted/certs, then the other party can trust it. 3. **It is Recommended to Use Java 17+:** - In JVM 8 versions, there may be a key length restriction, resulting in an "Illegal key size" error. For specific versions (such as jdk.1.8u151+), you can add `Security.`_`setProperty`_`("crypto.policy", "unlimited");`; in the create client of ClientExampleRunner to solve this, or you can download the unlimited package `local_policy.jar` and `US_export_policy` to replace the packages in the `JDK/jre/lib/security`. Download link: . +In JVM 8 versions, there may be a key length restriction, resulting in an "Illegal key size" error. For specific versions (such as jdk.1.8u151+), you can add `Security.`*`setProperty`*`("crypto.policy", "unlimited");`; in the create client of ClientExampleRunner to solve this, or you can download the unlimited package `local_policy.jar` and `US_export_policy ` to replace the packages in the `JDK/jre/lib/security `. 
Download link:https://www.oracle.com/java/technologies/javase-jce8-downloads.html。 diff --git a/src/UserGuide/latest/API/Programming-Python-Native-API.md b/src/UserGuide/latest/API/Programming-Python-Native-API.md index 370522c1c..446b0cd53 100644 --- a/src/UserGuide/latest/API/Programming-Python-Native-API.md +++ b/src/UserGuide/latest/API/Programming-Python-Native-API.md @@ -1,19 +1,22 @@ # Python Native API @@ -22,13 +25,15 @@ You have to install thrift (>=0.13) before using the package. + + ## How to use (Example) First, download the package: `pip3 install apache-iotdb` -You can get an example of using the package to read and write data at here: [Session Example](https://github.com/apache/iotdb/blob/rc/1.3.3/iotdb-client/client-py/SessionExample.py) +You can get an example of using the package to read and write data at here:[Session Example](https://github.com/apache/iotdb/blob/rc/2.0.1/iotdb-client/client-py/session_example.py) -An example of aligned timeseries: [Aligned Timeseries Session Example](https://github.com/apache/iotdb/blob/rc/1.3.3/iotdb-client/client-py/SessionAlignedTimeseriesExample.py) +An example of aligned timeseries: [Aligned Timeseries Session Example](https://github.com/apache/iotdb/blob/rc/2.0.1/iotdb-client/client-py/session_aligned_timeseries_example.py) (you need to add `import iotdb` in the head of the file) @@ -49,7 +54,7 @@ session.close() ## Initialization -- Initialize a Session +* Initialize a Session ```python session = Session( @@ -63,7 +68,7 @@ session = Session( ) ``` -- Initialize a Session to connect multiple nodes +* Initialize a Session to connect multiple nodes ```python session = Session.init_from_node_urls( @@ -76,7 +81,7 @@ session = Session.init_from_node_urls( ) ``` -- Open a session, with a parameter to specify whether to enable RPC compression +* Open a session, with a parameter to specify whether to enable RPC compression ```python session.open(enable_rpc_compression=False) @@ -84,12 +89,11 @@ 
session.open(enable_rpc_compression=False) Notice: this RPC compression status of client must comply with that of IoTDB server -- Close a Session +* Close a Session ```python session.close() ``` - ## Managing Session through SessionPool Utilizing SessionPool to manage sessions eliminates the need to worry about session reuse. When the number of session connections reaches the maximum capacity of the pool, requests for acquiring a session will be blocked, and you can set the blocking wait time through parameters. After using a session, it should be returned to the SessionPool using the `putBack` method for proper management. @@ -106,9 +110,7 @@ wait_timeout_in_ms = 3000 # # Create the connection pool session_pool = SessionPool(pool_config, max_pool_size, wait_timeout_in_ms) ``` - -### Create a SessionPool using distributed nodes - +### Create a SessionPool using distributed nodes. ```python pool_config = PoolConfig(node_urls=node_urls=["127.0.0.1:6667", "127.0.0.1:6668", "127.0.0.1:6669"], user_name=username, password=password, fetch_size=1024, @@ -116,7 +118,6 @@ pool_config = PoolConfig(node_urls=node_urls=["127.0.0.1:6667", "127.0.0.1:6668" max_pool_size = 5 wait_timeout_in_ms = 3000 ``` - ### Acquiring a session through SessionPool and manually calling PutBack after use ```python @@ -135,34 +136,33 @@ session_pool.close() ### Database Management -- CREATE DATABASE +* CREATE DATABASE ```python session.set_storage_group(group_name) ``` -- Delete one or several databases +* Delete one or several databases ```python session.delete_storage_group(group_name) session.delete_storage_groups(group_name_lst) ``` - ### Timeseries Management -- Create one or multiple timeseries +* Create one or multiple timeseries ```python session.create_time_series(ts_path, data_type, encoding, compressor, props=None, tags=None, attributes=None, alias=None) - + session.create_multi_time_series( ts_path_lst, data_type_lst, encoding_lst, compressor_lst, props_lst=None, tags_lst=None, 
attributes_lst=None, alias_lst=None ) ``` -- Create aligned timeseries +* Create aligned timeseries ```python session.create_aligned_time_series( @@ -172,13 +172,13 @@ session.create_aligned_time_series( Attention: Alias of measurements are **not supported** currently. -- Delete one or several timeseries +* Delete one or several timeseries ```python session.delete_time_series(paths_list) ``` -- Check whether the specific timeseries exists +* Check whether the specific timeseries exists ```python session.check_time_series_exists(path) @@ -190,13 +190,14 @@ session.check_time_series_exists(path) It is recommended to use insertTablet to help improve write efficiency. -- Insert a Tablet,which is multiple rows of a device, each row has the same measurements - - **Better Write Performance** - - **Support null values**: fill the null value with any value, and then mark the null value via BitMap (from v0.13) +* Insert a Tablet,which is multiple rows of a device, each row has the same measurements + * **Better Write Performance** + * **Support null values**: fill the null value with any value, and then mark the null value via BitMap (from v0.13) + We have two implementations of Tablet in Python API. -- Normal Tablet +* Normal Tablet ```python values_ = [ @@ -223,14 +224,12 @@ tablet_ = Tablet( ) session.insert_tablet(tablet_) ``` - -- Numpy Tablet +* Numpy Tablet Comparing with Tablet, Numpy Tablet is using [numpy.ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) to record data. With less memory footprint and time cost of serialization, the insert performance will be better. **Notice** - 1. time and numerical value columns in Tablet is ndarray 2. recommended to use the specific dtypes to each ndarray, see the example below (if not, the default dtypes are also ok). 
@@ -283,19 +282,19 @@ np_tablet_with_none = NumpyTablet( session.insert_tablet(np_tablet_with_none) ``` -- Insert multiple Tablets +* Insert multiple Tablets ```python session.insert_tablets(tablet_lst) ``` -- Insert a Record +* Insert a Record ```python session.insert_record(device_id, timestamp, measurements_, data_types_, values_) ``` -- Insert multiple Records +* Insert multiple Records ```python session.insert_records( @@ -303,9 +302,10 @@ session.insert_records( ) ``` -- Insert multiple Records that belong to the same device. +* Insert multiple Records that belong to the same device. With type info the server has no need to do type inference, which leads a better performance + ```python session.insert_records_of_one_device(device_id, time_list, measurements_list, data_types_list, values_list) ``` @@ -314,7 +314,7 @@ session.insert_records_of_one_device(device_id, time_list, measurements_list, da When the data is of String type, we can use the following interface to perform type inference based on the value of the value itself. For example, if value is "true" , it can be automatically inferred to be a boolean type. If value is "3.2" , it can be automatically inferred as a flout type. Without type information, server has to do type inference, which may cost some time. 
-- Insert a Record, which contains multiple measurement value of a device at a timestamp +* Insert a Record, which contains multiple measurement value of a device at a timestamp ```python session.insert_str_record(device_id, timestamp, measurements, string_values) @@ -324,38 +324,36 @@ session.insert_str_record(device_id, timestamp, measurements, string_values) The Insert of aligned timeseries uses interfaces like insert_aligned_XXX, and others are similar to the above interfaces: -- insert_aligned_record -- insert_aligned_records -- insert_aligned_records_of_one_device -- insert_aligned_tablet -- insert_aligned_tablets +* insert_aligned_record +* insert_aligned_records +* insert_aligned_records_of_one_device +* insert_aligned_tablet +* insert_aligned_tablets + ## IoTDB-SQL Interface -- Execute query statement +* Execute query statement ```python session.execute_query_statement(sql) ``` -- Execute non query statement +* Execute non query statement ```python session.execute_non_query_statement(sql) ``` -- Execute statement +* Execute statement ```python session.execute_statement(sql) ``` ## Schema Template - ### Create Schema Template - The step for creating a metadata template is as follows - 1. Create the template class 2. Adding MeasurementNode 3. Execute create schema template function @@ -373,87 +371,70 @@ template.add_template(m_node_z) session.create_schema_template(template) ``` - ### Modify Schema Template measurements - Modify measurements in a template, the template must be already created. These are functions that add or delete some measurement nodes. 
- -- add node in template - +* add node in template ```python session.add_measurements_in_template(template_name, measurements_path, data_types, encodings, compressors, is_aligned) ``` -- delete node in template - +* delete node in template ```python session.delete_node_in_template(template_name, path) ``` ### Set Schema Template - ```python session.set_schema_template(template_name, prefix_path) ``` ### Uset Schema Template - ```python session.unset_schema_template(template_name, prefix_path) ``` ### Show Schema Template - -- Show all schema templates - +* Show all schema templates ```python session.show_all_templates() ``` - -- Count all measurements in templates - +* Count all measurements in templates ```python session.count_measurements_in_template(template_name) ``` -- Judge whether the path is measurement or not in templates, This measurement must be in the template - +* Judge whether the path is measurement or not in templates, This measurement must be in the template ```python session.count_measurements_in_template(template_name, path) ``` -- Judge whether the path is exist or not in templates, This path may not belong to the template - +* Judge whether the path is exist or not in templates, This path may not belong to the template ```python session.is_path_exist_in_template(template_name, path) ``` -- Show nodes under in schema template - +* Show nodes under in schema template ```python session.show_measurements_in_template(template_name) ``` -- Show the path prefix where a schema template is set - +* Show the path prefix where a schema template is set ```python session.show_paths_template_set_on(template_name) ``` -- Show the path prefix where a schema template is used (i.e. the time series has been created) - +* Show the path prefix where a schema template is used (i.e. 
the time series has been created) ```python session.show_paths_template_using_on(template_name) ``` ### Drop Schema Template - Delete an existing metadata template,dropping an already set template is not supported - ```python session.drop_schema_template("template_python") ``` + ## Pandas Support To easily transform a query result to a [Pandas Dataframe](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) @@ -481,12 +462,12 @@ session.close() df = ... ``` + ## IoTDB Testcontainer -The Test Support is based on the lib `testcontainers` () which you need to install in your project if you want to use the feature. +The Test Support is based on the lib `testcontainers` (https://testcontainers-python.readthedocs.io/en/latest/index.html) which you need to install in your project if you want to use the feature. To start (and stop) an IoTDB Database in a Docker container simply do: - ```python class MyTestCase(unittest.TestCase): @@ -503,15 +484,13 @@ by default it will load the image `apache/iotdb:latest`, if you want a specific ## IoTDB DBAPI -IoTDB DBAPI implements the Python DB API 2.0 specification (), which defines a common +IoTDB DBAPI implements the Python DB API 2.0 specification (https://peps.python.org/pep-0249/), which defines a common interface for accessing databases in Python. ### Examples - -- Initialization ++ Initialization The initialized parameters are consistent with the session part (except for the sqlalchemy_mode). 
- ```python from iotdb.dbapi import connect @@ -522,27 +501,23 @@ password_ = "root" conn = connect(ip, port_, username_, password_,fetch_size=1024,zone_id="UTC+8",sqlalchemy_mode=False) cursor = conn.cursor() ``` - -- simple SQL statement execution - ++ simple SQL statement execution ```python cursor.execute("SELECT ** FROM root") for row in cursor.fetchall(): print(row) ``` -- execute SQL with parameter ++ execute SQL with parameter IoTDB DBAPI supports pyformat style parameters - ```python cursor.execute("SELECT ** FROM root WHERE time < %(time)s",{"time":"2017-11-01T00:08:00.000"}) for row in cursor.fetchall(): print(row) ``` -- execute SQL with parameter sequences - ++ execute SQL with parameter sequences ```python seq_of_parameters = [ {"timestamp": 1, "temperature": 1}, @@ -555,21 +530,17 @@ sql = "insert into root.cursor(timestamp,temperature) values(%(timestamp)s,%(tem cursor.executemany(sql,seq_of_parameters) ``` -- close the connection and cursor - ++ close the connection and cursor ```python cursor.close() conn.close() ``` ## IoTDB SQLAlchemy Dialect (Experimental) - The SQLAlchemy dialect of IoTDB is written to adapt to Apache Superset. This part is still being improved. Please do not use it in the production environment! - ### Mapping of the metadata - The data model used by SQLAlchemy is a relational data model, which describes the relationships between different entities through tables. While the data model of IoTDB is a hierarchical data model, which organizes the data through a tree structure. In order to adapt IoTDB to the dialect of SQLAlchemy, the original data model in IoTDB needs to be reorganized. @@ -583,27 +554,25 @@ The metadata in the IoTDB are: 4. Measurement The metadata in the SQLAlchemy are: - 1. Schema 2. Table 3. 
Column The mapping relationship between them is: -| The metadata in the SQLAlchemy | The metadata in the IoTDB | -| ------------------------------ | ----------------------------------------- | -| Schema | Database | -| Table | Path ( from database to entity ) + Entity | -| Column | Measurement | +| The metadata in the SQLAlchemy | The metadata in the IoTDB | +| -------------------- | -------------------------------------------- | +| Schema | Database | +| Table | Path ( from database to entity ) + Entity | +| Column | Measurement | The following figure shows the relationship between the two more intuitively: ![sqlalchemy-to-iotdb](https://alioss.timecho.com/docs/img/UserGuide/API/IoTDB-SQLAlchemy/sqlalchemy-to-iotdb.png?raw=true) ### Data type mapping - | data type in IoTDB | data type in SQLAlchemy | -| ------------------ | ----------------------- | +|--------------------|-------------------------| | BOOLEAN | Boolean | | INT32 | Integer | | INT64 | BigInteger | @@ -614,7 +583,7 @@ The following figure shows the relationship between the two more intuitively: ### Example -- execute statement ++ execute statement ```python from sqlalchemy import create_engine @@ -626,7 +595,7 @@ for row in result.fetchall(): print(row) ``` -- ORM (now only simple queries are supported) ++ ORM (now only simple queries are supported) ```python from sqlalchemy import create_engine, Column, Float, BigInteger, MetaData @@ -657,39 +626,49 @@ for row in res: print(row) ``` + ## Developers ### Introduction This is an example of how to connect to IoTDB with python, using the thrift rpc interfaces. Things are almost the same on Windows or Linux, but pay attention to the difference like path separator. + + ### Prerequisites Python3.7 or later is preferred. You have to install Thrift (0.11.0 or later) to compile our thrift file into python code. Below is the official tutorial of installation, eventually, you should have a thrift executable. 
- +``` +http://thrift.apache.org/docs/install/ +``` Before starting you need to install `requirements_dev.txt` in your python environment, e.g. by calling - ```shell pip install -r requirements_dev.txt ``` + + ### Compile the thrift library and Debug -In the root of IoTDB's source code folder, run `mvn clean generate-sources -pl iotdb-client/client-py -am`. +In the root of IoTDB's source code folder, run `mvn clean generate-sources -pl iotdb-client/client-py -am`. This will automatically delete and repopulate the folder `iotdb/thrift` with the generated thrift files. This folder is ignored from git and should **never be pushed to git!** **Notice** Do not upload `iotdb/thrift` to the git repo. + + + ### Session Client & Example We packed up the Thrift interface in `client-py/src/iotdb/Session.py` (similar with its Java counterpart), also provided an example file `client-py/src/SessionExample.py` of how to use the session module. please read it carefully. + Or, another simple example: ```python @@ -705,6 +684,8 @@ zone = session.get_time_zone() session.close() ``` + + ### Tests Please add your custom tests in `tests` folder. @@ -713,11 +694,15 @@ To run all defined tests just type `pytest .` in the root folder. **Notice** Some tests need docker to be started on your system as a test instance is started in a docker container using [testcontainers](https://testcontainers-python.readthedocs.io/en/latest/index.html). + + ### Futher Tools [black](https://pypi.org/project/black/) and [flake8](https://pypi.org/project/flake8/) are installed for autoformatting and linting. Both can be run by `black .` or `flake8 .` respectively. + + ## Releasing To do a release just ensure that you have the right set of generated thrift files. @@ -725,18 +710,23 @@ Then run linting and auto-formatting. Then, ensure that all tests work (via `pytest .`). Then you are good to go to do a release! 
+ + ### Preparing your environment First, install all necessary dev dependencies via `pip install -r requirements_dev.txt`. + + ### Doing the Release There is a convenient script `release.sh` to do all steps for a release. Namely, these are -- Remove all transient directories from last release (if exists) -- (Re-)generate all generated sources via mvn -- Run Linting (flake8) -- Run Tests via pytest -- Build -- Release to pypi +* Remove all transient directories from last release (if exists) +* (Re-)generate all generated sources via mvn +* Run Linting (flake8) +* Run Tests via pytest +* Build +* Release to pypi + diff --git a/src/UserGuide/latest/API/Programming-Rust-Native-API.md b/src/UserGuide/latest/API/Programming-Rust-Native-API.md index 4ec73a52b..f58df68fc 100644 --- a/src/UserGuide/latest/API/Programming-Rust-Native-API.md +++ b/src/UserGuide/latest/API/Programming-Rust-Native-API.md @@ -1,77 +1,78 @@ # Rust Native API Native API -IoTDB uses Thrift as a cross language RPC framework, so access to IoTDB can be achieved through the interface provided by Thrift. +IoTDB uses Thrift as a cross language RPC framework, so access to IoTDB can be achieved through the interface provided by Thrift. This document will introduce how to generate a native Rust interface that can access IoTDB. ## Dependents -- JDK >= 1.8 -- Rust >= 1.0.0 -- thrift 0.14.1 -- Linux、Macos or like unix -- Windows+bash + * JDK >= 1.8 + * Rust >= 1.0.0 + * thrift 0.14.1 + * Linux、Macos or like unix + * Windows+bash Thrift (0.14.1 or higher) must be installed to compile Thrift files into Rust code. The following is the official installation tutorial, and in the end, you should receive a Thrift executable file. - +``` +http://thrift.apache.org/docs/install/ +``` ## Compile the Thrift library and generate the Rust native interface 1. Find the `pom.xml` file in the root directory of the IoTDB source code folder. 2. 
Open the `pom.xml` file and find the following content: - ```xml - - generate-thrift-sources-python - generate-sources - - compile - - - py - ${project.build.directory}/generated-sources-python/ - - + + generate-thrift-sources-python + generate-sources + + compile + + + py + ${project.build.directory}/generated-sources-python/ + + ``` - 3. Duplicate this block and change the `id`, `generator` and `outputDirectory` to this: - ```xml - - generate-thrift-sources-rust - generate-sources - - compile - - - rs - ${project.build.directory}/generated-sources-rust/ - - + + generate-thrift-sources-rust + generate-sources + + compile + + + rs + ${project.build.directory}/generated-sources-rust/ + + ``` - 4. In the root directory of the IoTDB source code folder,run `mvn clean generate-sources`. - This command will automatically delete the files in `iotdb/iotdb-protocol/thrift/target` and `iotdb/iotdb-protocol/thrift-commons/target`, and repopulate the folder with the newly generated files. - The newly generated Rust sources will be located in `iotdb/iotdb-protocol/thrift/target/generated-sources-rust` in the various modules of the `iotdb-protocol` module. +This command will automatically delete the files in `iotdb/iotdb-protocol/thrift/target` and `iotdb/iotdb-protocol/thrift-commons/target`, and repopulate the folder with the newly generated files. +The newly generated Rust sources will be located in `iotdb/iotdb-protocol/thrift/target/generated-sources-rust` in the various modules of the `iotdb-protocol` module. 
## Using the Rust native interface @@ -79,7 +80,7 @@ Copy `iotdb/iotdb-protocol/thrift/target/generated-sources-rust/` and `iotdb/iot ## RPC interface -```cpp +``` // open a session TSOpenSessionResp openSession(1:TSOpenSessionReq req); @@ -95,7 +96,7 @@ TSStatus executeBatchStatement(1:TSExecuteBatchStatementReq req); // execute query SQL statement TSExecuteStatementResp executeQueryStatement(1:TSExecuteStatementReq req); -// execute insert, delete and update SQL statement +// execute insert, delete and update SQL statement TSExecuteStatementResp executeUpdateStatement(1:TSExecuteStatementReq req); // fetch next query result @@ -104,7 +105,7 @@ TSFetchResultsResp fetchResults(1:TSFetchResultsReq req) // fetch meta data TSFetchMetadataResp fetchMetadata(1:TSFetchMetadataReq req) -// cancel a query +// cancel a query TSStatus cancelOperation(1:TSCancelOperationReq req); // close a query dataset diff --git a/src/UserGuide/latest/API/RestServiceV1.md b/src/UserGuide/latest/API/RestServiceV1.md index 9d46c3c65..775235fed 100644 --- a/src/UserGuide/latest/API/RestServiceV1.md +++ b/src/UserGuide/latest/API/RestServiceV1.md @@ -1,23 +1,25 @@ -# RESTful API V1(Not Recommend) + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. 
+ +--> +# RESTful API V1(Not Recommend) IoTDB's RESTful services can be used for query, write, and management operations, using the OpenAPI standard to define interfaces and generate frameworks. ## Enable RESTful Services @@ -31,7 +33,6 @@ RESTful services are disabled by default. ``` ## Authentication - Except the liveness probe API `/ping`, RESTful services use the basic authentication. Each URL request needs to carry `'Authorization': 'Basic ' + base64.encode(username + ':' + password)`. The username used in the following examples is: `root`, and password is: `root`. @@ -47,26 +48,24 @@ Authorization: Basic cm9vdDpyb290 HTTP Status Code:`401` HTTP response body: - - ```json - { - "code": 600, - "message": "WRONG_LOGIN_PASSWORD_ERROR" - } - ``` + ```json + { + "code": 600, + "message": "WRONG_LOGIN_PASSWORD_ERROR" + } + ``` - If the `Authorization` header is missing,the following error is returned: HTTP Status Code:`401` HTTP response body: - - ```json - { - "code": 603, - "message": "UNINITIALIZED_AUTH_ERROR" - } - ``` + ```json + { + "code": 603, + "message": "UNINITIALIZED_AUTH_ERROR" + } + ``` ## Interface @@ -80,7 +79,7 @@ Request path: `http://ip:port/ping` The user name used in the example is: root, password: root -Example request: +Example request: ```shell $ curl http://127.0.0.1:18080/ping @@ -93,10 +92,10 @@ Response status codes: Response parameters: -| parameter name | parameter type | parameter describe | -| :------------- | :------------- | :----------------- | -| code | integer | status code | -| message | string | message | +|parameter name |parameter type |parameter describe| +|:--- | :--- | :---| +|code | integer | status code | +| message | string | message | Sample response: @@ -132,18 +131,18 @@ Request path: `http://ip:port/rest/v1/query` Parameter Description: -| parameter name | parameter type | required | parameter description | -| -------------- | -------------- | -------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| sql | string | yes | | -| rowLimit | integer | no | The maximum number of rows in the result set that can be returned by a query.
If this parameter is not set, the `rest_query_default_row_size_limit` of the configuration file will be used as the default value.
When the number of rows in the returned result set exceeds the limit, the status code `411` will be returned. | +| parameter name | parameter type | required | parameter description | +|----------------| -------------- | -------- | ------------------------------------------------------------ | +| sql | string | yes | | +| rowLimit | integer | no | The maximum number of rows in the result set that can be returned by a query.
If this parameter is not set, the `rest_query_default_row_size_limit` of the configuration file will be used as the default value.
When the number of rows in the returned result set exceeds the limit, the status code `411` will be returned. | Response parameters: -| parameter name | parameter type | parameter description | -| -------------- | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| expressions | array | Array of result set column names for data query, `null` for metadata query | -| columnNames | array | Array of column names for metadata query result set, `null` for data query | -| timestamps | array | Timestamp column, `null` for metadata query | +| parameter name | parameter type | parameter description | +|----------------| -------------- | ------------------------------------------------------------ | +| expressions | array | Array of result set column names for data query, `null` for metadata query | +| columnNames | array | Array of column names for metadata query result set, `null` for data query | +| timestamps | array | Timestamp column, `null` for metadata query | | values | array | A two-dimensional array, the first dimension has the same length as the result set column name array, and the second dimension array represents a column of the result set | **Examples:** @@ -152,24 +151,38 @@ Tip: Statements like `select * from root.xx.**` are not recommended because thos **Expression query** -```shell -curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"sql":"select s3, s4, s3 + 1 from root.sg27 limit 2"}' http://127.0.0.1:18080/rest/v1/query -``` - + ```shell + curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"sql":"select s3, s4, s3 + 1 from root.sg27 limit 2"}' http://127.0.0.1:18080/rest/v1/query + ```` Response instance - -```json -{ - "expressions": ["root.sg27.s3", "root.sg27.s4", "root.sg27.s3 + 1"], - "columnNames": null, - "timestamps": 
[1635232143960, 1635232153960], - "values": [ - [11, null], - [false, true], - [12.0, null] - ] -} -``` + ```json + { + "expressions": [ + "root.sg27.s3", + "root.sg27.s4", + "root.sg27.s3 + 1" + ], + "columnNames": null, + "timestamps": [ + 1635232143960, + 1635232153960 + ], + "values": [ + [ + 11, + null + ], + [ + false, + true + ], + [ + 12.0, + null + ] + ] + } + ``` **Show child paths** @@ -180,9 +193,16 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": ["child paths"], + "columnNames": [ + "child paths" + ], "timestamps": null, - "values": [["root.sg27", "root.sg28"]] + "values": [ + [ + "root.sg27", + "root.sg28" + ] + ] } ``` @@ -195,9 +215,16 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": ["child nodes"], + "columnNames": [ + "child nodes" + ], "timestamps": null, - "values": [["sg27", "sg28"]] + "values": [ + [ + "sg27", + "sg28" + ] + ] } ``` @@ -210,11 +237,20 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": ["database", "ttl"], + "columnNames": [ + "database", + "ttl" + ], "timestamps": null, "values": [ - ["root.sg27", "root.sg28"], - [null, null] + [ + "root.sg27", + "root.sg28" + ], + [ + null, + null + ] ] } ``` @@ -228,9 +264,19 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": ["database", "ttl"], + "columnNames": [ + "database", + "ttl" + ], "timestamps": null, - "values": [["root.sg27"], [null]] + "values": [ + [ + "root.sg27" + ], + [ + null + ] + ] } ``` @@ -293,14 +339,54 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ], "timestamps": null, "values": [ - ["root.sg27.s3", "root.sg27.s4", "root.sg28.s3", "root.sg28.s4"], - [null, null, null, null], - ["root.sg27", "root.sg27", 
"root.sg28", "root.sg28"], - ["INT32", "BOOLEAN", "INT32", "BOOLEAN"], - ["RLE", "RLE", "RLE", "RLE"], - ["SNAPPY", "SNAPPY", "SNAPPY", "SNAPPY"], - [null, null, null, null], - [null, null, null, null] + [ + "root.sg27.s3", + "root.sg27.s4", + "root.sg28.s3", + "root.sg28.s4" + ], + [ + null, + null, + null, + null + ], + [ + "root.sg27", + "root.sg27", + "root.sg28", + "root.sg28" + ], + [ + "INT32", + "BOOLEAN", + "INT32", + "BOOLEAN" + ], + [ + "RLE", + "RLE", + "RLE", + "RLE" + ], + [ + "SNAPPY", + "SNAPPY", + "SNAPPY", + "SNAPPY" + ], + [ + null, + null, + null, + null + ], + [ + null, + null, + null, + null + ] ] } ``` @@ -326,14 +412,54 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ], "timestamps": null, "values": [ - ["root.sg28.s4", "root.sg27.s4", "root.sg28.s3", "root.sg27.s3"], - [null, null, null, null], - ["root.sg28", "root.sg27", "root.sg28", "root.sg27"], - ["BOOLEAN", "BOOLEAN", "INT32", "INT32"], - ["RLE", "RLE", "RLE", "RLE"], - ["SNAPPY", "SNAPPY", "SNAPPY", "SNAPPY"], - [null, null, null, null], - [null, null, null, null] + [ + "root.sg28.s4", + "root.sg27.s4", + "root.sg28.s3", + "root.sg27.s3" + ], + [ + null, + null, + null, + null + ], + [ + "root.sg28", + "root.sg27", + "root.sg28", + "root.sg27" + ], + [ + "BOOLEAN", + "BOOLEAN", + "INT32", + "INT32" + ], + [ + "RLE", + "RLE", + "RLE", + "RLE" + ], + [ + "SNAPPY", + "SNAPPY", + "SNAPPY", + "SNAPPY" + ], + [ + null, + null, + null, + null + ], + [ + null, + null, + null, + null + ] ] } ``` @@ -347,9 +473,15 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": ["count"], + "columnNames": [ + "count" + ], "timestamps": null, - "values": [[4]] + "values": [ + [ + 4 + ] + ] } ``` @@ -362,9 +494,15 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": ["count"], + "columnNames": [ + "count" + ], 
"timestamps": null, - "values": [[4]] + "values": [ + [ + 4 + ] + ] } ``` @@ -377,11 +515,20 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": ["devices", "isAligned"], + "columnNames": [ + "devices", + "isAligned" + ], "timestamps": null, "values": [ - ["root.sg27", "root.sg28"], - ["false", "false"] + [ + "root.sg27", + "root.sg28" + ], + [ + "false", + "false" + ] ] } ``` @@ -395,12 +542,25 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": ["devices", "database", "isAligned"], + "columnNames": [ + "devices", + "database", + "isAligned" + ], "timestamps": null, "values": [ - ["root.sg27", "root.sg28"], - ["root.sg27", "root.sg28"], - ["false", "false"] + [ + "root.sg27", + "root.sg28" + ], + [ + "root.sg27", + "root.sg28" + ], + [ + "false", + "false" + ] ] } ``` @@ -414,9 +574,15 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": ["user"], + "columnNames": [ + "user" + ], "timestamps": null, - "values": [["root"]] + "values": [ + [ + "root" + ] + ] } ``` @@ -428,10 +594,22 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { - "expressions": ["count(root.sg27.s3)", "count(root.sg27.s4)"], + "expressions": [ + "count(root.sg27.s3)", + "count(root.sg27.s4)" + ], "columnNames": null, - "timestamps": [0], - "values": [[1], [2]] + "timestamps": [ + 0 + ], + "values": [ + [ + 1 + ], + [ + 2 + ] + ] } ``` @@ -444,9 +622,19 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "columnNames": ["count(root.sg27.*)", "count(root.sg28.*)"], + "columnNames": [ + "count(root.sg27.*)", + "count(root.sg28.*)" + ], "timestamps": null, - "values": [[3], [3]] + "values": [ + [ + 3 + ], + [ + 3 + ] + ] } ``` @@ -458,15 +646,48 @@ curl -H 
"Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { - "expressions": ["count(root.sg27.s3)", "count(root.sg27.s4)"], + "expressions": [ + "count(root.sg27.s3)", + "count(root.sg27.s4)" + ], "columnNames": null, "timestamps": [ - 1635232143960, 1635232144960, 1635232145960, 1635232146960, 1635232147960, - 1635232148960, 1635232149960, 1635232150960, 1635232151960, 1635232152960 + 1635232143960, + 1635232144960, + 1635232145960, + 1635232146960, + 1635232147960, + 1635232148960, + 1635232149960, + 1635232150960, + 1635232151960, + 1635232152960 ], "values": [ - [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 0, 0, 0, 0, 0, 0, 0, 0, 0] + [ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + [ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] ] } ``` @@ -480,9 +701,25 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" - ```json { "expressions": null, - "columnNames": ["timeseries", "value", "dataType"], - "timestamps": [1635232143960], - "values": [["root.sg27.s3"], ["11"], ["INT32"]] + "columnNames": [ + "timeseries", + "value", + "dataType" + ], + "timestamps": [ + 1635232143960 + ], + "values": [ + [ + "root.sg27.s3" + ], + [ + "11" + ], + [ + "INT32" + ] + ] } ``` @@ -535,25 +772,23 @@ Request path: `http://ip:port/rest/v1/nonQuery` Parameter Description: -| parameter name | parameter type | parameter describe | -| :------------- | :------------- | :----------------- | -| sql | string | query content | +|parameter name |parameter type |parameter describe| +|:--- | :--- | :---| +| sql | string | query content | Example request: - ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"sql":"CREATE DATABASE root.ln"}' http://127.0.0.1:18080/rest/v1/nonQuery ``` Response parameters: -| parameter name | parameter type | parameter describe | -| :------------- | :------------- | :----------------- | -| code | integer | status code | -| message | string | message | 
+|parameter name |parameter type |parameter describe| +|:--- | :--- | :---| +| code | integer | status code | +| message | string | message | Sample response: - ```json { "code": 200, @@ -561,6 +796,8 @@ Sample response: } ``` + + ### insertTablet Request method: `POST` @@ -571,30 +808,28 @@ Request path: `http://ip:port/rest/v1/insertTablet` Parameter Description: -| parameter name | parameter type | is required | parameter describe | -| :------------- | :------------- | :---------- | :----------------------------------------------------- | -| timestamps | array | yes | Time column | -| measurements | array | yes | The name of the measuring point | -| dataTypes | array | yes | The data type | -| values | array | yes | Value columns, the values in each column can be `null` | -| isAligned | boolean | yes | Whether to align the timeseries | -| deviceId | string | yes | Device name | +| parameter name |parameter type |is required|parameter describe| +|:---------------| :--- | :---| :---| +| timestamps | array | yes | Time column | +| measurements | array | yes | The name of the measuring point | +| dataTypes | array | yes | The data type | +| values | array | yes | Value columns, the values in each column can be `null` | +| isAligned | boolean | yes | Whether to align the timeseries | +| deviceId | string | yes | Device name | Example request: - ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"timestamps":[1635232143960,1635232153960],"measurements":["s3","s4"],"dataTypes":["INT32","BOOLEAN"],"values":[[11,null],[false,true]],"isAligned":false,"deviceId":"root.sg27"}' http://127.0.0.1:18080/rest/v1/insertTablet ``` Sample response: -| parameter name | parameter type | parameter describe | -| :------------- | :------------- | :----------------- | -| code | integer | status code | -| message | string | message | +|parameter name |parameter type |parameter describe| +|:--- | :--- | :---| +| code | integer | status 
code | +| message | string | message | Sample response: - ```json { "code": 200, @@ -606,79 +841,83 @@ Sample response: The configuration is located in 'iotdb-system.properties'. -- Set 'enable_rest_service' to 'true' to enable the module, and 'false' to disable the module. By default, this value is' false '. +* Set 'enable_rest_service' to 'true' to enable the module, and 'false' to disable the module. By default, this value is' false '. ```properties enable_rest_service=true ``` -- This parameter is valid only when 'enable_REST_service =true'. Set 'rest_service_port' to a number (1025 to 65535) to customize the REST service socket port. By default, the value is 18080. +* This parameter is valid only when 'enable_REST_service =true'. Set 'rest_service_port' to a number (1025 to 65535) to customize the REST service socket port. By default, the value is 18080. ```properties rest_service_port=18080 ``` -- Set 'enable_swagger' to 'true' to display rest service interface information through swagger, and 'false' to do not display the rest service interface information through the swagger. By default, this value is' false '. +* Set 'enable_swagger' to 'true' to display rest service interface information through swagger, and 'false' to do not display the rest service interface information through the swagger. By default, this value is' false '. ```properties enable_swagger=false ``` -- The maximum number of rows in the result set that can be returned by a query. When the number of rows in the returned result set exceeds the limit, the status code `411` is returned. +* The maximum number of rows in the result set that can be returned by a query. When the number of rows in the returned result set exceeds the limit, the status code `411` is returned. 
-```properties +````properties rest_query_default_row_size_limit=10000 -``` +```` -- Expiration time for caching customer login information (used to speed up user authentication, in seconds, 8 hours by default) +* Expiration time for caching customer login information (used to speed up user authentication, in seconds, 8 hours by default) ```properties cache_expire=28800 ``` -- Maximum number of users stored in the cache (default: 100) + +* Maximum number of users stored in the cache (default: 100) ```properties cache_max_num=100 ``` -- Initial cache size (default: 10) +* Initial cache size (default: 10) ```properties cache_init_num=10 ``` -- REST Service whether to enable SSL configuration, set 'enable_https' to' true 'to enable the module, and set' false 'to disable the module. By default, this value is' false '. +* REST Service whether to enable SSL configuration, set 'enable_https' to' true 'to enable the module, and set' false 'to disable the module. By default, this value is' false '. ```properties enable_https=false ``` -- keyStore location path (optional) +* keyStore location path (optional) ```properties key_store_path= ``` -- keyStore password (optional) + +* keyStore password (optional) ```properties key_store_pwd= ``` -- trustStore location path (optional) + +* trustStore location path (optional) ```properties trust_store_path= ``` -- trustStore password (optional) +* trustStore password (optional) ```properties trust_store_pwd= ``` -- SSL timeout period, in seconds + +* SSL timeout period, in seconds ```properties idle_timeout=5000 diff --git a/src/UserGuide/latest/API/RestServiceV2.md b/src/UserGuide/latest/API/RestServiceV2.md index 36dbf72f0..6c6011bf5 100644 --- a/src/UserGuide/latest/API/RestServiceV2.md +++ b/src/UserGuide/latest/API/RestServiceV2.md @@ -1,23 +1,25 @@ -# RESTful API V2 + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. 
See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +--> +# RESTful API V2 IoTDB's RESTful services can be used for query, write, and management operations, using the OpenAPI standard to define interfaces and generate frameworks. ## Enable RESTful Services @@ -31,7 +33,6 @@ RESTful services are disabled by default. ``` ## Authentication - Except the liveness probe API `/ping`, RESTful services use the basic authentication. Each URL request needs to carry `'Authorization': 'Basic ' + base64.encode(username + ':' + password)`. The username used in the following examples is: `root`, and password is: `root`. 
@@ -47,26 +48,24 @@ Authorization: Basic cm9vdDpyb290 HTTP Status Code:`401` HTTP response body: - - ```json - { - "code": 600, - "message": "WRONG_LOGIN_PASSWORD_ERROR" - } - ``` + ```json + { + "code": 600, + "message": "WRONG_LOGIN_PASSWORD_ERROR" + } + ``` - If the `Authorization` header is missing,the following error is returned: HTTP Status Code:`401` HTTP response body: - - ```json - { - "code": 603, - "message": "UNINITIALIZED_AUTH_ERROR" - } - ``` + ```json + { + "code": 603, + "message": "UNINITIALIZED_AUTH_ERROR" + } + ``` ## Interface @@ -80,7 +79,7 @@ Request path: `http://ip:port/ping` The user name used in the example is: root, password: root -Example request: +Example request: ```shell $ curl http://127.0.0.1:18080/ping @@ -93,10 +92,10 @@ Response status codes: Response parameters: -| parameter name | parameter type | parameter describe | -| :------------- | :------------- | :----------------- | -| code | integer | status code | -| message | string | message | +|parameter name |parameter type |parameter describe| +|:--- | :--- | :---| +|code | integer | status code | +| message | string | message | Sample response: @@ -132,18 +131,18 @@ Request path: `http://ip:port/rest/v2/query` Parameter Description: -| parameter name | parameter type | required | parameter description | -| -------------- | -------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| sql | string | yes | | +| parameter name | parameter type | required | parameter description | +|----------------| -------------- | -------- | ------------------------------------------------------------ | +| sql | string | yes | | | row_limit | integer | no | The maximum number of rows in the result 
set that can be returned by a query.
If this parameter is not set, the `rest_query_default_row_size_limit` of the configuration file will be used as the default value.
When the number of rows in the returned result set exceeds the limit, the status code `411` will be returned. | Response parameters: -| parameter name | parameter type | parameter description | -| -------------- | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| expressions | array | Array of result set column names for data query, `null` for metadata query | -| column_names | array | Array of column names for metadata query result set, `null` for data query | -| timestamps | array | Timestamp column, `null` for metadata query | +| parameter name | parameter type | parameter description | +|----------------| -------------- | ------------------------------------------------------------ | +| expressions | array | Array of result set column names for data query, `null` for metadata query | +| column_names | array | Array of column names for metadata query result set, `null` for data query | +| timestamps | array | Timestamp column, `null` for metadata query | | values | array | A two-dimensional array, the first dimension has the same length as the result set column name array, and the second dimension array represents a column of the result set | **Examples:** @@ -154,17 +153,33 @@ Tip: Statements like `select * from root.xx.**` are not recommended because thos ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"sql":"select s3, s4, s3 + 1 from root.sg27 limit 2"}' http://127.0.0.1:18080/rest/v2/query -``` +```` ```json { - "expressions": ["root.sg27.s3", "root.sg27.s4", "root.sg27.s3 + 1"], + "expressions": [ + "root.sg27.s3", + "root.sg27.s4", + "root.sg27.s3 + 1" + ], "column_names": null, - "timestamps": [1635232143960, 1635232153960], + "timestamps": [ + 1635232143960, + 1635232153960 + ], "values": [ - [11, null], - [false, true], - [12.0, null] + [ + 11, + null + ], 
+ [ + false, + true + ], + [ + 12.0, + null + ] ] } ``` @@ -178,9 +193,16 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": ["child paths"], + "column_names": [ + "child paths" + ], "timestamps": null, - "values": [["root.sg27", "root.sg28"]] + "values": [ + [ + "root.sg27", + "root.sg28" + ] + ] } ``` @@ -193,9 +215,16 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": ["child nodes"], + "column_names": [ + "child nodes" + ], "timestamps": null, - "values": [["sg27", "sg28"]] + "values": [ + [ + "sg27", + "sg28" + ] + ] } ``` @@ -208,11 +237,20 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": ["database", "ttl"], + "column_names": [ + "database", + "ttl" + ], "timestamps": null, "values": [ - ["root.sg27", "root.sg28"], - [null, null] + [ + "root.sg27", + "root.sg28" + ], + [ + null, + null + ] ] } ``` @@ -226,9 +264,19 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": ["database", "ttl"], + "column_names": [ + "database", + "ttl" + ], "timestamps": null, - "values": [["root.sg27"], [null]] + "values": [ + [ + "root.sg27" + ], + [ + null + ] + ] } ``` @@ -291,14 +339,54 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ], "timestamps": null, "values": [ - ["root.sg27.s3", "root.sg27.s4", "root.sg28.s3", "root.sg28.s4"], - [null, null, null, null], - ["root.sg27", "root.sg27", "root.sg28", "root.sg28"], - ["INT32", "BOOLEAN", "INT32", "BOOLEAN"], - ["RLE", "RLE", "RLE", "RLE"], - ["SNAPPY", "SNAPPY", "SNAPPY", "SNAPPY"], - [null, null, null, null], - [null, null, null, null] + [ + "root.sg27.s3", + "root.sg27.s4", + "root.sg28.s3", + "root.sg28.s4" + ], + [ + null, + null, + null, + null + ], + [ + 
"root.sg27", + "root.sg27", + "root.sg28", + "root.sg28" + ], + [ + "INT32", + "BOOLEAN", + "INT32", + "BOOLEAN" + ], + [ + "RLE", + "RLE", + "RLE", + "RLE" + ], + [ + "SNAPPY", + "SNAPPY", + "SNAPPY", + "SNAPPY" + ], + [ + null, + null, + null, + null + ], + [ + null, + null, + null, + null + ] ] } ``` @@ -324,14 +412,54 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ], "timestamps": null, "values": [ - ["root.sg28.s4", "root.sg27.s4", "root.sg28.s3", "root.sg27.s3"], - [null, null, null, null], - ["root.sg28", "root.sg27", "root.sg28", "root.sg27"], - ["BOOLEAN", "BOOLEAN", "INT32", "INT32"], - ["RLE", "RLE", "RLE", "RLE"], - ["SNAPPY", "SNAPPY", "SNAPPY", "SNAPPY"], - [null, null, null, null], - [null, null, null, null] + [ + "root.sg28.s4", + "root.sg27.s4", + "root.sg28.s3", + "root.sg27.s3" + ], + [ + null, + null, + null, + null + ], + [ + "root.sg28", + "root.sg27", + "root.sg28", + "root.sg27" + ], + [ + "BOOLEAN", + "BOOLEAN", + "INT32", + "INT32" + ], + [ + "RLE", + "RLE", + "RLE", + "RLE" + ], + [ + "SNAPPY", + "SNAPPY", + "SNAPPY", + "SNAPPY" + ], + [ + null, + null, + null, + null + ], + [ + null, + null, + null, + null + ] ] } ``` @@ -345,9 +473,15 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": ["count"], + "column_names": [ + "count" + ], "timestamps": null, - "values": [[4]] + "values": [ + [ + 4 + ] + ] } ``` @@ -360,9 +494,15 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": ["count"], + "column_names": [ + "count" + ], "timestamps": null, - "values": [[4]] + "values": [ + [ + 4 + ] + ] } ``` @@ -375,11 +515,20 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": ["devices", "isAligned"], + "column_names": [ + "devices", + "isAligned" + ], "timestamps": null, 
"values": [ - ["root.sg27", "root.sg28"], - ["false", "false"] + [ + "root.sg27", + "root.sg28" + ], + [ + "false", + "false" + ] ] } ``` @@ -393,12 +542,25 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": ["devices", "database", "isAligned"], + "column_names": [ + "devices", + "database", + "isAligned" + ], "timestamps": null, "values": [ - ["root.sg27", "root.sg28"], - ["root.sg27", "root.sg28"], - ["false", "false"] + [ + "root.sg27", + "root.sg28" + ], + [ + "root.sg27", + "root.sg28" + ], + [ + "false", + "false" + ] ] } ``` @@ -412,9 +574,15 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": ["user"], + "column_names": [ + "user" + ], "timestamps": null, - "values": [["root"]] + "values": [ + [ + "root" + ] + ] } ``` @@ -426,10 +594,22 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { - "expressions": ["count(root.sg27.s3)", "count(root.sg27.s4)"], + "expressions": [ + "count(root.sg27.s3)", + "count(root.sg27.s4)" + ], "column_names": null, - "timestamps": [0], - "values": [[1], [2]] + "timestamps": [ + 0 + ], + "values": [ + [ + 1 + ], + [ + 2 + ] + ] } ``` @@ -442,9 +622,19 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { "expressions": null, - "column_names": ["count(root.sg27.*)", "count(root.sg28.*)"], + "column_names": [ + "count(root.sg27.*)", + "count(root.sg28.*)" + ], "timestamps": null, - "values": [[3], [3]] + "values": [ + [ + 3 + ], + [ + 3 + ] + ] } ``` @@ -456,15 +646,48 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X ```json { - "expressions": ["count(root.sg27.s3)", "count(root.sg27.s4)"], + "expressions": [ + "count(root.sg27.s3)", + "count(root.sg27.s4)" + ], "column_names": null, "timestamps": [ - 1635232143960, 1635232144960, 1635232145960, 
1635232146960, 1635232147960, - 1635232148960, 1635232149960, 1635232150960, 1635232151960, 1635232152960 + 1635232143960, + 1635232144960, + 1635232145960, + 1635232146960, + 1635232147960, + 1635232148960, + 1635232149960, + 1635232150960, + 1635232151960, + 1635232152960 ], "values": [ - [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 0, 0, 0, 0, 0, 0, 0, 0, 0] + [ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + [ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] ] } ``` @@ -478,9 +701,25 @@ curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" - ```json { "expressions": null, - "column_names": ["timeseries", "value", "dataType"], - "timestamps": [1635232143960], - "values": [["root.sg27.s3"], ["11"], ["INT32"]] + "column_names": [ + "timeseries", + "value", + "dataType" + ], + "timestamps": [ + 1635232143960 + ], + "values": [ + [ + "root.sg27.s3" + ], + [ + "11" + ], + [ + "INT32" + ] + ] } ``` @@ -533,25 +772,23 @@ Request path: `http://ip:port/rest/v2/nonQuery` Parameter Description: -| parameter name | parameter type | parameter describe | -| :------------- | :------------- | :----------------- | -| sql | string | query content | +|parameter name |parameter type |parameter describe| +|:--- | :--- | :---| +| sql | string | query content | Example request: - ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"sql":"CREATE DATABASE root.ln"}' http://127.0.0.1:18080/rest/v2/nonQuery ``` Response parameters: -| parameter name | parameter type | parameter describe | -| :------------- | :------------- | :----------------- | -| code | integer | status code | -| message | string | message | +|parameter name |parameter type |parameter describe| +|:--- | :--- | :---| +| code | integer | status code | +| message | string | message | Sample response: - ```json { "code": 200, @@ -559,6 +796,8 @@ Sample response: } ``` + + ### insertTablet Request method: `POST` @@ -569,30 +808,28 @@ Request 
path: `http://ip:port/rest/v2/insertTablet` Parameter Description: -| parameter name | parameter type | is required | parameter describe | -| :------------- | :------------- | :---------- | :----------------------------------------------------- | -| timestamps | array | yes | Time column | -| measurements | array | yes | The name of the measuring point | -| data_types | array | yes | The data type | -| values | array | yes | Value columns, the values in each column can be `null` | -| is_aligned | boolean | yes | Whether to align the timeseries | -| device | string | yes | Device name | +| parameter name |parameter type |is required|parameter describe| +|:---------------| :--- | :---| :---| +| timestamps | array | yes | Time column | +| measurements | array | yes | The name of the measuring point | +| data_types | array | yes | The data type | +| values | array | yes | Value columns, the values in each column can be `null` | +| is_aligned | boolean | yes | Whether to align the timeseries | +| device | string | yes | Device name | Example request: - ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"timestamps":[1635232143960,1635232153960],"measurements":["s3","s4"],"data_types":["INT32","BOOLEAN"],"values":[[11,null],[false,true]],"is_aligned":false,"device":"root.sg27"}' http://127.0.0.1:18080/rest/v2/insertTablet ``` Sample response: -| parameter name | parameter type | parameter describe | -| :------------- | :------------- | :----------------- | -| code | integer | status code | -| message | string | message | +|parameter name |parameter type |parameter describe| +|:--- | :--- | :---| +| code | integer | status code | +| message | string | message | Sample response: - ```json { "code": 200, @@ -610,30 +847,28 @@ Request path: `http://ip:port/rest/v2/insertRecords` Parameter Description: -| parameter name | parameter type | is required | parameter describe | -| :---------------- | :------------- | :---------- 
| :----------------------------------------------------- | -| timestamps | array | yes | Time column | -| measurements_list | array | yes | The name of the measuring point | -| data_types_list | array | yes | The data type | -| values_list | array | yes | Value columns, the values in each column can be `null` | -| devices | string | yes | Device name | -| is_aligned | boolean | yes | Whether to align the timeseries | +| parameter name |parameter type |is required|parameter describe| +|:------------------| :--- | :---| :---| +| timestamps | array | yes | Time column | +| measurements_list | array | yes | The name of the measuring point | +| data_types_list | array | yes | The data type | +| values_list | array | yes | Value columns, the values in each column can be `null` | +| devices | string | yes | Device name | +| is_aligned | boolean | yes | Whether to align the timeseries | Example request: - ```shell curl -H "Content-Type:application/json" -H "Authorization:Basic cm9vdDpyb290" -X POST --data '{"timestamps":[1635232113960,1635232151960,1635232143960,1635232143960],"measurements_list":[["s33","s44"],["s55","s66"],["s77","s88"],["s771","s881"]],"data_types_list":[["INT32","INT64"],["FLOAT","DOUBLE"],["FLOAT","DOUBLE"],["BOOLEAN","TEXT"]],"values_list":[[1,11],[2.1,2],[4,6],[false,"cccccc"]],"is_aligned":false,"devices":["root.s1","root.s1","root.s1","root.s3"]}' http://127.0.0.1:18080/rest/v2/insertRecords ``` Sample response: -| parameter name | parameter type | parameter describe | -| :------------- | :------------- | :----------------- | -| code | integer | status code | -| message | string | message | +|parameter name |parameter type |parameter describe| +|:--- | :--- | :---| +| code | integer | status code | +| message | string | message | Sample response: - ```json { "code": 200, @@ -641,83 +876,88 @@ Sample response: } ``` + ## Configuration The configuration is located in 'iotdb-system.properties'. 
-- Set 'enable_rest_service' to 'true' to enable the module, and 'false' to disable the module. By default, this value is' false '. +* Set 'enable_rest_service' to 'true' to enable the module, and 'false' to disable the module. By default, this value is' false '. ```properties enable_rest_service=true ``` -- This parameter is valid only when 'enable_REST_service =true'. Set 'rest_service_port' to a number (1025 to 65535) to customize the REST service socket port. By default, the value is 18080. +* This parameter is valid only when 'enable_REST_service =true'. Set 'rest_service_port' to a number (1025 to 65535) to customize the REST service socket port. By default, the value is 18080. ```properties rest_service_port=18080 ``` -- Set 'enable_swagger' to 'true' to display rest service interface information through swagger, and 'false' to do not display the rest service interface information through the swagger. By default, this value is' false '. +* Set 'enable_swagger' to 'true' to display rest service interface information through swagger, and 'false' to do not display the rest service interface information through the swagger. By default, this value is' false '. ```properties enable_swagger=false ``` -- The maximum number of rows in the result set that can be returned by a query. When the number of rows in the returned result set exceeds the limit, the status code `411` is returned. +* The maximum number of rows in the result set that can be returned by a query. When the number of rows in the returned result set exceeds the limit, the status code `411` is returned. 
-```properties +````properties rest_query_default_row_size_limit=10000 -``` +```` -- Expiration time for caching customer login information (used to speed up user authentication, in seconds, 8 hours by default) +* Expiration time for caching customer login information (used to speed up user authentication, in seconds, 8 hours by default) ```properties cache_expire=28800 ``` -- Maximum number of users stored in the cache (default: 100) + +* Maximum number of users stored in the cache (default: 100) ```properties cache_max_num=100 ``` -- Initial cache size (default: 10) +* Initial cache size (default: 10) ```properties cache_init_num=10 ``` -- REST Service whether to enable SSL configuration, set 'enable_https' to' true 'to enable the module, and set' false 'to disable the module. By default, this value is' false '. +* REST Service whether to enable SSL configuration, set 'enable_https' to' true 'to enable the module, and set' false 'to disable the module. By default, this value is' false '. 
```properties enable_https=false ``` -- keyStore location path (optional) +* keyStore location path (optional) ```properties key_store_path= ``` -- keyStore password (optional) + +* keyStore password (optional) ```properties key_store_pwd= ``` -- trustStore location path (optional) + +* trustStore location path (optional) ```properties trust_store_path= ``` -- trustStore password (optional) +* trustStore password (optional) ```properties trust_store_pwd= ``` -- SSL timeout period, in seconds + +* SSL timeout period, in seconds ```properties idle_timeout=5000 diff --git a/src/UserGuide/latest/Background-knowledge/Data-Type.md b/src/UserGuide/latest/Background-knowledge/Data-Type.md index 03fcf7a6e..bc1f03e1a 100644 --- a/src/UserGuide/latest/Background-knowledge/Data-Type.md +++ b/src/UserGuide/latest/Background-knowledge/Data-Type.md @@ -1,19 +1,22 @@ # Data Type @@ -22,41 +25,40 @@ IoTDB supports the following data types: -- BOOLEAN (Boolean) -- INT32 (Integer) -- INT64 (Long Integer) -- FLOAT (Single Precision Floating Point) -- DOUBLE (Double Precision Floating Point) -- TEXT (Long String) -- STRING(String) -- BLOB(Large binary Object) -- TIMESTAMP(Timestamp) -- DATE(Date) - +* BOOLEAN (Boolean) +* INT32 (Integer) +* INT64 (Long Integer) +* FLOAT (Single Precision Floating Point) +* DOUBLE (Double Precision Floating Point) +* TEXT (Long String) +* STRING(String) +* BLOB(Large binary Object) +* TIMESTAMP(Timestamp) +* DATE(Date) + The difference between STRING and TEXT types is that STRING type has more statistical information and can be used to optimize value filtering queries, while TEXT type is suitable for storing long strings. 
### Float Precision -The time series of **FLOAT** and **DOUBLE** type can specify (MAX_POINT_NUMBER, see [this page](../SQL-Manual/SQL-Manual.md) for more information on how to specify), which is the number of digits after the decimal point of the floating point number, if the encoding method is [RLE](../Technical-Insider/Encoding-and-Compression.md) or [TS_2DIFF](../Technical-Insider/Encoding-and-Compression.md). If MAX_POINT_NUMBER is not specified, the system will use [float_precision](../Reference/DataNode-Config-Manual.md) in the configuration file `iotdb-system.properties`. +The time series of **FLOAT** and **DOUBLE** type can specify (MAX\_POINT\_NUMBER, see [this page](../SQL-Manual/SQL-Manual.md) for more information on how to specify), which is the number of digits after the decimal point of the floating point number, if the encoding method is [RLE](../Technical-Insider/Encoding-and-Compression.md) or [TS\_2DIFF](../Technical-Insider/Encoding-and-Compression.md). If MAX\_POINT\_NUMBER is not specified, the system will use [float\_precision](../Reference/DataNode-Config-Manual.md) in the configuration file `iotdb-system.properties`. ```sql CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=FLOAT, ENCODING=RLE, 'MAX_POINT_NUMBER'='2'; ``` -- For Float data value, The data range is (-Integer.MAX_VALUE, Integer.MAX_VALUE), rather than Float.MAX_VALUE, and the max_point_number is 19, caused by the limitation of function Math.round(float) in Java. -- For Double data value, The data range is (-Long.MAX_VALUE, Long.MAX_VALUE), rather than Double.MAX_VALUE, and the max_point_number is 19, caused by the limitation of function Math.round(double) in Java (Long.MAX_VALUE=9.22E18). +* For Float data value, The data range is (-Integer.MAX_VALUE, Integer.MAX_VALUE), rather than Float.MAX_VALUE, and the max_point_number is 19, caused by the limitation of function Math.round(float) in Java. 
+* For Double data value, The data range is (-Long.MAX_VALUE, Long.MAX_VALUE), rather than Double.MAX_VALUE, and the max_point_number is 19, caused by the limition of function Math.round(double) in Java (Long.MAX_VALUE=9.22E18). ### Data Type Compatibility When the written data type is inconsistent with the data type of time-series, - - If the data type of time-series is not compatible with the written data type, the system will give an error message. - If the data type of time-series is compatible with the written data type, the system will automatically convert the data type. The compatibility of each data type is shown in the following table: | Series Data Type | Supported Written Data Types | -| ---------------- | ---------------------------- | +|------------------|------------------------------| | BOOLEAN | BOOLEAN | | INT32 | INT32 | | INT64 | INT32 INT64 | @@ -72,10 +74,12 @@ The timestamp is the time point at which data is produced. It includes absolute Absolute timestamps in IoTDB are divided into two types: LONG and DATETIME (including DATETIME-INPUT and DATETIME-DISPLAY). When a user inputs a timestamp, he can use a LONG type timestamp or a DATETIME-INPUT type timestamp, and the supported formats of the DATETIME-INPUT type timestamp are shown in the table below: -::: center +
**Supported formats of DATETIME-INPUT type timestamp** + + | Format | | :--------------------------: | | yyyy-MM-dd HH:mm:ss | @@ -92,14 +96,16 @@ Absolute timestamps in IoTDB are divided into two types: LONG and DATETIME (incl | yyyy.MM.dd HH:mm:ss.SSSZZ | | ISO8601 standard time format | -::: +
+ IoTDB can support LONG types and DATETIME-DISPLAY types when displaying timestamps. The DATETIME-DISPLAY type can support user-defined time formats. The syntax of the custom time format is shown in the table below: -::: center +
**The syntax of the custom time format** + | Symbol | Meaning | Presentation | Examples | | :----: | :-------------------------: | :----------: | :--------------------------------: | | G | era | era | era | @@ -132,23 +138,25 @@ IoTDB can support LONG types and DATETIME-DISPLAY types when displaying timestam | ' | escape for text | delimiter | | | '' | single quote | literal | ' | -::: +
### Relative timestamp -Relative time refers to the time relative to the server time `now()` and `DATETIME` time. +Relative time refers to the time relative to the server time ```now()``` and ```DATETIME``` time. -Syntax: + Syntax: -``` -Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ -RelativeTime = (now() | DATETIME) ((+|-) Duration)+ -``` + ``` + Duration = (Digit+ ('Y'|'MO'|'W'|'D'|'H'|'M'|'S'|'MS'|'US'|'NS'))+ + RelativeTime = (now() | DATETIME) ((+|-) Duration)+ + + ``` -::: center +
**The syntax of the duration unit** + | Symbol | Meaning | Presentation | Examples | | :----: | :---------: | :----------------------: | :------: | | y | year | 1y=365 days | 1y | @@ -164,13 +172,13 @@ RelativeTime = (now() | DATETIME) ((+|-) Duration)+ | us | microsecond | 1us=1000 nanoseconds | 1us | | ns | nanosecond | 1ns=1 nanosecond | 1ns | -::: +
-eg: + eg: -``` -now() - 1d2h //1 day and 2 hours earlier than the current server time -now() - 1w //1 week earlier than the current server time -``` + ``` + now() - 1d2h //1 day and 2 hours earlier than the current server time + now() - 1w //1 week earlier than the current server time + ``` -> Note:There must be spaces on the left and right of '+' and '-'. + > Note:There must be spaces on the left and right of '+' and '-'. diff --git a/src/UserGuide/latest/Basic-Concept/Data-Model-and-Terminology.md b/src/UserGuide/latest/Basic-Concept/Data-Model-and-Terminology.md index 7a10118a9..e1aeb3564 100644 --- a/src/UserGuide/latest/Basic-Concept/Data-Model-and-Terminology.md +++ b/src/UserGuide/latest/Basic-Concept/Data-Model-and-Terminology.md @@ -94,7 +94,6 @@ The following are the constraints on the `nodeName`: If you need to use special characters in the path node name, you can use reverse quotation marks to reference the path node name. For specific usage, please refer to [Reverse Quotation Marks](../Reference/Syntax-Rule.md#reverse-quotation-marks). - ### Path Pattern In order to make it easier and faster to express multiple timeseries paths, IoTDB provides users with the path pattern. Users can construct a path pattern by using wildcard `*` and `**`. Wildcard can appear in any node of the path. diff --git a/src/UserGuide/latest/Basic-Concept/Operate-Metadata.md b/src/UserGuide/latest/Basic-Concept/Operate-Metadata.md index 4eb80c594..e0ddf712e 100644 --- a/src/UserGuide/latest/Basic-Concept/Operate-Metadata.md +++ b/src/UserGuide/latest/Basic-Concept/Operate-Metadata.md @@ -20,4 +20,4 @@ redirectTo: Operate-Metadata_apache.html specific language governing permissions and limitations under the License. 
---> +--> \ No newline at end of file diff --git a/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_timecho.md b/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_timecho.md index 08579e8a7..cf9658ff9 100644 --- a/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_timecho.md +++ b/src/UserGuide/latest/Deployment-and-Maintenance/Cluster-Deployment_timecho.md @@ -20,239 +20,244 @@ --> # Cluster Deployment -This section describes how to manually deploy an instance that includes 3 ConfigNodes and 3 DataNodes, commonly known as a 3C3D cluster. +This guide describes how to manually deploy a cluster instance consisting of 3 ConfigNodes and 3 DataNodes (commonly referred to as a 3C3D cluster).
-## Note -1. Before installation, ensure that the system is complete by referring to [System configuration](./Environment-Requirements.md) -2. It is recommended to prioritize using `hostname` for IP configuration during deployment, which can avoid the problem of modifying the host IP in the later stage and causing the database to fail to start. To set the host name, you need to configure /etc/hosts on the target server. For example, if the local IP is 192.168.1.3 and the host name is iotdb-1, you can use the following command to set the server's host name and configure the `cn_internal_address` and `dn_internal_address` of IoTDB using the host name. +## Prerequisites + +1. [System configuration](./Environment-Requirements.md):Ensure the system has been configured according to the preparation guidelines. + +2. **IP Configuration**: It is recommended to use hostnames for IP configuration to prevent issues caused by IP address changes. Configure the `/etc/hosts` file on each server. For example, if the local IP is `11.101.17.224` and the hostname is `iotdb-1`, use the following command to set the hostname: + ``` shell echo "192.168.1.3 iotdb-1" >> /etc/hosts ``` -3. Some parameters cannot be modified after the first startup. Please refer to the "Parameter Configuration" section below for settings. + Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration. + +3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the Parameter Configuration section. + +4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues. -4. Whether in linux or windows, ensure that the IoTDB installation path does not contain Spaces and Chinese characters to avoid software exceptions. +5. **User Permissions**: Choose one of the following permissions during installation and deployment: -5. 
Please note that when installing and deploying IoTDB (including activating and using software), it is necessary to use the same user for operations. You can: -- Using root user (recommended): Using root user can avoid issues such as permissions. -- Using a fixed non root user: - - Using the same user operation: Ensure that the same user is used for start, activation, stop, and other operations, and do not switch users. - - Avoid using sudo: Try to avoid using sudo commands as they execute commands with root privileges, which may cause confusion or security issues. + - **Root User (Recommended)**: This avoids permission-related issues. + - **Non-Root User**: + - Use the same user for all operations, including starting, activating, and stopping services. + - Avoid using `sudo`, which can cause permission conflicts. -6. It is recommended to deploy a monitoring panel, which can monitor important operational indicators and keep track of database operation status at any time. The monitoring panel can be obtained by contacting the business department,The steps for deploying a monitoring panel can refer to:[Monitoring Panel Deployment](./Monitoring-panel-deployment.md) +6. **Monitoring Panel**: Deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access and refer to the "[Monitoring Panel Deployment](./Monitoring-panel-deployment.md)" guide. -## Preparation Steps +## Preparation -1. Prepare the IoTDB database installation package: iotdb enterprise- {version}-bin.zip(The installation package can be obtained from:[IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_timecho.md)) -2. Configure the operating system environment according to environmental requirements(The system environment configuration can be found in:[Environment Requirement](https://www.timecho.com/docs/UserGuide/latest/Deployment-and-Maintenance/Environment-Requirements.html)) +1. 
Obtain the TimechoDB installation package: `timechodb-{version}-bin.zip` following[IoTDB-Package](../Deployment-and-Maintenance/IoTDB-Package_timecho.md) +2. Configure the operating system environment according to [Environment Requirement](./Environment-Requirements.md) ## Installation Steps -Assuming there are three Linux servers now, the IP addresses and service roles are assigned as follows: +Taking a cluster with three Linux servers with the following information as example: -| Node IP | Host Name | Service | -| ----------- | --------- | -------------------- | -| 192.168.1.3 | iotdb-1 | ConfigNode、DataNode | -| 192.168.1.4 | iotdb-2 | ConfigNode、DataNode | -| 192.168.1.5 | iotdb-3 | ConfigNode、DataNode | +| Node IP | Host Name | Service | +| ------------- | --------- | -------------------- | +| 11.101.17.224 | iotdb-1 | ConfigNode、DataNode | +| 11.101.17.225 | iotdb-2 | ConfigNode、DataNode | +| 11.101.17.226 | iotdb-3 | ConfigNode、DataNode | -### Set Host Name +### 1.Configure Hostnames -On three machines, configure the host names separately. To set the host names, configure `/etc/hosts` on the target server. Use the following command: +On all three servers, configure the hostnames by editing the `/etc/hosts` file. Use the following commands: ```Bash -echo "192.168.1.3 iotdb-1" >> /etc/hosts -echo "192.168.1.4 iotdb-2" >> /etc/hosts -echo "192.168.1.5 iotdb-3" >> /etc/hosts +echo "11.101.17.224 iotdb-1" >> /etc/hosts +echo "11.101.17.225 iotdb-2" >> /etc/hosts +echo "11.101.17.226 iotdb-3" >> /etc/hosts ``` -### Configuration +### 2. Extract Installation Package -Unzip the installation package and enter the installation directory +Unzip the installation package and enter the installation directory: ```Plain -unzip iotdb-enterprise-{version}-bin.zip -cd iotdb-enterprise-{version}-bin +unzip timechodb-{version}-bin.zip +cd timechodb-{version}-bin ``` -#### Environment script configuration +### 3. 
Parameters Configuration -- `./conf/confignode-env.sh` configuration +- #### Memory Configuration - | **Configuration** | **Description** | **Default** | **Recommended value** | **Note** | - | :---------------- | :----------------------------------------------------------- | :---------- | :----------------------------------------------------------- | :---------------------------------- | - | MEMORY_SIZE | The total amount of memory that IoTDB ConfigNode nodes can use | - | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect | + Edit the following files for memory allocation: -- `./conf/datanode-env.sh` configuration + - **ConfigNode**: `./conf/confignode-env.sh` (or `.bat` for Windows) + - **DataNode**: `./conf/datanode-env.sh` (or `.bat` for Windows) - | **Configuration** | **Description** | **Default** | **Recommended value** | **Note** | - | :---------------- | :----------------------------------------------------------- | :---------- | :----------------------------------------------------------- | :---------------------------------- | - | MEMORY_SIZE | The total amount of memory that IoTDB DataNode nodes can use | - | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect | + | **Parameter** | **Description** | **Default** | **Recommended** | **Notes** | + | :------------ | :--------------------------------- | :---------- | :-------------- | :-------------------------------------- | + | MEMORY_SIZE | Total memory allocated to the node | Empty | As needed | Effective after restarting the service. | -#### General Configuration +**General Configuration** -Open the general configuration file `./conf/iotdb-system.properties`,The following parameters can be set according to the deployment method: +Set the following parameters in `./conf/iotdb-system.properties`. 
Refer to `./conf/iotdb-system.properties.template` for a complete list. -| **Configuration** | **Description** | 192.168.1.3 | 192.168.1.4 | 192.168.1.5 | -| ------------------------- | ------------------------------------------------------------ | -------------- | -------------- | -------------- | -| cluster_name | Cluster Name | defaultCluster | defaultCluster | defaultCluster | -| schema_replication_factor | The number of metadata replicas, the number of DataNodes should not be less than this number | 3 | 3 | 3 | -| data_replication_factor | The number of data replicas should not be less than this number of DataNodes | 2 | 2 | 2 | +**Cluster-Level Parameters**: -#### ConfigNode Configuration +| **Parameter** | **Description** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | +| :------------------------ | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | +| cluster_name | Name of the cluster | defaultCluster | defaultCluster | defaultCluster | +| schema_replication_factor | Metadata replication factor; DataNode count shall not be fewer than this value | 3 | 3 | 3 | +| data_replication_factor | Data replication factor; DataNode count shall not be fewer than this value | 2 | 2 | 2 | -Open the ConfigNode configuration file `./conf/iotdb-system.properties`,Set the following parameters +#### ConfigNode Parameters -| **Configuration** | **Description** | **Default** | **Recommended value** | 192.168.1.3 | 192.168.1.4 | 192.168.1.5 | Note | -| ------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------------ | ------------- | ------------- | ------------- | ---------------------------------------- | -| cn_internal_address | The address used by ConfigNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended 
to use host name | iotdb-1 | iotdb-2 | iotdb-3 | Cannot be modified after initial startup | -| cn_internal_port | The port used by ConfigNode for communication within the cluster | 10710 | 10710 | 10710 | 10710 | 10710 | Cannot be modified after initial startup | -| cn_consensus_port | The port used for ConfigNode replica group consensus protocol communication | 10720 | 10720 | 10720 | 10720 | 10720 | Cannot be modified after initial startup | -| cn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, `cn_internal_address:cn_internal_port` | 127.0.0.1:10710 | The first CongfigNode's `cn_internal-address: cn_internal_port` | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | Cannot be modified after initial startup | +| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** | +| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- | +| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. | +| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | 10710 | 10710 | 10710 | This parameter cannot be modified after the first startup. | +| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | 10720 | 10720 | 10720 | This parameter cannot be modified after the first startup. | +| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. 
(e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address and port of the seed ConfigNode (e.g., `cn_internal_address:cn_internal_port`) | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. | -#### DataNode Configuration +#### DataNode Parameters -Open DataNode Configuration File `./conf/iotdb-system.properties`,Set the following parameters: +| **Parameter** | **Description** | **Default** | **Recommended** | **11.101.17.224** | **11.101.17.225** | **11.101.17.226** | **Notes** | +| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :---------------- | :---------------- | :---------------- | :--------------------------------------------------------- | +| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. | +| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | 6667 | 6667 | 6667 | Effective after restarting the service. | +| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | iotdb-1 | iotdb-2 | iotdb-3 | This parameter cannot be modified after the first startup. | +| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | 10730 | 10730 | 10730 | This parameter cannot be modified after the first startup. | +| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | 10740 | 10740 | 10740 | This parameter cannot be modified after the first startup. | +| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | 10750 | 10750 | 10750 | This parameter cannot be modified after the first startup. 
| +| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | 10760 | 10760 | 10760 | This parameter cannot be modified after the first startup. | +| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster.(e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Address of the first ConfigNode | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | This parameter cannot be modified after the first startup. | -| **Configuration** | **Description** | **Default** | **Recommended value** | 192.168.1.3 | 192.168.1.4 | 192.168.1.5 | Note | -| ------------------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------------ | ------------- | ------------- | ------------- | ---------------------------------------- | -| dn_rpc_address | The address of the client RPC service | 127.0.0.1 | Recommend using the **IPV4 address or hostname** of the server where it is located | iotdb-1 |iotdb-2 | iotdb-3 | Restarting the service takes effect | -| dn_rpc_port | The port of the client RPC service | 6667 | 6667 | 6667 | 6667 | 6667 | Restarting the service takes effect | -| dn_internal_address | The address used by DataNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | iotdb-1 | iotdb-2 | iotdb-3 | Cannot be modified after initial startup | -| dn_internal_port | The port used by DataNode for communication within the cluster | 10730 | 10730 | 10730 | 10730 | 10730 | Cannot be modified after initial startup | -| dn_mpp_data_exchange_port | The port used by DataNode to receive data streams | 10740 | 10740 | 10740 | 10740 | 10740 | Cannot be modified after initial startup | -| dn_data_region_consensus_port | The port used by DataNode for data replica consensus protocol communication | 10750 | 
10750 | 10750 | 10750 | 10750 | Cannot be modified after initial startup | -| dn_schema_region_consensus_port | The port used by DataNode for metadata replica consensus protocol communication | 10760 | 10760 | 10760 | 10760 | 10760 | Cannot be modified after initial startup | -| dn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, i.e. `cn_internal-address: cn_internal_port` | 127.0.0.1:10710 | The first CongfigNode's cn_internal-address: cn_internal_port | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | Cannot be modified after initial startup | +**Note:** Ensure files are saved after editing. Tools like VSCode Remote do not save changes automatically. -> ❗️Attention: Editors such as VSCode Remote do not have automatic configuration saving function. Please ensure that the modified files are saved persistently, otherwise the configuration items will not take effect +### 4. Start ConfigNode Instances -### Start ConfigNode - -Start the first confignode of IoTDB-1 first, ensuring that the seed confignode node starts first, and then start the second and third confignode nodes in sequence +1. Start the first ConfigNode (`iotdb-1`) as the seed node ```Bash cd sbin ./start-confignode.sh -d #"- d" parameter will start in the background ``` -If the startup fails, please refer to [Common Questions](#common-questions). +2. Start the remaining ConfigNodes (`iotdb-2` and `iotdb-3`) in sequence. + If the startup fails, refer to the [Common Questions](#common-questions) section below for troubleshooting. 
-### Activate Database +### 5. Start DataNode Instances -#### Method 1: Activate file copy activation +On each server, navigate to the `sbin` directory and start the DataNode: -- After starting three confignode nodes in sequence, copy the `activation` folder of each machine and the `system_info` file of each machine to the Timecho staff; -- The staff will return the license files for each ConfigNode node, where 3 license files will be returned; -- Put the three license files into the `activation` folder of the corresponding ConfigNode node; +```Bash +cd sbin +./start-datanode.sh -d #"- d" parameter will start in the background +``` -#### Method 2: Activate Script Activation +### 6. Activate Database -- Obtain the machine codes of three machines in sequence, enter the `sbin` directory of the installation directory, and execute the activation script `start activate.sh`: +#### Option 1: File-Based Activation - ```Bash - cd sbin - ./start-activate.sh - ``` +1. Start all ConfigNodes and DataNodes. +2. Copy the `system_info` file from the `activation` directory on each server and send them to the Timecho team. +3. Place the license files provided by the Timecho team into the corresponding `activation` folder for each node. -- The following information is displayed, where the machine code of one machine is displayed: +#### Option 2: Command-Based Activation - ```Bash - Please copy the system_info's content and send it to Timecho: - 01-KU5LDFFN-PNBEHDRH - Please enter license: - ``` +1. Enter the IoTDB CLI for each node: -- The other two nodes execute the activation script `start activate.sh` in sequence, and then copy the machine codes of the three machines obtained to the Timecho staff -- The staff will return 3 activation codes, which normally correspond to the order of the provided 3 machine codes.
Please paste each activation code into the previous command line prompt `Please enter license:`, as shown below: +- **For Table Model**: - ```Bash - Please enter license: - Jw+MmF+Atxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx5bAOXNeob5l+HO5fEMgzrW8OJPh26Vl6ljKUpCvpTiw== - License has been stored to sbin/../activation/license - Import completed. Please start cluster and excute 'show cluster' to verify activation status - ``` + ```SQL + # For Linux or macOS + ./start-cli.sh -sql_dialect table + + # For Windows + ./start-cli.bat -sql_dialect table + ``` -### Start DataNode +- **For Tree Model**: - Enter the `sbin` directory of iotdb and start three datanode nodes in sequence: + ```SQL + # For Linux or macOS + ./start-cli.sh + + # For Windows + ./start-cli.bat + ``` -```Bash -cd sbin -./start-datanode.sh -d #"- d" parameter will start in the background -``` +2. Run the following command to retrieve the machine code required for activation: -### Verify Deployment + ```Bash + show system info + ``` -Can be executed directly Cli startup script in `./sbin` directory: + **Note**: Activation is currently supported only in the Tree Model. -```Plain -./start-cli.sh -h ip(local IP or domain name) -p port(6667) -``` +3. Copy the returned machine code of each server (displayed as a green string) and send it to the Timecho team: - After successful startup, the following interface will appear displaying successful installation of IOTDB. + ```Bash + +--------------------------------------------------------------+ + | SystemInfo| + +--------------------------------------------------------------+ + |01-TE5NLES4-UDDWCMYE,01-GG5NLES4-XXDWCMYE,01-FF5NLES4-WWWWCMYE| + +--------------------------------------------------------------+ + Total line number = 1 + It costs 0.030s + ``` + +4. Enter the activation codes provided by the Timecho team in the CLI in sequence using the following format. 
Wrap the activation code in single quotes ('): + + ```Bash + IoTDB> activate '01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===,01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===' + ``` -![](https://alioss.timecho.com/docs/img/%E4%BC%81%E4%B8%9A%E7%89%88%E6%88%90%E5%8A%9F.png) +### 7.Verify Activation -After the installation success interface appears, continue to check if the activation is successful and use the `show cluster` command. +Check the `ClusterActivationStatus` field. If it shows `ACTIVATED`, the database has been successfully activated. -When you see the display of `Activated` on the far right, it indicates successful activation. +![](https://alioss.timecho.com/docs/img/%E9%9B%86%E7%BE%A4-%E9%AA%8C%E8%AF%81.png) -![](https://alioss.timecho.com/docs/img/%E4%BC%81%E4%B8%9A%E7%89%88%E6%BF%80%E6%B4%BB.png) +## Maintenance -> The appearance of `ACTIVATED (W)` indicates passive activation, which means that this Configurable Node does not have a license file (or has not issued the latest license file with a timestamp), and its activation depends on other Activated Configurable Nodes in the cluster. At this point, it is recommended to check if the license file has been placed in the license folder. If not, please place the license file. 
If a license file already exists, it may be due to inconsistency between the license file of this node and the information of other nodes. Please contact Timecho staff to reapply. +### ConfigNode Maintenance -## Node Maintenance Steps +ConfigNode maintenance includes adding and removing ConfigNodes. Common use cases include: -### ConfigNode Node Maintenance +- **Cluster Expansion:** If the cluster contains only 1 ConfigNode, adding 2 more ConfigNodes enhances high availability, resulting in a total of 3 ConfigNodes. +- **Cluster Fault Recovery:** If a ConfigNode's machine fails and it cannot function normally, remove the faulty ConfigNode and add a new one to the cluster. -ConfigNode node maintenance is divided into two types of operations: adding and removing ConfigNodes, with two common use cases: -- Cluster expansion: For example, when there is only one ConfigNode in the cluster, and you want to increase the high availability of ConfigNode nodes, you can add two ConfigNodes, making a total of three ConfigNodes in the cluster. -- Cluster failure recovery: When the machine where a ConfigNode is located fails, making the ConfigNode unable to run normally, you can remove this ConfigNode and then add a new ConfigNode to the cluster. +**Note:** After completing ConfigNode maintenance, ensure that the cluster contains either 1 or 3 active ConfigNodes. Two ConfigNodes do not provide high availability, and more than three ConfigNodes can degrade performance. -> ❗️Note, after completing ConfigNode node maintenance, you need to ensure that there are 1 or 3 ConfigNodes running normally in the cluster. Two ConfigNodes do not have high availability, and more than three ConfigNodes will lead to performance loss. 
+#### Adding a ConfigNode -#### Adding ConfigNode Nodes +**Linux /** **MacOS**: -Script command: -```shell -# Linux / MacOS -# First switch to the IoTDB root directory +```Plain sbin/start-confignode.sh +``` + +**Windows:** -# Windows -# First switch to the IoTDB root directory +```Plain sbin/start-confignode.bat ``` -Parameter introduction: +#### Removing a ConfigNode -| Parameter | Description | Is it required | -| :--- | :--------------------------------------------- | :----------- | -| -v | Show version information | No | -| -f | Run the script in the foreground, do not put it in the background | No | -| -d | Start in daemon mode, i.e. run in the background | No | -| -p | Specify a file to store the process ID for process management | No | -| -c | Specify the path to the configuration file folder, the script will load the configuration file from here | No | -| -g | Print detailed garbage collection (GC) information | No | -| -H | Specify the path of the Java heap dump file, used when JVM memory overflows | No | -| -E | Specify the path of the JVM error log file | No | -| -D | Define system properties, in the format key=value | No | -| -X | Pass -XX parameters directly to the JVM | No | -| -h | Help instruction | No | +1. Connect to the cluster using the CLI and confirm the internal address and port of the ConfigNode to be removed: -#### Removing ConfigNode Nodes + ```Plain + show confignodes; + ``` -First connect to the cluster through the CLI and confirm the internal address and port number of the ConfigNode you want to remove by using `show confignodes`: +Example output: -```Bash +```Plain IoTDB> show confignodes +------+-------+---------------+------------+--------+ |NodeID| Status|InternalAddress|InternalPort| Role| @@ -265,63 +270,60 @@ Total line number = 3 It costs 0.030s ``` -Then use the script to remove the DataNode. Script command: +2. 
Remove the ConfigNode using the script: + +**Linux /** **MacOS**: ```Bash -# Linux / MacOS sbin/remove-confignode.sh [confignode_id] +# Or: +sbin/remove-confignode.sh [cn_internal_address:cn_internal_port] +``` -#Windows -sbin/remove-confignode.bat [confignode_id] +**Windows:** +```Bash +sbin/remove-confignode.bat [confignode_id] +# Or: +sbin/remove-confignode.bat [cn_internal_address:cn_internal_port] ``` -### DataNode Node Maintenance +### DataNode Maintenance -There are two common scenarios for DataNode node maintenance: +DataNode maintenance includes adding and removing DataNodes. Common use cases include: -- Cluster expansion: For the purpose of expanding cluster capabilities, add new DataNodes to the cluster -- Cluster failure recovery: When a machine where a DataNode is located fails, making the DataNode unable to run normally, you can remove this DataNode and add a new DataNode to the cluster +- **Cluster Expansion:** Add new DataNodes to increase cluster capacity. +- **Cluster Fault Recovery:** If a DataNode's machine fails and it cannot function normally, remove the faulty DataNode and add a new one to the cluster. -> ❗️Note, in order for the cluster to work normally, during the process of DataNode node maintenance and after the maintenance is completed, the total number of DataNodes running normally should not be less than the number of data replicas (usually 2), nor less than the number of metadata replicas (usually 3). +**Note:** During and after DataNode maintenance, ensure that the number of active DataNodes is not fewer than the data replication factor (usually 2) or the schema replication factor (usually 3). 
-#### Adding DataNode Nodes +#### Adding a DataNode -Script command: +**Linux /** **MacOS**: -```Bash -# Linux / MacOS -# First switch to the IoTDB root directory +```Plain sbin/start-datanode.sh +``` + +**Windows:** -# Windows -# First switch to the IoTDB root directory +```Plain sbin/start-datanode.bat ``` -Parameter introduction: +**Note:** After adding a DataNode, the cluster load will gradually balance across all nodes as new writes arrive and old data expires (if TTL is set). -| Abbreviation | Description | Is it required | -| :--- | :--------------------------------------------- | :----------- | -| -v | Show version information | No | -| -f | Run the script in the foreground, do not put it in the background | No | -| -d | Start in daemon mode, i.e. run in the background | No | -| -p | Specify a file to store the process ID for process management | No | -| -c | Specify the path to the configuration file folder, the script will load the configuration file from here | No | -| -g | Print detailed garbage collection (GC) information | No | -| -H | Specify the path of the Java heap dump file, used when JVM memory overflows | No | -| -E | Specify the path of the JVM error log file | No | -| -D | Define system properties, in the format key=value | No | -| -X | Pass -XX parameters directly to the JVM | No | -| -h | Help instruction | No | +#### Removing a DataNode -Note: After adding a DataNode, as new writes arrive (and old data expires, if TTL is set), the cluster load will gradually balance towards the new DataNode, eventually achieving a balance of storage and computation resources on all nodes. +1. 
Connect to the cluster using the CLI and confirm the RPC address and port of the DataNode to be removed: -#### Removing DataNode Nodes +```Plain +show datanodes; +``` -First connect to the cluster through the CLI and confirm the RPC address and port number of the DataNode you want to remove with `show datanodes`: +Example output: -```Bash +```Plain IoTDB> show datanodes +------+-------+----------+-------+-------------+---------------+ |NodeID| Status|RpcAddress|RpcPort|DataRegionNum|SchemaRegionNum| @@ -334,51 +336,77 @@ Total line number = 3 It costs 0.110s ``` -Then use the script to remove the DataNode. Script command: +2. Remove the DataNode using the script: -```Bash -# Linux / MacOS -sbin/remove-datanode.sh [datanode_id] +**Linux / MacOS:** -#Windows -sbin/remove-datanode.bat [datanode_id] +```Bash +sbin/remove-datanode.sh [dn_rpc_address:dn_rpc_port] ``` -## Common Questions -1. Multiple prompts indicating activation failure during deployment process - - Use the `ls -al` command: Use the `ls -al` command to check if the owner information of the installation package root directory is the current user. - - Check activation directory: Check all files in the `./activation` directory and whether the owner information is the current user. - -2. Confignode failed to start +**Windows:** - Step 1: Please check the startup log to see if any parameters that cannot be changed after the first startup have been modified. - - Step 2: Please check the startup log for any other abnormalities. If there are any abnormal phenomena in the log, please contact Timecho Technical Support personnel for consultation on solutions. - - Step 3: If it is the first deployment or data can be deleted, you can also clean up the environment according to the following steps, redeploy, and restart. +```Bash +sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port] +``` - Step 4: Clean up the environment: +## Common Questions - a. Terminate all ConfigNode Node and DataNode processes. 
- ```Bash - # 1. Stop the ConfigNode and DataNode services +1. Activation Fails Repeatedly + - Use the `ls -al` command to verify that the ownership of the installation directory matches the current user. + - Check the ownership of all files in the `./activation` directory to ensure they belong to the current user. +2. ConfigNode Fails to Start + - Review the startup logs to check if any parameters, which cannot be modified after the first startup, were changed. + - Check the logs for any other errors. If unresolved, contact technical support for assistance. + - If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps: + **Clean the Environment** + + - Stop all ConfigNode and DataNode processes: + ```Bash sbin/stop-standalone.sh - - # 2. Check for any remaining processes - jps - # Or - ps -ef|gerp iotdb - - # 3. If there are any remaining processes, manually kill the - kill -9 - # If you are sure there is only one iotdb on the machine, you can use the following command to clean up residual processes - ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9 - ``` - b. Delete the data and logs directories. - - Explanation: Deleting the data directory is necessary, deleting the logs directory is for clean logs and is not mandatory. 
- - ```Bash - cd /data/iotdb - rm -rf data logs - ``` \ No newline at end of file + ``` + + - Check for any remaining processes: + ```Bash + jps + # or + ps -ef | grep iotdb + ``` + + - If processes remain, terminate them manually: + ```Bash + kill -9 + + #For systems with a single IoTDB instance, you can clean up residual processes with: + ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9 + ``` + + - Delete the `data` and `logs` directories: + ```Bash + cd /data/iotdb + rm -rf data logs + ``` + +## Appendix + +### ConfigNode Parameters + +| Parameter | Description | Is it required | +| :-------- | :---------------------------------------------------------- | :------------- | +| -d | Starts the process in daemon mode (runs in the background). | No | + +### DataNode Parameters + +| Parameter | Description | Required | +| :-------- | :----------------------------------------------------------- | :------- | +| -v | Displays version information. | No | +| -f | Runs the script in the foreground without backgrounding it. | No | +| -d | Starts the process in daemon mode (runs in the background). | No | +| -p | Specifies a file to store the process ID for process management. | No | +| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No | +| -g | Prints detailed garbage collection (GC) information. | No | +| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No | +| -E | Specifies the file for JVM error logs. | No | +| -D | Defines system properties in the format `key=value`. | No | +| -X | Passes `-XX` options directly to the JVM. | No | +| -h | Displays the help instructions. 
| No | \ No newline at end of file diff --git a/src/UserGuide/latest/Deployment-and-Maintenance/Database-Resources.md b/src/UserGuide/latest/Deployment-and-Maintenance/Database-Resources.md index 374b03e2f..d6210318a 100644 --- a/src/UserGuide/latest/Deployment-and-Maintenance/Database-Resources.md +++ b/src/UserGuide/latest/Deployment-and-Maintenance/Database-Resources.md @@ -188,6 +188,7 @@ Calculation formula: Number of measurement points * Sampling frequency (Hz) * Si Example: 1000 devices, each with 100 measurement points, a total of 100000 sequences, INT32 type. Sampling frequency 1Hz (once per second), storage for 1 year, 3 copies. - Complete calculation formula: 1000 devices * 100 measurement points * 12 bytes per data point * 86400 seconds per day * 365 days per year * 3 copies / 10 compression ratio / 1024 / 1024 / 1024 / 1024 =11T - Simplified calculation formula: 1000 * 100 * 12 * 86400 * 365 * 3 / 10 / 1024 / 1024 / 1024 / 1024 =11T + ### Storage Configuration If the number of nodes is over 10000000 or the query load is high, it is recommended to configure SSD ## Network (Network card) diff --git a/src/UserGuide/latest/Deployment-and-Maintenance/Environment-Requirements.md b/src/UserGuide/latest/Deployment-and-Maintenance/Environment-Requirements.md index 887bda839..e286154e1 100644 --- a/src/UserGuide/latest/Deployment-and-Maintenance/Environment-Requirements.md +++ b/src/UserGuide/latest/Deployment-and-Maintenance/Environment-Requirements.md @@ -80,7 +80,6 @@ IoTDB supports operating systems such as Linux, Windows, and MacOS, while the en - The system disk needs only the space used by the operating system, and does not need to reserve space for the IoTDB. - Each disk group corresponds to only one partition. Data disks (with multiple disk groups, corresponding to raid) do not need additional partitions. All space is used by the IoTDB. The following table lists the recommended disk partitioning methods. 
- @@ -120,7 +119,6 @@ The following table lists the recommended disk partitioning methods.
- ### Network Configuration 1. Disable the firewall diff --git a/src/UserGuide/latest/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md b/src/UserGuide/latest/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md index a4e3e3c59..cf3ed80a6 100644 --- a/src/UserGuide/latest/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md +++ b/src/UserGuide/latest/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md @@ -20,201 +20,235 @@ --> # Stand-Alone Deployment -This chapter will introduce how to start an IoTDB standalone instance, which includes 1 ConfigNode and 1 DataNode (commonly known as 1C1D). +This guide introduces how to set up a standalone TimechoDB instance, which includes one ConfigNode and one DataNode (commonly referred to as 1C1D). -## Matters Needing Attention +## Prerequisites -1. Before installation, ensure that the system is complete by referring to [System configuration](./Environment-Requirements.md). +1. [System configuration](./Environment-Requirements.md): Ensure the system has been configured according to the preparation guidelines. -2. It is recommended to prioritize using 'hostname' for IP configuration during deployment, which can avoid the problem of modifying the host IP in the later stage and causing the database to fail to start. To set the host name, you need to configure/etc/hosts on the target server. For example, if the local IP is 192.168.1.3 and the host name is iotdb-1, you can use the following command to set the server's host name and configure IoTDB's' cn_internal-address' using the host name dn_internal_address、dn_rpc_address。 +2. **IP Configuration**: It is recommended to use hostnames for IP configuration to prevent issues caused by IP address changes. Set the hostname by editing the `/etc/hosts` file. For example, if the local IP is `192.168.1.3` and the hostname is `iotdb-1`, run: ```shell echo "192.168.1.3 iotdb-1" >> /etc/hosts ``` -3. 
Some parameters cannot be modified after the first startup. Please refer to the "Parameter Configuration" section below for settings. + Use the hostname for `cn_internal_address` and `dn_internal_address` in IoTDB configuration. -4. Whether in linux or windows, ensure that the IoTDB installation path does not contain Spaces and Chinese characters to avoid software exceptions. +3. **Unmodifiable Parameters**: Some parameters cannot be changed after the first startup. Refer to the Parameter Configuration section. -5. Please note that when installing and deploying IoTDB (including activating and using software), it is necessary to use the same user for operations. You can: -- Using root user (recommended): Using root user can avoid issues such as permissions. -- Using a fixed non root user: - - Using the same user operation: Ensure that the same user is used for start, activation, stop, and other operations, and do not switch users. - - Avoid using sudo: Try to avoid using sudo commands as they execute commands with root privileges, which may cause confusion or security issues. +4. **Installation Path**: Ensure the installation path contains no spaces or non-ASCII characters to prevent runtime issues. -6. It is recommended to deploy a monitoring panel, which can monitor important operational indicators and keep track of database operation status at any time. The monitoring panel can be obtained by contacting the business department, and the steps for deploying the monitoring panel can be referred to:[Monitoring Board Install and Deploy](./Monitoring-panel-deployment.md). +5. - **User Permissions**: Choose one of the following permissions during installation and deployment: + - **Root User (Recommended)**: This avoids permission-related issues. + - **Non-Root User**: + - Use the same user for all operations, including starting, activating, and stopping services. + - Avoid using `sudo`, which can cause permission conflicts. + +6. 
**Monitoring Panel**: Deploy a monitoring panel to track key performance metrics. Contact the Timecho team for access and refer to the "[Monitoring Board Install and Deploy](./Monitoring-panel-deployment.md)" guide. ## Installation Steps -### 1、Unzip the installation package and enter the installation directory +### 1、Extract Installation Package + +Unzip the installation package and navigate to the directory: -```shell -unzip iotdb-enterprise-{version}-bin.zip -cd iotdb-enterprise-{version}-bin +```Plain +unzip timechodb-{version}-bin.zip +cd timechodb-{version}-bin ``` ### 2、Parameter Configuration -#### Environment Script Configuration +#### Memory Configuration -- ./conf/confignode-env.sh (./conf/confignode-env.bat) configuration +Edit the following files for memory allocation: -| **Configuration** | **Description** | **Default** | **Recommended value** | Note | -| :---------------: | :----------------------------------------------------------: | :---------: | :----------------------------------------------------------: | :---------------------------------: | -| MEMORY_SIZE | The total amount of memory that IoTDB ConfigNode nodes can use | empty | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect | +- **ConfigNode**: `conf/confignode-env.sh` (or `.bat` for Windows) +- **DataNode**: `conf/datanode-env.sh` (or `.bat` for Windows) -- ./conf/datanode-env.sh (./conf/datanode-env.bat) configuration +| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** | +| :------------ | :---------------------------------- | :---------- | :-------------- | :---------------------- | +| MEMORY_SIZE | Total memory allocated for the node | Empty | As needed | Effective after restart | -| **Configuration** | **Description** | **Default** | **Recommended value** | **Note** | -| :---------: | :----------------------------------: | :--------: | 
:----------------------------------------------: | :----------: | -| MEMORY_SIZE | The total amount of memory that IoTDB DataNode nodes can use | empty | Can be filled in as needed, and the system will allocate memory based on the filled in values | Restarting the service takes effect | +#### General Configuration -#### System General Configuration +Set the following parameters in `conf/iotdb-system.properties`. Refer to `conf/iotdb-system.properties.template` for a complete list. -Open the general configuration file (./conf/iotdb-system. properties file) and set the following parameters: +**Cluster-Level Parameters**: -| **Configuration** | **Description** | **Default** | **Recommended value** | Note | -| :-----------------------: | :----------------------------------------------------------: | :------------: | :----------------------------------------------------------: | :---------------------------------------------------: | -| cluster_name | Cluster Name | defaultCluster | The cluster name can be set as needed, and if there are no special needs, the default can be kept | Cannot be modified after initial startup | -| schema_replication_factor | Number of metadata replicas, set to 1 for the standalone version here | 1 | 1 | Default 1, cannot be modified after the first startup | -| data_replication_factor | Number of data replicas, set to 1 for the standalone version here | 1 | 1 | Default 1, cannot be modified after the first startup | +| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** | +| :------------------------ | :-------------------------- | :------------- | :-------------- | :----------------------------------------------------------- | +| cluster_name | Name of the cluster | defaultCluster | Customizable | If there is no specific requirement, keep the default value. | +| schema_replication_factor | Number of metadata replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. 
| +| data_replication_factor | Number of data replicas | 1 | 1 | In standalone mode, set this to 1. This value cannot be modified after the first startup. | -#### ConfigNode Configuration +**ConfigNode Parameters**: -Open the ConfigNode configuration file (./conf/iotdb-system. properties file) and set the following parameters: +| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** | +| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- | +| cn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. | +| cn_internal_port | Port used for internal communication within the cluster | 10710 | 10710 | This parameter cannot be modified after the first startup. | +| cn_consensus_port | Port used for consensus protocol communication among ConfigNode replicas | 10720 | 10720 | This parameter cannot be modified after the first startup. | +| cn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. 
| -| **Configuration** | **Description** | **Default** | **Recommended value** | Note | -| :-----------------: | :----------------------------------------------------------: | :-------------: | :----------------------------------------------------------: | :--------------------------------------: | -| cn_internal_address | The address used by ConfigNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Cannot be modified after initial startup | -| cn_internal_port | The port used by ConfigNode for communication within the cluster | 10710 | 10710 | Cannot be modified after initial startup | -| cn_consensus_port | The port used for ConfigNode replica group consensus protocol communication | 10720 | 10720 | Cannot be modified after initial startup | -| cn_seed_config_node | The address of the ConfigNode that the node connects to when registering to join the cluster, cn_internal_address:cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | Cannot be modified after initial startup | +**DataNode** **Parameters**: -#### DataNode Configuration +| **Parameter** | **Description** | **Default** | **Recommended** | **Notes** | +| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------------------------- | +| dn_rpc_address | Address for the client RPC service | 0.0.0.0 | 0.0.0.0 | Effective after restarting the service. | +| dn_rpc_port | Port for the client RPC service | 6667 | 6667 | Effective after restarting the service. | +| dn_internal_address | Address used for internal communication within the cluster | 127.0.0.1 | Server's IPv4 address or hostname. Use hostname to avoid issues when the IP changes. | This parameter cannot be modified after the first startup. 
| +| dn_internal_port | Port used for internal communication within the cluster | 10730 | 10730 | This parameter cannot be modified after the first startup. | +| dn_mpp_data_exchange_port | Port used for receiving data streams | 10740 | 10740 | This parameter cannot be modified after the first startup. | +| dn_data_region_consensus_port | Port used for data replica consensus protocol communication | 10750 | 10750 | This parameter cannot be modified after the first startup. | +| dn_schema_region_consensus_port | Port used for metadata replica consensus protocol communication | 10760 | 10760 | This parameter cannot be modified after the first startup. | +| dn_seed_config_node | Address of the ConfigNode for registering and joining the cluster. (e.g.,`cn_internal_address:cn_internal_port`) | 127.0.0.1:10710 | Use `cn_internal_address:cn_internal_port` | This parameter cannot be modified after the first startup. | -Open the DataNode configuration file (./conf/iotdb-system. properties file) and set the following parameters: +### 3、Start ConfigNode -| **Configuration** | **Description** | **Default** | **Recommended value** | **Note** | -| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------------------- | :--------------------------------------- | -| dn_rpc_address | The address of the client RPC service | 0.0.0.0 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Restarting the service takes effect | -| dn_rpc_port | The port of the client RPC service | 6667 | 6667 | Restarting the service takes effect | -| dn_internal_address | The address used by DataNode for communication within the cluster | 127.0.0.1 | The IPV4 address or host name of the server where it is located, and it is recommended to use host name | Cannot be modified after initial startup | -| dn_internal_port | The port used by DataNode for 
communication within the cluster | 10730 | 10730 | Cannot be modified after initial startup | -| dn_mpp_data_exchange_port | The port used by DataNode to receive data streams | 10740 | 10740 | Cannot be modified after initial startup | -| dn_data_region_consensus_port | The port used by DataNode for data replica consensus protocol communication | 10750 | 10750 | Cannot be modified after initial startup | -| dn_schema_region_consensus_port | The port used by DataNode for metadata replica consensus protocol communication | 10760 | 10760 | Cannot be modified after initial startup | -| dn_seed_config_node | The ConfigNode address that the node connects to when registering to join the cluster, i.e. cn_internal-address: cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | Cannot be modified after initial startup | +Navigate to the `sbin` directory and start ConfigNode: -> ❗️Attention: Editors such as VSCode Remote do not have automatic configuration saving function. Please ensure that the modified files are saved persistently, otherwise the configuration items will not take effect +```Bash +./sbin/start-confignode.sh -d # The "-d" flag starts the process in the background. +``` -### 3、Start ConfigNode + If the startup fails, refer to the [**Common Problem**](#Common Problem) section below for troubleshooting. -Enter the sbin directory of iotdb and start confignode +### 4、Start DataNode -```shell -./start-confignode.sh -d #The "- d" parameter will start in the background -``` -If the startup fails, please refer to [Common Questions](#common-questions). +Navigate to the `sbin` directory of IoTDB and start the DataNode: -### 4、Activate Database +````shell +./sbin/start-datanode.sh -d # The "-d" flag starts the process in the background. 
+```` -#### Method 1: Activate file copy activation +### 5、Activate Database -- After starting the confignode node, enter the activation folder and copy the systeminfo file to the Timecho staff -- Received the license file returned by the staff -- Place the license file in the activation folder of the corresponding node; +#### Option 1: File-Based Activation -#### Method 2: Activate Script Activation +1. Start both the ConfigNode and DataNode. +2. Navigate to the `activation` folder and copy the `system_info` file. +3. Send the `system_info` file to the Timecho team. +4. Place the license file provided by the Timecho team into the corresponding `activation` folder for each node. -- Obtain the required machine code for activation, enter the sbin directory of the installation directory, and execute the activation script: +#### Option 2: Command-Based Activation -```shell - cd sbin -./start-activate.sh -``` +1. Enter the IoTDB CLI. + +- **For Table Model**: -- The following information is displayed. Please copy the machine code (i.e. the string of characters) to the Timecho staff: +```SQL +# For Linux or macOS +./start-cli.sh -sql_dialect table -```shell -Please copy the system_info's content and send it to Timecho: -01-KU5LDFFN-PNBEHDRH -Please enter license: +# For Windows +./start-cli.bat -sql_dialect table ``` -- Enter the activation code returned by the staff into the previous command line prompt 'Please enter license:', as shown below: +- **For Tree Model**: -```shell -Please enter license: -JJw+MmF+AtexsfgNGOFgTm83Bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxm6pF+APW1CiXLTSijK9Qh3nsLgzrW8OJPh26Vl6ljKUpCvpTiw== -License has been stored to sbin/../activation/license -Import completed. Please start cluster and excute 'show cluster' to verify activation status +```SQL +# For Linux or macOS +./start-cli.sh + +# For Windows +./start-cli.bat ``` -### 5、Start DataNode +2. 
Run the following command to retrieve the machine code required for activation: + + ```Bash + show system info + ``` -Enter the sbin directory of iotdb and start datanode: + **Note**: Activation is currently supported only in the Tree Model. -```shell -cd sbin -./start-datanode.sh -d # The "- d" parameter will start in the background -``` +3. Copy the returned machine code (displayed as a green string) and send it to the Timecho team: -### 6、Verify Deployment +```Bash ++--------------------------------------------------------------+ +| SystemInfo| ++--------------------------------------------------------------+ +| 01-TE5NLES4-UDDWCMYE| ++--------------------------------------------------------------+ +Total line number = 1 +It costs 0.030s +``` -Can be executed directly/ Cli startup script in sbin directory: +4. Enter the activation code provided by the Timecho team in the CLI using the following format. Wrap the activation code in single quotes ('): -```shell -./start-cli.sh -h ip(local IP or domain name) -p port(6667) +```Bash +IoTDB> activate '01-D4EYQGPZ-EAUJJODW-NUKRDR6F-TUQS3B75-EDZFLK3A-6BOKJFFZ-ALDHOMN7-NB2E4BHI-7ZKGFVK6-GCIFXA4T-UG3XJTTD-SHJV6F2P-Q27B4OMJ-R47ZDIM3-UUASUXG2-OQXGVZCO-MMYKICZU-TWFQYYAO-ZOAGOKJA-NYHQTA5U-EWAR4EP5-MRC6R2CI-PKUTKRCT-7UDGRH3F-7BYV4P5D-6KKIA===' ``` -After successful startup, the following interface will appear displaying successful installation of IOTDB. +### 6、Verify Activation -![](https://alioss.timecho.com/docs/img/%E5%90%AF%E5%8A%A8%E6%88%90%E5%8A%9F.png) +Check the `ClusterActivationStatus` field. If it shows `ACTIVATED`, the database has been successfully activated. 
-After the installation success interface appears, continue to check if the activation is successful and use the `show cluster`command +![](https://alioss.timecho.com/docs/img/%E5%8D%95%E6%9C%BA-%E9%AA%8C%E8%AF%81.png) -When you see the display "Activated" on the far right, it indicates successful activation +## Common Problem -![](https://alioss.timecho.com/docs/img/show%20cluster.png) +1. Activation Fails Repeatedly + 1. Use the `ls -al` command to verify that the ownership of the installation directory matches the current user. + 2. Check the ownership of all files in the `./activation` directory to ensure they belong to the current user. +2. ConfigNode Fails to Start + 1. Review the startup logs to check if any parameters, which cannot be modified after the first startup, were changed. + 2. Check the logs for any other errors. If unresolved, contact technical support for assistance. + 3. If the deployment is fresh or data can be discarded, clean the environment and redeploy using the following steps: + **Clean the Environment** -> The appearance of 'Activated (W)' indicates passive activation, indicating that this Config Node does not have a license file (or has not issued the latest license file with a timestamp). At this point, it is recommended to check if the license file has been placed in the license folder. If not, please place the license file. If a license file already exists, it may be due to inconsistency between the license file of this node and the information of other nodes. Please contact Timecho staff to reapply. +1. Stop all ConfigNode and DataNode processes: -## Common Problem -1. Multiple prompts indicating activation failure during deployment process - - Use the `ls -al` command: Use the `ls -al` command to check if the owner information of the installation package root directory is the current user. - - Check activation directory: Check all files in the `./activation` directory and whether the owner information is the current user. 
+```Bash +sbin/stop-standalone.sh +``` -2. Confignode failed to start +2. Check for any remaining processes: - Step 1: Please check the startup log to see if any parameters that cannot be changed after the first startup have been modified. +```Bash +jps +# or +ps -ef | grep iotdb +``` - Step 2: Please check the startup log for any other abnormalities. If there are any abnormal phenomena in the log, please contact Timecho Technical Support personnel for consultation on solutions. +3. If processes remain, terminate them manually: - Step 3: If it is the first deployment or data can be deleted, you can also clean up the environment according to the following steps, redeploy, and restart. +```Bash +kill -9 - Step 4: Clean up the environment: +#For systems with a single IoTDB instance, you can clean up residual processes with: +ps -ef | grep iotdb | grep -v grep | tr -s ' ' ' ' | cut -d ' ' -f2 | xargs kill -9 +``` - a. Terminate all ConfigNode Node and DataNode processes. - ```Bash - # 1. Stop the ConfigNode and DataNode services - sbin/stop-standalone.sh +4. Delete the `data` and `logs` directories: - # 2. Check for any remaining processes - jps - # Or - ps -ef|gerp iotdb +```Bash +cd /data/iotdb +rm -rf data logs +``` - # 3. If there are any remaining processes, manually kill the - kill -9 - # If you are sure there is only one iotdb on the machine, you can use the following command to clean up residual processes - ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9 - ``` - b. Delete the data and logs directories. - - Explanation: Deleting the data directory is necessary, deleting the logs directory is for clean logs and is not mandatory. 
- - ```Bash - cd /data/iotdb - rm -rf data logs - ``` \ No newline at end of file +## Appendix + +### ConfigNode Parameters + +| Parameter | Description | **Is it required** | +| :-------- | :---------------------------------------------------------- | :----------------- | +| -d | Starts the process in daemon mode (runs in the background). | No | + +### DataNode Parameters + +| Parameter | Description | Required | +| :-------- | :----------------------------------------------------------- | :------- | +| -v | Displays version information. | No | +| -f | Runs the script in the foreground without backgrounding it. | No | +| -d | Starts the process in daemon mode (runs in the background). | No | +| -p | Specifies a file to store the process ID for process management. | No | +| -c | Specifies the path to the configuration folder; the script loads configuration files from this location. | No | +| -g | Prints detailed garbage collection (GC) information. | No | +| -H | Specifies the path for the Java heap dump file, used during JVM memory overflow. | No | +| -E | Specifies the file for JVM error logs. | No | +| -D | Defines system properties in the format `key=value`. | No | +| -X | Passes `-XX` options directly to the JVM. | No | +| -h | Displays the help instructions. | No | \ No newline at end of file diff --git a/src/UserGuide/latest/Ecosystem-Integration/Telegraf.md b/src/UserGuide/latest/Ecosystem-Integration/Telegraf.md index 1c84d9bef..f09fb025d 100644 --- a/src/UserGuide/latest/Ecosystem-Integration/Telegraf.md +++ b/src/UserGuide/latest/Ecosystem-Integration/Telegraf.md @@ -18,174 +18,5 @@ under the License. --> -# Telegraf -## 1、Product Overview - -### 1.1 Telegraf - -[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is an open-source proxy tool developed by InfluxData for collecting, processing, and transmitting metric data. 
- -Telegraf has the following characteristics: - -- Plugin architecture: The strength of Telegraf lies in its extensive plugin ecosystem. It supports multiple input, output, and processor plugins, and can seamlessly integrate with various data sources and targets. - - Data collection: Telegraf excels at collecting metric data from various sources, such as system metrics, logs, databases, etc. Its versatility makes it suitable for monitoring applications, infrastructure, and IoT devices. - - Output target: Once data is collected, it can be sent to various output targets, including popular databases such as InfluxDB. This flexibility allows Telegraf to adapt to different monitoring and analysis settings. -- Easy configuration: Telegraf is configured using TOML files. This simplicity enables users to easily define inputs, outputs, and processors, making customization simple and clear. -- Community and Support: As an open-source project, Telegraf benefits from an active community. Users can contribute plugins, report issues, and seek help through forums and documents. - -### 1.2 Telegraf-IoTDB Plugin - -The Telegraf IoTDB plugin can output and store monitoring information saved in Telegraf to IoTDB. The output plugin uses IoTDB sessions for connection and data writing. - -![](https://alioss.timecho.com/docs/img/telegraf-en.png) - -## 2、Installation Requirements - -Telegraf supports multiple operating systems, including Linux, Windows, and macOS. It is recommended to use 'root' administrator privileges to successfully install Telegraf. 
Please refer to the installation requirements for specific [Installation Requirements](https://docs.influxdata.com/telegraf/v1/install/) - -## 3、Installation Steps - -Please refer to [Installation Steps](https://docs.influxdata.com/telegraf/v1/install/) for specific installation steps - -- Note: This plugin is a built-in plugin for Telegraf and does not require secondary installation - -## 4、Instructions - -### 4.1 Set Input Source - -Find 'INPUT PLUGINS' in the' telegraf. conf 'configuration file to configure the input source. The specific configuration content is shown in the table below - -| Configuration | Description | Notes | -| ----------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| alias | Example of named plugin | | -| interval |Collect the frequency of this indicator. Ordinary plugins use a single global interval, but if the running frequency of a specific input should be lower or higher, you can configure it here` Interval can be increased to reduce data input rate limitations. | | -| precision | Overlay the settings of the 'precision' proxy. The collected indicators are rounded to the specified precision interval. When this value is set on the service input (e.g. `'statsd':`), the output database may merge multiple events that occur at the same timestamp. | | -| collection_jitter | Overlay the settings of the 'collectic_jitter' proxy. Collection jitter is used to perform random intervals` | | -| name_override | Custom time series path name used when outputting to IoTDB | The output path name must meet the "[Syntax Requirement](../Reference/Syntax-Rule.md)" requirement | -| name_prefix | Specify the prefix attached to the measurement name | | -| name_suffix | Specify the suffix attached to the measurement name | | - -![](https://alioss.timecho.com/docs/img/Telegraf_1.png) - -### 4.2 Set Output Source - -Find "outputs. iotdb" in the "telegraf. 
conf" configuration file to configure the output source. The specific configuration content is shown in the table below. For specific input source examples, please refer to [Output Source Example](https://docs.influxdata.com/telegraf/v1/configuration/#output-configuration-examples) - -| Configuration | Description | Before Modification | After Modification | Notes | -| ------------------- | -------------- | ----------------------------------- | ------------------------------------------------- | ------------------------------------------------------------ | -| host | Host of IoTDB | # host = "127.0.0.1" | host = "Deploy IoTDB host" | Default is 127.0.0.1 | -| port | The port number of IoTDB | # port = "6667" | port = "Port number for deploying IoTDB" | Default is 6667 | -| user | Username for IoTDB | # user = "root" | user = "Username for IoTDB" |Default as root | -| password | Password for IoTDB | # password = "root" | password= "Password for IoTDB" | Default as root | -| timestamp_precision | Timestamp accuracy | timestamp_precision = "millisecond" | timestamp_precision = "Same timestamp accuracy as IoTDB" | You can check the 'timestamp-precision' field in 'iotdb system. properties' | -| sanitize_tag | Database version | none | sanitize_tag = "0.13/1.0/1.1/1.2/1.3" | | - -![](https://alioss.timecho.com/docs/img/Telegraf_2.png) - -### 4.3 Start Telegraf Service - -```Bash -telegraf -config /path/to/telegraf.conf -``` - -## 5、Example Usage - -The following is an example of collecting CPU data using Telegraf and outputting it to IoTDB using Telegraf IoTDB. Generate configuration files using the telegraf command - -```Bash -telegraf --sample-config --input-filter cpu --output-filter iotdb > cpu_iotdb.conf -``` - -1. Modify the configuration of the input CPU plugin in cpu_iotdb. conf. 
Among them, the "name_ooverride" field is the custom time-series path name used when outputting to IoTDB - -```Bash -# Read metrics about cpu usage -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states - report_active = false - ## If true and the info is available then add core_id and physical_id tags - core_tags = false - name_override = "root.demo.telgraf.cpu" -``` - -2. Modify the configuration of the output iotdb plugin in cpu_iotdb. conf - -| Configuration | Description | Before Modification | After Modification | Notes | -| ------------------- | -------------- | ----------------------------------- | ------------------------------------------------- | ------------------------------------------------------------ | -| host | Host of IoTDB | # host = "127.0.0.1" | host = "Deploy IoTDB host" | Default is 127.0.0.1 | -| port | The port number of IoTDB | # port = "6667" | port = "Port number for deploying IoTDB" | Default is 6667 | -| user | Username for IoTDB | # user = "root" | user = "Username for IoTDB" |Default as root | -| password | Password for IoTDB | # password = "root" | password= "Password for IoTDB" | Default as root | -| timestamp_precision | Timestamp accuracy | timestamp_precision = "millisecond" | timestamp_precision = "Same timestamp accuracy as IoTDB" | You can check the 'timestamp-precision' field in 'iotdb system. properties' | -| sanitize_tag | Database version | none | sanitize_tag = "0.13/1.0/1.1/1.2/1.3" | | - -```Bash -# Save metrics to an IoTDB Database -[[outputs.iotdb]] - ## Configuration of IoTDB server connection - host = "127.0.0.1" - # port = "6667" - - ## Configuration of authentication - # user = "root" - # password = "root" - - ## Timeout to open a new session. - ## A value of zero means no timeout. 
- # timeout = "5s" - - ## Configuration of type conversion for 64-bit unsigned int - ## IoTDB currently DOES NOT support unsigned integers (version 13.x). - ## 32-bit unsigned integers are safely converted into 64-bit signed integers by the plugin, - ## however, this is not true for 64-bit values in general as overflows may occur. - ## The following setting allows to specify the handling of 64-bit unsigned integers. - ## Available values are: - ## - "int64" -- convert to 64-bit signed integers and accept overflows - ## - "int64_clip" -- convert to 64-bit signed integers and clip the values on overflow to 9,223,372,036,854,775,807 - ## - "text" -- convert to the string representation of the value - # uint64_conversion = "int64_clip" - - ## Configuration of TimeStamp - ## TimeStamp is always saved in 64bits int. timestamp_precision specifies the unit of timestamp. - ## Available value: - ## "second", "millisecond", "microsecond", "nanosecond"(default) - timestamp_precision = "millisecond" - - ## Handling of tags - ## Tags are not fully supported by IoTDB. 
- ## A guide with suggestions on how to handle tags can be found here: - ## https://iotdb.apache.org/UserGuide/Master/API/InfluxDB-Protocol.html - ## - ## Available values are: - ## - "fields" -- convert tags to fields in the measurement - ## - "device_id" -- attach tags to the device ID - ## - ## For Example, a metric named "root.sg.device" with the tags `tag1: "private"` and `tag2: "working"` and - ## fields `s1: 100` and `s2: "hello"` will result in the following representations in IoTDB - ## - "fields" -- root.sg.device, s1=100, s2="hello", tag1="private", tag2="working" - ## - "device_id" -- root.sg.device.private.working, s1=100, s2="hello" - # convert_tags_to = "device_id" - ## Handling of unsupported characters - ## Some characters in different versions of IoTDB are not supported in path name - ## A guide with suggetions on valid paths can be found here: - ## for iotdb 0.13.x -> https://iotdb.apache.org/UserGuide/V0.13.x/Reference/Syntax-Conventions.html#identifiers - ## for iotdb 1.x.x and above -> https://iotdb.apache.org/UserGuide/V1.3.x/User-Manual/Syntax-Rule.html#identifier - ## - ## Available values are: - ## - "1.0", "1.1", "1.2", "1.3" -- enclose in `` the world having forbidden character - ## such as @ $ # : [ ] { } ( ) space - ## - "0.13" -- enclose in `` the world having forbidden character - ## such as space - ## - ## Keep this section commented if you don't want to sanitize the path - sanitize_tag = "1.3" -``` - -3. 
Run Telegraf using the cpu_iotdb.exe configuration file: After running for a period of time, the data collected and reported by Telegraf can be queried in IoTDB \ No newline at end of file +Comming Soon \ No newline at end of file diff --git a/src/UserGuide/latest/Reference/Syntax-Rule.md b/src/UserGuide/latest/Reference/Syntax-Rule.md index 40d858e28..38dffc6ac 100644 --- a/src/UserGuide/latest/Reference/Syntax-Rule.md +++ b/src/UserGuide/latest/Reference/Syntax-Rule.md @@ -172,7 +172,6 @@ Below are basic constraints of identifiers, specific identifiers may have other - [0-9 a-z A-Z _ ] (letters, digits and underscore) - ['\u2E80'..'\u9FFF'] (UNICODE Chinese characters) - ### Reverse quotation marks **If the following situations occur, the identifier needs to be quoted using reverse quotes:** @@ -279,5 +278,4 @@ create device template `t1't"t` ```sql `root.db.*` - ``` - + ``` \ No newline at end of file diff --git a/src/UserGuide/latest/SQL-Manual/Operator-and-Expression.md b/src/UserGuide/latest/SQL-Manual/Operator-and-Expression.md index c6fec7f61..1b6fd667f 100644 --- a/src/UserGuide/latest/SQL-Manual/Operator-and-Expression.md +++ b/src/UserGuide/latest/SQL-Manual/Operator-and-Expression.md @@ -25,7 +25,7 @@ This chapter describes the operators and functions supported by IoTDB. IoTDB pro A list of all available functions, both built-in and custom, can be displayed with `SHOW FUNCTIONS` command. -See the documentation [Select-Expression](./Function-and-Expression.md#selector-functions) for the behavior of operators and functions in SQL. +See the documentation [Select-Expression](../SQL-Manual/Function-and-Expression.md#selector-functions) for the behavior of operators and functions in SQL. 
## OPERATORS @@ -41,7 +41,7 @@ See the documentation [Select-Expression](./Function-and-Expression.md#selector- | `+` | addition | | `-` | subtraction | -For details and examples, see the document [Arithmetic Operators and Functions](./Function-and-Expression.md#arithmetic-functions). +For details and examples, see the document [Arithmetic Operators and Functions](../SQL-Manual/Function-and-Expression.md#arithmetic-functions). ### Comparison Operators @@ -64,7 +64,7 @@ For details and examples, see the document [Arithmetic Operators and Functions]( | `IN` / `CONTAINS` | is a value in the specified list | | `NOT IN` / `NOT CONTAINS` | is not a value in the specified list | -For details and examples, see the document [Comparison Operators and Functions](./Function-and-Expression.md#comparison-operators-and-functions). +For details and examples, see the document [Comparison Operators and Functions](../SQL-Manual/Function-and-Expression.md#comparison-operators-and-functions). ### Logical Operators @@ -74,7 +74,7 @@ For details and examples, see the document [Comparison Operators and Functions]( | `AND` / `&` / `&&` | logical AND | | `OR`/ | / || | logical OR | -For details and examples, see the document [Logical Operators](./Function-and-Expression.md#logical-operators). +For details and examples, see the document [Logical Operators](../SQL-Manual/Function-and-Expression.md#logical-operators). ### Operator Precedence @@ -123,7 +123,7 @@ The built-in functions can be used in IoTDB without registration, and the functi | MAX_BY | MAX_BY(x, y) returns the value of x corresponding to the maximum value of the input y. MAX_BY(time, x) returns the timestamp when x is at its maximum value. | The first input x can be of any type, while the second input y must be of type INT32, INT64, FLOAT, DOUBLE, STRING, TIMESTAMP or DATE. | / | Consistent with the data type of the first input x. 
| | MIN_BY | MIN_BY(x, y) returns the value of x corresponding to the minimum value of the input y. MIN_BY(time, x) returns the timestamp when x is at its minimum value. | The first input x can be of any type, while the second input y must be of type INT32, INT64, FLOAT, DOUBLE, STRING, TIMESTAMP or DATE. | / | Consistent with the data type of the first input x. | -For details and examples, see the document [Aggregate Functions](./Function-and-Expression.md#aggregate-functions). +For details and examples, see the document [Aggregate Functions](../SQL-Manual/Function-and-Expression.md#aggregate-functions). ### Arithmetic Functions @@ -150,7 +150,7 @@ For details and examples, see the document [Aggregate Functions](./Function-and- | LOG10 | INT32 / INT64 / FLOAT / DOUBLE | DOUBLE | / | Math#log10(double) | | SQRT | INT32 / INT64 / FLOAT / DOUBLE | DOUBLE | / | Math#sqrt(double) | -For details and examples, see the document [Arithmetic Operators and Functions](./Function-and-Expression.md#arithmetic-operators-and-functions). +For details and examples, see the document [Arithmetic Operators and Functions](../SQL-Manual/Function-and-Expression.md#arithmetic-operators-and-functions). ### Comparison Functions @@ -159,7 +159,7 @@ For details and examples, see the document [Arithmetic Operators and Functions]( | ON_OFF | INT32 / INT64 / FLOAT / DOUBLE | `threshold`: a double type variate | BOOLEAN | Return `ts_value >= threshold`. | | IN_RANGR | INT32 / INT64 / FLOAT / DOUBLE | `lower`: DOUBLE type `upper`: DOUBLE type | BOOLEAN | Return `ts_value >= lower && value <= upper`. | -For details and examples, see the document [Comparison Operators and Functions](./Function-and-Expression.md#comparison-operators-and-functions). +For details and examples, see the document [Comparison Operators and Functions](../SQL-Manual/Function-and-Expression.md#comparison-operators-and-functions). 
### String Processing Functions @@ -179,7 +179,7 @@ For details and examples, see the document [Comparison Operators and Functions]( | TRIM | TEXT STRING | / | TEXT | Get the string whose value is same to input series, with all leading and trailing space removed. | | STRCMP | TEXT STRING | / | TEXT | Get the compare result of two input series. Returns `0` if series value are the same, a `negative integer` if value of series1 is smaller than series2,
a `positive integer` if value of series1 is more than series2. | -For details and examples, see the document [String Processing](./Function-and-Expression.md#string-processing). +For details and examples, see the document [String Processing](../SQL-Manual/Function-and-Expression.md#string-processing). ### Data Type Conversion Function @@ -187,7 +187,7 @@ For details and examples, see the document [String Processing](./Function-and-Ex | ------------- | ------------------------------------------------------------ | ----------------------- | ------------------------------------------------------------ | | CAST | `type`: Output data type, INT32 / INT64 / FLOAT / DOUBLE / BOOLEAN / TEXT | determined by `type` | Convert the data to the type specified by the `type` parameter. | -For details and examples, see the document [Data Type Conversion Function](./Function-and-Expression.md#data-type-conversion-function). +For details and examples, see the document [Data Type Conversion Function](../SQL-Manual/Function-and-Expression.md#data-type-conversion-function). ### Constant Timeseries Generating Functions @@ -197,7 +197,7 @@ For details and examples, see the document [Data Type Conversion Function](./Fun | PI | None | DOUBLE | Data point value: a `double` value of `π`, the ratio of the circumference of a circle to its diameter, which is equals to `Math.PI` in the *Java Standard Library*. | | E | None | DOUBLE | Data point value: a `double` value of `e`, the base of the natural logarithms, which is equals to `Math.E` in the *Java Standard Library*. | -For details and examples, see the document [Constant Timeseries Generating Functions](./Function-and-Expression.md#constant-timeseries-generating-functions). +For details and examples, see the document [Constant Timeseries Generating Functions](../SQL-Manual/Function-and-Expression.md#constant-timeseries-generating-functions). 
### Selector Functions @@ -206,7 +206,7 @@ For details and examples, see the document [Constant Timeseries Generating Funct | TOP_K | INT32 / INT64 / FLOAT / DOUBLE / TEXT / STRING / DATE / TIEMSTAMP | `k`: the maximum number of selected data points, must be greater than 0 and less than or equal to 1000 | Same type as the input series | Returns `k` data points with the largest values in a time series. | | BOTTOM_K | INT32 / INT64 / FLOAT / DOUBLE / TEXT / STRING / DATE / TIEMSTAMP | `k`: the maximum number of selected data points, must be greater than 0 and less than or equal to 1000 | Same type as the input series | Returns `k` data points with the smallest values in a time series. | -For details and examples, see the document [Selector Functions](./Function-and-Expression.md#selector-functions). +For details and examples, see the document [Selector Functions](../SQL-Manual/Function-and-Expression.md#selector-functions). ### Continuous Interval Functions @@ -217,7 +217,7 @@ For details and examples, see the document [Selector Functions](./Function-and-E | ZERO_COUNT | INT32/ INT64/ FLOAT/ DOUBLE/ BOOLEAN | `min`:Optional with default value `1L` `max`:Optional with default value `Long.MAX_VALUE` | Long | Return intervals' start times and the number of data points in the interval in which the value is always 0(false). Data points number `n` satisfy `n >= min && n <= max` | | NON_ZERO_COUNT | INT32/ INT64/ FLOAT/ DOUBLE/ BOOLEAN | `min`:Optional with default value `1L` `max`:Optional with default value `Long.MAX_VALUE` | Long | Return intervals' start times and the number of data points in the interval in which the value is always not 0(false). Data points number `n` satisfy `n >= min && n <= max` | -For details and examples, see the document [Continuous Interval Functions](./Function-and-Expression.md#continuous-interval-functions). 
+For details and examples, see the document [Continuous Interval Functions](../SQL-Manual/Function-and-Expression.md#continuous-interval-functions). ### Variation Trend Calculation Functions @@ -230,7 +230,7 @@ For details and examples, see the document [Continuous Interval Functions](./Fun | NON_NEGATIVE_DERIVATIVE | INT32 / INT64 / FLOAT / DOUBLE | / | DOUBLE | Calculates the absolute value of the rate of change of a data point compared to the previous data point, the result is equals to NON_NEGATIVE_DIFFERENCE / TIME_DIFFERENCE. There is no corresponding output for the first data point. | | DIFF | INT32 / INT64 / FLOAT / DOUBLE | `ignoreNull`:optional,default is true. If is true, the previous data point is ignored when it is null and continues to find the first non-null value forwardly. If the value is false, previous data point is not ignored when it is null, the result is also null because null is used for subtraction | DOUBLE | Calculates the difference between the value of a data point and the value of the previous data point. There is no corresponding output for the first data point, so output is null | -For details and examples, see the document [Variation Trend Calculation Functions](./Function-and-Expression.md#variation-trend-calculation-functions). +For details and examples, see the document [Variation Trend Calculation Functions](../SQL-Manual/Function-and-Expression.md#variation-trend-calculation-functions). ### Sample Functions @@ -250,7 +250,7 @@ For details and examples, see the document [Sample Functions](../SQL-Manual/Func | ------------- | ------------------------------- | ------------------- | ----------------------------- | ----------------------------------------------------------- | | CHANGE_POINTS | INT32 / INT64 / FLOAT / DOUBLE | / | Same type as the input series | Remove consecutive identical values from an input sequence. | -For details and examples, see the document [Time-Series](./Function-and-Expression.md#time-series-processing). 
+For details and examples, see the document [Time-Series](../SQL-Manual/Function-and-Expression.md#time-series-processing). ## LAMBDA EXPRESSION @@ -259,7 +259,7 @@ For details and examples, see the document [Time-Series](./Function-and-Expressi | ------------- | ----------------------------------------------- | ------------------------------------------------------------ | ----------------------------------------------- | ------------------------------------------------------------ | | JEXL | INT32 / INT64 / FLOAT / DOUBLE / TEXT / BOOLEAN | `expr` is a lambda expression that supports standard one or multi arguments in the form `x -> {...}` or `(x, y, z) -> {...}`, e.g. `x -> {x * 2}`, `(x, y, z) -> {x + y * z}` | INT32 / INT64 / FLOAT / DOUBLE / TEXT / BOOLEAN | Returns the input time series transformed by a lambda expression | -For details and examples, see the document [Lambda](./Function-and-Expression.md#lambda-expression). +For details and examples, see the document [Lambda](../SQL-Manual/Function-and-Expression.md#lambda-expression). ## CONDITIONAL EXPRESSION @@ -267,7 +267,7 @@ For details and examples, see the document [Lambda](./Function-and-Expression.md | --------------- | -------------------- | | `CASE` | similar to "if else" | -For details and examples, see the document [Conditional Expressions](./Function-and-Expression.md#conditional-expressions). +For details and examples, see the document [Conditional Expressions](../SQL-Manual/Function-and-Expression.md#conditional-expressions). ## SELECT EXPRESSION @@ -322,7 +322,7 @@ Aggregate functions are many-to-one functions. They perform aggregate calculatio > select a, count(a) from root.sg group by ([10,100),10ms) > ``` -For the aggregation functions supported by IoTDB, see the document [Aggregate Functions](./Function-and-Expression.md#aggregate-functions). 
+For the aggregation functions supported by IoTDB, see the document [Aggregate Functions](../SQL-Manual/Function-and-Expression.md#aggregate-functions). #### Time Series Generation Function diff --git a/src/UserGuide/latest/SQL-Manual/SQL-Manual.md b/src/UserGuide/latest/SQL-Manual/SQL-Manual.md index 4ac977278..2a078041c 100644 --- a/src/UserGuide/latest/SQL-Manual/SQL-Manual.md +++ b/src/UserGuide/latest/SQL-Manual/SQL-Manual.md @@ -602,7 +602,7 @@ IoTDB > select avg(temperature), from root.ln.wf01.wt01; IoTDB > select avg(*), - (avg(*) + 1) * 3 / 2 -1 + (avg(*) + 1) * 3 / 2 -1 from root.sg1 IoTDB > select avg(temperature), @@ -1090,11 +1090,11 @@ select change_points(s1), change_points(s2), change_points(s3), change_points(s4 ## DATA QUALITY FUNCTION LIBRARY -For more details, see document [Operator-and-Expression](./UDF-Libraries.md). +For more details, see document [Operator-and-Expression](../SQL-Manual/UDF-Libraries.md). ### Data Quality -For details and examples, see the document [Data-Quality](./UDF-Libraries.md#data-quality). +For details and examples, see the document [Data-Quality](../SQL-Manual/UDF-Libraries.md#data-quality). ```sql # Completeness @@ -1119,7 +1119,7 @@ select Accuracy(t1,t2,t3,m1,m2,m3) from root.test ### Data Profiling -For details and examples, see the document [Data-Profiling](./UDF-Libraries.md#data-profiling). +For details and examples, see the document [Data-Profiling](../SQL-Manual/UDF-Libraries.md#data-profiling). ```sql # ACF @@ -1199,7 +1199,7 @@ select zscore(s1) from root.test ### Anomaly Detection -For details and examples, see the document [Anomaly-Detection](./UDF-Libraries.md#anomaly-detection). +For details and examples, see the document [Anomaly-Detection](../SQL-Manual/UDF-Libraries.md#anomaly-detection). 
```sql # IQR @@ -1234,7 +1234,7 @@ select MasterDetect(lo,la,m_lo,m_la,model,'output_type'='anomaly','p'='3','k'='3 ### Frequency Domain -For details and examples, see the document [Frequency-Domain](./UDF-Libraries.md#frequency-domain-analysis). +For details and examples, see the document [Frequency-Domain](../SQL-Manual/UDF-Libraries.md#frequency-domain-analysis). ```sql # Conv @@ -1266,7 +1266,7 @@ select envelope(s1) from root.test.d1 ### Data Matching -For details and examples, see the document [Data-Matching](./UDF-Libraries.md#data-matching). +For details and examples, see the document [Data-Matching](../SQL-Manual/UDF-Libraries.md#data-matching). ```sql # Cov @@ -1287,7 +1287,7 @@ select xcorr(s1, s2) from root.test.d1 where time <= 2020-01-01 00:00:05 ### Data Repairing -For details and examples, see the document [Data-Repairing](./UDF-Libraries.md#data-repairing). +For details and examples, see the document [Data-Repairing](../SQL-Manual/UDF-Libraries.md#data-repairing). ```sql # TimestampRepair @@ -1312,7 +1312,7 @@ select seasonalrepair(s1,'method'='improved','period'=3) from root.test.d2 ### Series Discovery -For details and examples, see the document [Series-Discovery](./UDF-Libraries.md#series-discovery). +For details and examples, see the document [Series-Discovery](../SQL-Manual/UDF-Libraries.md#series-discovery). ```sql # ConsecutiveSequences @@ -1325,7 +1325,7 @@ select consecutivewindows(s1,s2,'length'='10m') from root.test.d1 ### Machine Learning -For details and examples, see the document [Machine-Learning](./UDF-Libraries.md#machine-learning). +For details and examples, see the document [Machine-Learning](../SQL-Manual/UDF-Libraries.md#machine-learning). ```sql # AR @@ -1340,7 +1340,7 @@ select rm(s0, s1,"tb"="3","vb"="2") from root.test.d0 ## LAMBDA EXPRESSION -For details and examples, see the document [Lambda](./UDF-Libraries.md#lambda-expression). 
+For details and examples, see the document [Lambda](../SQL-Manual/UDF-Libraries.md#lambda-expression). ```sql select jexl(temperature, 'expr'='x -> {x + x}') as jexl1, jexl(temperature, 'expr'='x -> {x * 3}') as jexl2, jexl(temperature, 'expr'='x -> {x * x}') as jexl3, jexl(temperature, 'expr'='x -> {multiply(x, 100)}') as jexl4, jexl(temperature, st, 'expr'='(x, y) -> {x + y}') as jexl5, jexl(temperature, st, str, 'expr'='(x, y, z) -> {x + y + z}') as jexl6 from root.ln.wf01.wt01;``` @@ -1348,7 +1348,7 @@ select jexl(temperature, 'expr'='x -> {x + x}') as jexl1, jexl(temperature, 'exp ## CONDITIONAL EXPRESSION -For details and examples, see the document [Conditional Expressions](./UDF-Libraries.md#conditional-expressions). +For details and examples, see the document [Conditional Expressions](../SQL-Manual/UDF-Libraries.md#conditional-expressions). ```sql select T, P, case @@ -1548,7 +1548,7 @@ CQs can't be altered once they're created. To change a CQ, you must `DROP` and r ## USER-DEFINED FUNCTION (UDF) -For more details, see document [Operator-and-Expression](./UDF-Libraries.md). +For more details, see document [Operator-and-Expression](../SQL-Manual/UDF-Libraries.md). ### UDF Registration diff --git a/src/UserGuide/latest/SQL-Manual/UDF-Libraries_apache.md b/src/UserGuide/latest/SQL-Manual/UDF-Libraries_apache.md index 8bab853b8..c2a0dcd54 100644 --- a/src/UserGuide/latest/SQL-Manual/UDF-Libraries_apache.md +++ b/src/UserGuide/latest/SQL-Manual/UDF-Libraries_apache.md @@ -37,7 +37,7 @@ Based on the ability of user-defined functions, IoTDB provides a series of funct | apache-UDF-1.3.2.zip | V1.0.0~V1.3.2 | Please contact Timecho for assistance| 2. Place the library-udf.jar file in the compressed file obtained in the directory `/ext/udf ` of all nodes in the IoTDB cluster -3. In the SQL operation interface of IoTDB's SQL command line terminal (CLI), execute the corresponding function registration statement as follows. +3. 
In the SQL operation interface of IoTDB's SQL command line terminal (CLI), execute the corresponding function registration statement as follows. 4. Batch registration: Two registration methods: registration script or SQL full statement - Register Script - Copy the registration script (register-UDF.sh or register-UDF.bat) from the compressed package to the `tools` directory of IoTDB as needed, and modify the parameters in the script (default is host=127.0.0.1, rpcPort=6667, user=root, pass=root); @@ -46,7 +46,6 @@ Based on the ability of user-defined functions, IoTDB provides a series of funct - All SQL statements - Open the SQl file in the compressed package, copy all SQL statements, and in the SQL operation interface of IoTDB's SQL command line terminal (CLI), execute all SQl statements to batch register UDFs - ## Data Quality ### Completeness diff --git a/src/UserGuide/latest/User-Manual/Data-Sync_timecho.md b/src/UserGuide/latest/User-Manual/Data-Sync_timecho.md index 4669d357f..425bf0118 100644 --- a/src/UserGuide/latest/User-Manual/Data-Sync_timecho.md +++ b/src/UserGuide/latest/User-Manual/Data-Sync_timecho.md @@ -457,6 +457,7 @@ with sink ( | Unknown | GAP‌‌ XL—GAP | No Limit | No Limit | + ### Compression Synchronization (V1.3.3+) IoTDB supports specifying data compression methods during synchronization. Real time compression and transmission of data can be achieved by configuring the `compressor` parameter. `Compressor` currently supports 5 optional algorithms: snappy/gzip/lz4/zstd/lzma2, and can choose multiple compression algorithm combinations to compress in the order of configuration `rate-limit-bytes-per-second`(supported in V1.3.3 and later versions) is the maximum number of bytes allowed to be transmitted per second, calculated as compressed bytes. If it is less than 0, there is no limit. 
diff --git a/src/UserGuide/latest/User-Manual/Data-subscription.md b/src/UserGuide/latest/User-Manual/Data-subscription.md index 3eefc3f8f..250115e36 100644 --- a/src/UserGuide/latest/User-Manual/Data-subscription.md +++ b/src/UserGuide/latest/User-Manual/Data-subscription.md @@ -80,6 +80,7 @@ WITH ( 'start-time' = '2023-01-01', 'end-time' = '2023-12-31' ); +``` #### 3.1.2 Delete Topic diff --git a/src/UserGuide/latest/User-Manual/IoTDB-View_timecho.md b/src/UserGuide/latest/User-Manual/IoTDB-View_timecho.md index b84bfef7a..195847395 100644 --- a/src/UserGuide/latest/User-Manual/IoTDB-View_timecho.md +++ b/src/UserGuide/latest/User-Manual/IoTDB-View_timecho.md @@ -434,8 +434,6 @@ DELETE VIEW root.view.device.avg_temperatue ### View Synchronisation - - #### If the dependent original sequence is deleted When the sequence view is queried (when the sequence is parsed), **the empty result set** is returned if the dependent time series does not exist. diff --git a/src/UserGuide/latest/User-Manual/Load-Balance.md b/src/UserGuide/latest/User-Manual/Load-Balance.md index 3453ea107..45ae3299b 100644 --- a/src/UserGuide/latest/User-Manual/Load-Balance.md +++ b/src/UserGuide/latest/User-Manual/Load-Balance.md @@ -101,4 +101,4 @@ Here is a schematic diagram of the region migration process : ```plain IoTDB> set configuration "wal_throttle_threshold_in_byte"="536870912000" Msg: The statement is executed successfully. 
- ``` + ``` \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/Background-knowledge/Cluster-Concept.md b/src/zh/UserGuide/Master/Table/Background-knowledge/Cluster-Concept.md index ebd6a800e..b4631022f 100644 --- a/src/zh/UserGuide/Master/Table/Background-knowledge/Cluster-Concept.md +++ b/src/zh/UserGuide/Master/Table/Background-knowledge/Cluster-Concept.md @@ -1,3 +1,6 @@ +--- +redirectTo: Cluster-Concept_apache.html +--- - -# 集群相关概念 -下图展示了一个常见的 IoTDB 3C3D1A(3 个 ConfigNode、3 个 DataNode 和 1 个 AINode)的集群部署模式: - - -其中包括了 IoTDB 集群使用中用户常接触到的几个概念,包括: -- **节点**(ConfigNode、DataNode、AINode); -- **槽**(SchemaSlot、DataSlot); -- **Region**(SchemaRegion、DataRegion); -- ***副本组***。 - -下文将重点对以上概念进行介绍。 - -## 节点 -IoTDB 集群包括三种节点(进程),**ConfigNode**(管理节点),**DataNode**(数据节点)和 **AINode**(分析节点),如下所示: -- **ConfigNode**:存储集群的配置信息、数据库的元数据、时间序列元数据和数据的路由信息,监控集群节点并实施负载均衡,所有 ConfigNode 之间互为全量备份,如上图中的 ConfigNode-1,ConfigNode-2 和 ConfigNode-3 所示。ConfigNode 不直接接收客户端读写请求,它会通过一系列[负载均衡算法](../Technical-Insider/Cluster-data-partitioning.md)对集群中元数据和数据的分布提供指导。 -- **DataNode**:负责时间序列元数据和数据的读写,每个 DataNode 都能接收客户端读写请求并提供相应服务,如上图中的 DataNode-1,DataNode-2 和 DataNode-3 所示。接收客户端读写请求时,若 DataNode 缓存有对应的路由信息,它能直接在本地执行或是转发这些请求;否则它会向 ConfigNode 询问并缓存路由信息,以加速后续请求的服务效率。 -- **AINode**:负责与 ConfigNode 和 DataNode 交互来扩展 IoTDB 集群对时间序列进行智能分析的能力,支持从外部引入已有机器学习模型进行注册,并使用注册的模型在指定时序数据上通过简单 SQL 语句完成时序分析任务的过程,将模型的创建、管理及推理融合在数据库引擎中。目前已提供常见时序分析场景(例如预测与异常检测)的机器学习算法或自研模型。 - -## 槽 -IoTDB 内部将元数据和数据划分成多个更小的、更易于管理的单元,每个单元称为一个**槽**。槽是一个逻辑概念,在 IoTDB 集群中,**元数据槽**和**数据槽**定义如下: -- **元数据槽**(SchemaSlot):一部分元数据集合,元数据槽总数固定,默认数量为 1000,IoTDB 使用哈希算法将所有设备均匀地分配到这些元数据槽中。 -- **数据槽**(DataSlot):一部分数据集合,在元数据槽的基础上,将对应设备的数据按时间范围划分为数据槽,默认的时间范围为 7 天。 - -## Region -在 IoTDB 中,元数据和数据被复制到各个 DataNode 以获得集群高可用性。然而以槽为粒度进行复制会增加集群管理成本、降低写入吞吐。因此 IoTDB 引入 **Region** 这一概念,将元数据槽和数据槽分别分配给 SchemaRegion 和 DataRegion 后,以 Region 为单位进行复制。**SchemRegion** 和 **DataRegion** 的详细定义如下: -- **SchemaRegion**:元数据存储和复制的基本单元,集群每个数据库的所有元数据槽会被均匀分配给该数据库的所有 SchemaRegion。拥有相同 
RegionID 的 SchemaRegion 互为副本,如上图中 SchemaRegion-1 拥有三个副本,分别放置于 DataNode-1,DataNode-2 和 DataNode-3。 -- **DataRegion**:数据存储和复制的基本单元,集群每个数据库的所有数据槽会被均匀分配给该数据库的所有 DataRegion。拥有相同 RegionID 的 DataRegion 互为副本,如上图中 DataRegion-2 拥有两个副本,分别放置于 DataNode-1 和 DataNode-2。 - -## 副本组 -Region 的副本对集群的容灾能力至关重要。对于每个 Region 的所有副本,它们的角色分为 **leader** 和 **follower**,共同提供读写服务。不同架构下的副本组配置推荐如下: -| 类别 | 配置项 | 单机推荐配置 | 分布式推荐配置 | -| :-: | :-: | :-: | :-: | -| 元数据 | schema_replication_factor | 1 | 3 | -| 数据 | data_replication_factor | 1 | 2 | \ No newline at end of file +--> \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Table/Background-knowledge/Cluster-Concept_apache.md b/src/zh/UserGuide/Master/Table/Background-knowledge/Cluster-Concept_apache.md new file mode 100644 index 000000000..7121d0564 --- /dev/null +++ b/src/zh/UserGuide/Master/Table/Background-knowledge/Cluster-Concept_apache.md @@ -0,0 +1,107 @@ + + +# 常见概念 + +## 数据模型相关概念 + +| 概念 | 含义 | +| ----------------------- | ------------------------------------------------------------ | +| 数据模型(sql_dialect) | IoTDB 支持两种时序数据模型(SQL语法),管理的对象均为设备和测点树:以层级路径的方式管理数据,一条路径对应一个设备的一个测点表:以关系表的方式管理数据,一张表对应一类设备 | +| 元数据(Schema) | 元数据是数据库的数据模型信息,即树形结构或表结构。包括测点的名称、数据类型等定义。 | +| 设备(Device) | 对应一个实际场景中的物理设备,通常包含多个测点。 | +| 测点(Timeseries) | 又名:物理量、时间序列、时间线、点位、信号量、指标、测量值等。是多个数据点按时间戳递增排列形成的一个时间序列。通常一个测点代表一个采集点位,能够定期采集所在环境的物理量。 | +| 编码(Encoding) | 编码是一种压缩技术,将数据以二进制的形式进行表示,可以提高存储效率。IoTDB 支持多种针对不同类型的数据的编码方法,详细信息请查看:[压缩和编码](../Technical-Insider/Encoding-and-Compression.md) | +| 压缩(Compression) | IoTDB 在数据编码后,使用压缩技术进一步压缩二进制数据,提升存储效率。IoTDB 支持多种压缩方法,详细信息请查看:[压缩和编码](../Technical-Insider/Encoding-and-Compression.md) | + +## 分布式相关概念 + +下图展示了一个常见的 IoTDB 3C3D(3 个 ConfigNode、3 个 DataNode)的集群部署模式: + + + +IoTDB 的集群包括如下常见概念: + +- 节点(ConfigNode、DataNode、AINode) +- Region(SchemaRegion、DataRegion) +- 多副本 + +下文将对以上概念进行介绍。 + + +### 节点 + +IoTDB 集群包括三种节点(进程):ConfigNode(管理节点),DataNode(数据节点)和 AINode(分析节点),如下所示: + +- 
ConfigNode:管理集群的节点信息、配置信息、用户权限、元数据、分区信息等,负责分布式操作的调度和负载均衡,所有 ConfigNode 之间互为全量备份,如上图中的 ConfigNode-1,ConfigNode-2 和 ConfigNode-3 所示。 +- DataNode:服务客户端请求,负责数据的存储和计算,如上图中的 DataNode-1,DataNode-2 和 DataNode-3 所示。 +- AINode:负责提供机器学习能力,支持注册已训练好的机器学习模型,并通过 SQL 调用模型进行推理,目前已内置自研时序大模型和常见的机器学习算法(如预测与异常检测)。 + +### 数据分区 + +在 IoTDB 中,元数据和数据都被分为小的分区,即 Region,由集群的各个 DataNode 进行管理。 + +- SchemaRegion:元数据分区,管理一部分设备和测点的元数据。不同 DataNode 相同 RegionID 的 SchemaRegion 互为副本,如上图中 SchemaRegion-1 拥有三个副本,分别放置于 DataNode-1,DataNode-2 和 DataNode-3。 +- DataRegion:数据分区,管理一部分设备的一段时间的数据。不同 DataNode 相同 RegionID 的 DataRegion 互为副本,如上图中 DataRegion-2 拥有两个副本,分别放置于 DataNode-1 和 DataNode-2。 +- 具体分区算法可参考:[数据分区](../Technical-Insider/Cluster-data-partitioning.md) + +### 多副本 + +数据和元数据的副本数可配置,不同部署模式下的副本数推荐如下配置,其中多副本时可提供高可用服务。 + +| 类别 | 配置项 | 单机推荐配置 | 集群推荐配置 | +| :----- | :------------------------ | :----------- | :----------- | +| 元数据 | schema_replication_factor | 1 | 3 | +| 数据 | data_replication_factor | 1 | 2 | + + +## 部署相关概念 + +IoTDB 有两种运行模式:单机模式、集群模式。 + +### 单机模式 + +IoTDB单机实例包括 1 个ConfigNode、1个DataNode,即1C1D; + +- **特点**:便于开发者安装部署,部署和维护成本较低,操作方便。 +- **适用场景**:资源有限或对高可用要求不高的场景,例如边缘端服务器。 +- **部署方法**:[单机版部署](../Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md) + + +### 集群模式 + +IoTDB 集群实例为 3 个ConfigNode 和不少于 3 个 DataNode,通常为 3 个 DataNode,即3C3D;当部分节点出现故障时,剩余节点仍然能对外提供服务,保证数据库服务的高可用性,且可随节点增加提升数据库性能。 + +- **特点**:具有高可用性、高扩展性,可通过增加 DataNode 提高系统性能。 +- **适用场景**:需要提供高可用和可靠性的企业级应用场景。 +- **部署方法**:[集群版部署](../Deployment-and-Maintenance/Cluster-Deployment_timecho.md) + +### 特点总结 + +| 维度 | 单机模式 | 集群模式 | +| ------------ | ---------------------------- | ------------------------ | +| 适用场景 | 边缘侧部署、对高可用要求不高 | 高可用性业务、容灾场景等 | +| 所需机器数量 | 1 | ≥3 | +| 安全可靠性 | 无法容忍单点故障 | 高,可容忍单点故障 | +| 扩展性 | 可扩展 DataNode 提升性能 | 可扩展 DataNode 提升性能 | +| 性能 | 可随 DataNode 数量扩展 | 可随 DataNode 数量扩展 | + +- 单机模式和集群模式,部署步骤类似(逐个增加 ConfigNode 和 DataNode),仅副本数和可提供服务的最少节点数不同。 \ No newline at end of file diff --git 
a/src/zh/UserGuide/V2.0.1/Table/Background-knowledge/Cluster-Concept_timecho.md b/src/zh/UserGuide/Master/Table/Background-knowledge/Cluster-Concept_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Background-knowledge/Cluster-Concept_timecho.md rename to src/zh/UserGuide/Master/Table/Background-knowledge/Cluster-Concept_timecho.md diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Data-Model-and-Terminology.md b/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Model-and-Terminology.md similarity index 99% rename from src/zh/UserGuide/Master/Table/Basic-Concept/Data-Model-and-Terminology.md rename to src/zh/UserGuide/Master/Table/Background-knowledge/Data-Model-and-Terminology.md index 3f2e5c275..bf396459a 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Data-Model-and-Terminology.md +++ b/src/zh/UserGuide/Master/Table/Background-knowledge/Data-Model-and-Terminology.md @@ -25,7 +25,7 @@ ## 1 时序数据模型 -在构建IoTDB建模方案前,需要先了解时序数据和时序数据模型,详细内容见此页面:[时序数据模型](../Basic-Concept/Navigating_Time_Series_Data.md) +在构建IoTDB建模方案前,需要先了解时序数据和时序数据模型,详细内容见此页面:[时序数据模型](../Background-knowledge/Navigating_Time_Series_Data.md) ## 2 IoTDB 的两种时序模型 diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Navigating_Time_Series_Data.md b/src/zh/UserGuide/Master/Table/Background-knowledge/Navigating_Time_Series_Data.md similarity index 100% rename from src/zh/UserGuide/Master/Table/Basic-Concept/Navigating_Time_Series_Data.md rename to src/zh/UserGuide/Master/Table/Background-knowledge/Navigating_Time_Series_Data.md diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Database-Management.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Database-Management.md new file mode 100644 index 000000000..4806f6996 --- /dev/null +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Database-Management.md @@ -0,0 +1,175 @@ + + +# 数据库管理 + +## 1. 数据库管理 + +### 1.1 创建数据库 + +用于创建数据库。 + +**语法:** + +```SQL + CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? 
+``` + +**说明:** + +1. 数据库名称,具有以下特性: + - 大小写不敏感 + - 名称的长度不得超过 64 个字符。 + - 名称中包含下划线(_)、数字(非开头)、英文字母可以直接创建 + - 名称中包含特殊字符(如`)、中文字符、数字开头时,必须用双引号 "" 括起来。 + +2. WITH properties 子句可配置如下属性: + +> 注:属性的大小写不敏感,且暂不支持修改,有关详细信息[大小写敏感规则](../SQL-Manual/Identifier.md#大小写敏感性)。 + +| 属性 | 含义 | 默认值 | +| ------------------------- | ---------------------------------------- | --------- | +| `TTL` | 数据自动过期删除,单位 ms | INF | +| `TIME_PARTITION_INTERVAL` | 数据库的时间分区间隔,单位 ms | 604800000 | +| `SCHEMA_REGION_GROUP_NUM` | 数据库的元数据副本组数量,一般不需要修改 | 1 | +| `DATA_REGION_GROUP_NUM` | 数据库的数据副本组数量,一般不需要修改 | 2 | + +**示例:** + +```SQL +CREATE DATABASE database1; +CREATE DATABASE IF NOT EXISTS database1; + +// 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年。 +CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); +``` + +### 1.2 使用数据库 + +用于指定当前数据库作为表的命名空间。 + +**语法:** + +```SQL +USE +``` + +**示例:** + +```SQL +USE database1 +``` + +### 1.3 查看当前数据库 + +返回当前会话所连接的数据库名称,若未执行过 `use`语句指定数据库,则默认为 `null`。 + +**语法:** + +```SQL +SHOW CURRENT_DATABASE +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| null| ++---------------+ + +IoTDB> USE test; + +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| iot_database| ++---------------+ +``` + +### 1.4 查看所有数据库 + +用于查看所有数据库和数据库的属性信息。 + +**语法:** + +```SQL +SHOW DATABASES (DETAILS)? 
+``` + +**语句返回列含义如下:** + +| 列名 | 含义 | +| ----------------------- | ------------------------------------------------------------ | +| database | database名称。 | +| TTL | 数据保留周期。如果在创建数据库的时候指定TTL,则TTL对该数据库下所有表的TTL生效。也可以再通过 [create table](#创建表) 、[alter table](#修改表) 来设置或更新表的TTL时间。 | +| SchemaReplicationFactor | 元数据副本数,用于确保元数据的高可用性。可以在`iotdb-system.properties`中修改`schema_replication_factor`配置项。 | +| DataReplicationFactor | 数据副本数,用于确保数据的高可用性。可以在`iotdb-system.properties`中修改`data_replication_factor`配置项。 | +| TimePartitionInterval | 时间分区间隔,决定了数据在磁盘上按多长时间进行目录分组,通常采用默认1周即可。 | +| SchemaRegionGroupNum | 使用`DETAILS`语句会返回此列,展示数据库的元数据副本组数量,一般不需要修改 | +| DataRegionGroupNum | 使用`DETAILS`语句会返回此列,展示数据库的数据副本组数量,一般不需要修改 | + +**示例:** + +```SQL +IoTDB> show databases ++---------+-------+-----------------------+---------------------+---------------------+ +| Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| ++---------+-------+-----------------------+---------------------+---------------------+ +|test_prop| 300| 3| 2| 100000| +| test2| 300| 3| 2| 604800000| ++---------+-------+-----------------------+---------------------+---------------------+ +IoTDB> show databases details ++---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ +| Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum| DataRegionGroupNum| ++---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ +|test_prop| 300| 3| 2| 100000| 1| 2| +| test2| 300| 3| 2| 604800000| 1| 2| ++---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ +``` + +### 1.5 修改数据库 + +暂不支持,V2.0.2.1后支持 + +### 1.6 删除数据库 + +用于删除数据库。 + +**语法:** + +```SQL +DROP DATABASE (IF EXISTS)? +``` + +**说明:** + +1. 数据库已被设置为当前使用(use)的数据库,仍然可以被删除(drop)。 +2. 
删除数据库将导致所选数据库及其内所有表连同其存储的数据一并被删除。 + +**示例:** + +```SQL +DROP DATABASE IF EXISTS database1 +``` diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Delete-Data.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Delete-Data.md index d1b96f221..444f4e971 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Delete-Data.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Delete-Data.md @@ -57,7 +57,7 @@ ID_CONDITION: ### 1.2 示例: -可以在[示例数据页面](../Basic-Concept/Sample-Data.md)中导入示例数据。可以使用这些数据来测试和执行示例中的SQL语句。 +可以在[示例数据页面](../Reference/Sample-Data.md)中导入示例数据。可以使用这些数据来测试和执行示例中的SQL语句。 #### 1.2.1 删除全表数据 diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data.md index 169f3590b..b28ebbbce 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Query-Data.md @@ -63,7 +63,7 @@ IoTDB 查询语法提供以下子句: ### 3.1 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 ### 3.2 原始数据查询 diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/TTL-Delete-Data.md b/src/zh/UserGuide/Master/Table/Basic-Concept/TTL-Delete-Data.md index 86fb6560d..729649cd4 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/TTL-Delete-Data.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/TTL-Delete-Data.md @@ -40,7 +40,7 @@ IoTDB支持表级的数据自动过期删除(TTL)设置,允许系统自动 ### 2.1 为表设置 TTL -如果在建表时通过sql语句设置了表的 TTL,则会以表的ttl为准。建表语句详情可见:[数据库&表管理](../Basic-Concept//Database&Table-Management.md) +如果在建表时通过sql语句设置了表的 TTL,则会以表的ttl为准。建表语句详情可见:[表管理](../Basic-Concept/Table-Management.md) 示例1:创建表时设置 TTL @@ -64,7 +64,7 @@ ALTER TABLE tableB set properties TTL=DEFAULT ### 2.2 为数据库设置 TTL -没有设置表的TTL,则会继承database的ttl。建数据库语句详情可见:[数据库&表管理](../Basic-Concept/Database&Table-Management.md) 
+没有设置表的TTL,则会继承database的ttl。建数据库语句详情可见:[数据库管理](../Basic-Concept/Database-Management.md) 示例4:数据库设置为 ttl =3600000,将生成一个ttl=3600000的表: @@ -100,7 +100,7 @@ ALTER TABLE tableB set properties TTL='INF' ## 4. 查看 TTL 信息 -使用 "SHOW DATABASES" 和 "SHOW TABLES" 命令可以直接显示数据库和表的 TTL 详情。数据库和表管理语句详情可见:[数据库&表管理](../Basic-Concept/Database&Table-Management.md) +使用 "SHOW DATABASES" 和 "SHOW TABLES" 命令可以直接显示数据库和表的 TTL 详情。数据库和表管理语句详情可见:[数据库管理](../Basic-Concept/Database-Management.md)、[表管理](../Basic-Concept/Table-Management.md) > 注意,树模型数据库的TTL也将显示。 diff --git a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Database&Table-Management.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management.md similarity index 60% rename from src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Database&Table-Management.md rename to src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management.md index 77559792b..c2b59707a 100644 --- a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Database&Table-Management.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management.md @@ -19,166 +19,13 @@ --> -# 数据库&表管理 +# 表管理 -## 1. 数据库管理 +## 1. 表管理 -### 1.1 创建数据库 +### 1.1 创建表 -用于创建数据库。 - -**语法:** - -```SQL - CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? -``` - -**说明:** - -1. 数据库名称,具有以下特性: - - 大小写不敏感 - - 名称的长度不得超过 64 个字符。 - - 名称中包含下划线(_)、数字(非开头)、英文字母可以直接创建 - - 名称中包含特殊字符(如`)、中文字符、数字开头时,必须用双引号 "" 括起来。 - -2. 
WITH properties 子句可配置如下属性: - -> 注:属性的大小写不敏感,且暂不支持修改,有关详细信息[大小写敏感规则](../SQL-Manual/Identifier.md#大小写敏感性)。 - -| 属性 | 含义 | 默认值 | -| ------------------------- | ---------------------------------------- | --------- | -| `TTL` | 数据自动过期删除,单位 ms | INF | -| `TIME_PARTITION_INTERVAL` | 数据库的时间分区间隔,单位 ms | 604800000 | -| `SCHEMA_REGION_GROUP_NUM` | 数据库的元数据副本组数量,一般不需要修改 | 1 | -| `DATA_REGION_GROUP_NUM` | 数据库的数据副本组数量,一般不需要修改 | 2 | - -**示例:** - -```SQL -CREATE DATABASE database1; -CREATE DATABASE IF NOT EXISTS database1; - -// 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年。 -CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); -``` - -### 1.2 使用数据库 - -用于指定当前数据库作为表的命名空间。 - -**语法:** - -```SQL -USE -``` - -**示例:** - -```SQL -USE database1 -``` - -### 1.3 查看当前数据库 - -返回当前会话所连接的数据库名称,若未执行过 `use`语句指定数据库,则默认为 `null`。 - -**语法:** - -```SQL -SHOW CURRENT_DATABASE -``` - -**示例:** - -```SQL -IoTDB> SHOW CURRENT_DATABASE; -+---------------+ -|CurrentDatabase| -+---------------+ -| null| -+---------------+ - -IoTDB> USE test; - -IoTDB> SHOW CURRENT_DATABASE; -+---------------+ -|CurrentDatabase| -+---------------+ -| iot_database| -+---------------+ -``` - -### 1.4 查看所有数据库 - -用于查看所有数据库和数据库的属性信息。 - -**语法:** - -```SQL -SHOW DATABASES (DETAILS)? 
-``` - -**语句返回列含义如下:** - -| 列名 | 含义 | -| ----------------------- | ------------------------------------------------------------ | -| database | database名称。 | -| TTL | 数据保留周期。如果在创建数据库的时候指定TTL,则TTL对该数据库下所有表的TTL生效。也可以再通过 [create table](#创建表) 、[alter table](#修改表) 来设置或更新表的TTL时间。 | -| SchemaReplicationFactor | 元数据副本数,用于确保元数据的高可用性。可以在`iotdb-system.properties`中修改`schema_replication_factor`配置项。 | -| DataReplicationFactor | 数据副本数,用于确保数据的高可用性。可以在`iotdb-system.properties`中修改`data_replication_factor`配置项。 | -| TimePartitionInterval | 时间分区间隔,决定了数据在磁盘上按多长时间进行目录分组,通常采用默认1周即可。 | -| SchemaRegionGroupNum | 使用`DETAILS`语句会返回此列,展示数据库的元数据副本组数量,一般不需要修改 | -| DataRegionGroupNum | 使用`DETAILS`语句会返回此列,展示数据库的数据副本组数量,一般不需要修改 | - -**示例:** - -```SQL -IoTDB> show databases -+---------+-------+-----------------------+---------------------+---------------------+ -| Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| -+---------+-------+-----------------------+---------------------+---------------------+ -|test_prop| 300| 3| 2| 100000| -| test2| 300| 3| 2| 604800000| -+---------+-------+-----------------------+---------------------+---------------------+ -IoTDB> show databases details -+---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ -| Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum| DataRegionGroupNum| -+---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ -|test_prop| 300| 3| 2| 100000| 1| 2| -| test2| 300| 3| 2| 604800000| 1| 2| -+---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ -``` - -### 1.5 修改数据库 - -暂不支持,V2.0.2.1后支持 - -### 1.6 删除数据库 - -用于删除数据库。 - -**语法:** - -```SQL -DROP DATABASE (IF EXISTS)? -``` - -**说明:** - -1. 数据库已被设置为当前使用(use)的数据库,仍然可以被删除(drop)。 -2. 
删除数据库将导致所选数据库及其内所有表连同其存储的数据一并被删除。 - -**示例:** - -```SQL -DROP DATABASE IF EXISTS database1 -``` - -## 2. 表管理 - -### 2.1 创建表 - -#### 2.1.1 通过 Create 语句手动创建表 +#### 1.1.1 通过 Create 语句手动创建表 用于在当前数据库中创建表,也可以对任何指定数据库创建表,格式为“数据库名.表名”。 @@ -247,7 +94,7 @@ CREATE TABLE tableC ( ) with (TTL=DEFAULT); ``` -#### 2.1.2 通过 Session 写入自动创建表 +#### 1.1.2 通过 Session 写入自动创建表 在通过 Session 进行数据写入时,IoTDB 能够根据写入请求中的信息自动构建表结构,无需用户事先手动创建表即可直接执行数据写入操作。 @@ -323,7 +170,7 @@ IoTDB> desc table1 +-----------+---------+-----------+ ``` -### 2.2 查看表 +### 1.2 查看表 用于查看该数据库中或指定数据库中的所有表和表库的属性信息。 @@ -362,7 +209,7 @@ IoTDB> show tables details from test_db +---------+-------+----------+ ``` -### 2.3 查看表的列 +### 1.3 查看表的列 用于查看表的列名、数据类型、类别、状态。 @@ -403,7 +250,7 @@ IoTDB> desc tableB details +----------+---------+-----------+----------+ ``` -### 2.4 修改表 +### 1.4 修改表 用于修改表,包括添加列、删除列以及设置表的属性。 @@ -428,7 +275,7 @@ ALTER TABLE tableB ADD COLUMN IF NOT EXISTS a TAG ALTER TABLE tableB set properties TTL=3600 ``` -### 2.5 删除表 +### 1.5 删除表 用于删除表。 diff --git a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md new file mode 100644 index 000000000..b66dfa675 --- /dev/null +++ b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md @@ -0,0 +1,326 @@ + +# 集群版安装部署 + +本小节描述如何手动部署包括3个ConfigNode和3个DataNode的实例,即通常所说的3C3D集群。 + +
+ +
+ +## 1 注意事项 + +1. 安装前请确认系统已参照[系统配置](../Deployment-and-Maintenance/Environment-Requirements.md)准备完成。 + +2. 推荐使用`hostname`进行IP配置,可避免后期修改主机ip导致数据库无法启动的问题。设置hostname需要在服务器上配`/etc/hosts`,如本机ip是11.101.17.224,hostname是iotdb-1,则可以使用以下命令设置服务器的 hostname,并使用hostname配置IoTDB的`cn_internal_address`、`dn_internal_address`。 + + ```shell + echo "11.101.17.224 iotdb-1" >> /etc/hosts + ``` + +3. 有些参数首次启动后不能修改,请参考下方的[参数配置](#参数配置)章节来进行设置。 + +4. 无论是在linux还是windows中,请确保IoTDB的安装路径中不含空格和中文,避免软件运行异常。 + +5. 请注意,安装部署(包括激活和使用软件)IoTDB时,您可以: + +- 使用 root 用户(推荐):可以避免权限等问题。 + +- 使用固定的非 root 用户: + + - 使用同一用户操作:确保在启动、激活、停止等操作均保持使用同一用户,不要切换用户。 + + - 避免使用 sudo:使用 sudo 命令会以 root 用户权限执行命令,可能会引起权限混淆或安全问题。 + +6. 推荐部署监控面板,可以对重要运行指标进行监控,随时掌握数据库运行状态,监控面板可以联系商务获取,部署监控面板步骤可以参考:[监控面板部署](./Monitoring-panel-deployment.md) + +## 2 准备步骤 + +1. 准备IoTDB数据库安装包 :timechodb-{version}-bin.zip(安装包获取见:[链接](./IoTDB-Package_timecho.md)) +2. 按环境要求配置好操作系统环境(系统环境配置见:[链接](./Environment-Requirements.md)) + +## 3 安装步骤 + +假设现在有3台linux服务器,IP地址和服务角色分配如下: + +| 节点ip | 主机名 | 服务 | +| ------------- | ------- | -------------------- | +| 11.101.17.224 | iotdb-1 | ConfigNode、DataNode | +| 11.101.17.225 | iotdb-2 | ConfigNode、DataNode | +| 11.101.17.226 | iotdb-3 | ConfigNode、DataNode | + +### 3.1 设置主机名 + +在3台机器上分别配置主机名,设置主机名需要在目标服务器上配置/etc/hosts,使用如下命令: + +```shell +echo "11.101.17.224 iotdb-1" >> /etc/hosts +echo "11.101.17.225 iotdb-2" >> /etc/hosts +echo "11.101.17.226 iotdb-3" >> /etc/hosts +``` + +### 3.2 参数配置 + +解压安装包并进入安装目录 + +```shell +unzip timechodb-{version}-bin.zip +cd timechodb-{version}-bin +``` + +#### 3.2.1 环境脚本配置 + +- ./conf/confignode-env.sh配置 + +| **配置项** | **说明** | **默认值** | **推荐值** | 备注 | +| :---------- | :------------------------------------- | :--------- | :----------------------------------------------- | :----------- | +| MEMORY_SIZE | IoTDB ConfigNode节点可以使用的内存总量 | 空 | 可按需填写,填写后系统会根据填写的数值来分配内存 | 重启服务生效 | + +- ./conf/datanode-env.sh配置 + +| **配置项** | **说明** | **默认值** | **推荐值** | 备注 | +| :---------- | 
:----------------------------------- | :--------- | :----------------------------------------------- | :----------- | +| MEMORY_SIZE | IoTDB DataNode节点可以使用的内存总量 | 空 | 可按需填写,填写后系统会根据填写的数值来分配内存 | 重启服务生效 | + +#### 3.2.2 通用配置(./conf/iotdb-system.properties) + +- 集群配置 + +| 配置项 | 说明 | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 | +| ------------------------- | ---------------------------------------- | -------------- | -------------- | -------------- | +| cluster_name | 集群名称 | defaultCluster | defaultCluster | defaultCluster | +| schema_replication_factor | 元数据副本数,DataNode数量不应少于此数目 | 3 | 3 | 3 | +| data_replication_factor | 数据副本数,DataNode数量不应少于此数目 | 2 | 2 | 2 | + +#### 3.2.3 ConfigNode 配置 + +| 配置项 | 说明 | 默认 | 推荐值 | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 | 备注 | +| ------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------- | ------------- | ------------- | ------------- | ------------------ | +| cn_internal_address | ConfigNode在集群内部通讯使用的地址 | 127.0.0.1 | 所在服务器的IPV4地址或hostname,推荐使用hostname | iotdb-1 | iotdb-2 | iotdb-3 | 首次启动后不能修改 | +| cn_internal_port | ConfigNode在集群内部通讯使用的端口 | 10710 | 10710 | 10710 | 10710 | 10710 | 首次启动后不能修改 | +| cn_consensus_port | ConfigNode副本组共识协议通信使用的端口 | 10720 | 10720 | 10720 | 10720 | 10720 | 首次启动后不能修改 | +| cn_seed_config_node | 节点注册加入集群时连接的ConfigNode 的地址,cn_internal_address:cn_internal_port | 127.0.0.1:10710 | 第一个CongfigNode的cn_internal_address:cn_internal_port | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | 首次启动后不能修改 | + +#### 3.2.4 DataNode 配置 + +| 配置项 | 说明 | 默认 | 推荐值 | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 | 备注 | +| ------------------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------- | ------------- | ------------- | ------------- | ------------------ | +| dn_rpc_address | 客户端 RPC 服务的地址 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 
0.0.0.0 | 重启服务生效 | +| dn_rpc_port | 客户端 RPC 服务的端口 | 6667 | 6667 | 6667 | 6667 | 6667 | 重启服务生效 | +| dn_internal_address | DataNode在集群内部通讯使用的地址 | 127.0.0.1 | 所在服务器的IPV4地址或hostname,推荐使用hostname | iotdb-1 | iotdb-2 | iotdb-3 | 首次启动后不能修改 | +| dn_internal_port | DataNode在集群内部通信使用的端口 | 10730 | 10730 | 10730 | 10730 | 10730 | 首次启动后不能修改 | +| dn_mpp_data_exchange_port | DataNode用于接收数据流使用的端口 | 10740 | 10740 | 10740 | 10740 | 10740 | 首次启动后不能修改 | +| dn_data_region_consensus_port | DataNode用于数据副本共识协议通信使用的端口 | 10750 | 10750 | 10750 | 10750 | 10750 | 首次启动后不能修改 | +| dn_schema_region_consensus_port | DataNode用于元数据副本共识协议通信使用的端口 | 10760 | 10760 | 10760 | 10760 | 10760 | 首次启动后不能修改 | +| dn_seed_config_node | 节点注册加入集群时连接的ConfigNode地址,即cn_internal_address:cn_internal_port | 127.0.0.1:10710 | 第一个CongfigNode的cn_internal_address:cn_internal_port | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | 首次启动后不能修改 | + +> ❗️注意:VSCode Remote等编辑器无自动保存配置功能,请确保修改的文件被持久化保存,否则配置项无法生效 + +### 3.3 启动ConfigNode节点 + +先启动第一个iotdb-1的confignode, 保证种子confignode节点先启动,然后依次启动第2和第3个confignode节点 + +```shell +cd sbin +./start-confignode.sh -d #“-d”参数将在后台进行启动 +``` + +如果启动失败,请参考下[常见问题](#常见问题) + +### 3.4 启动DataNode 节点 + + 分别进入iotdb的sbin目录下,依次启动3个datanode节点: + +```shell +cd sbin +./start-datanode.sh -d #-d参数将在后台进行启动 +``` + +### 3.5 验证激活 + +当看到“Result”字段状态显示为success表示激活成功 + +![](https://alioss.timecho.com/docs/img/%E9%9B%86%E7%BE%A4-%E9%AA%8C%E8%AF%81.png) + +## 4 节点维护步骤 + +### 4.1 ConfigNode节点维护 + +ConfigNode节点维护分为ConfigNode添加和移除两种操作,有两个常见使用场景: + +- 集群扩展:如集群中只有1个ConfigNode时,希望增加ConfigNode以提升ConfigNode节点高可用性,则可以添加2个ConfigNode,使得集群中有3个ConfigNode。 +- 集群故障恢复:1个ConfigNode所在机器发生故障,使得该ConfigNode无法正常运行,此时可以移除该ConfigNode,然后添加一个新的ConfigNode进入集群。 + +> ❗️注意,在完成ConfigNode节点维护后,需要保证集群中有1或者3个正常运行的ConfigNode。2个ConfigNode不具备高可用性,超过3个ConfigNode会导致性能损失。 + +#### 4.1.1 添加ConfigNode节点 + +脚本命令: + +```shell +# Linux / MacOS +# 首先切换到IoTDB根目录 +sbin/start-confignode.sh + +# Windows +# 首先切换到IoTDB根目录 +sbin/start-confignode.bat +``` + +#### 4.1.2 
移除ConfigNode节点 + +首先通过CLI连接集群,通过`show confignodes`确认想要移除ConfigNode的内部地址与端口号: + +```shell +IoTDB> show confignodes ++------+-------+---------------+------------+--------+ +|NodeID| Status|InternalAddress|InternalPort| Role| ++------+-------+---------------+------------+--------+ +| 0|Running| 127.0.0.1| 10710| Leader| +| 1|Running| 127.0.0.1| 10711|Follower| +| 2|Running| 127.0.0.1| 10712|Follower| ++------+-------+---------------+------------+--------+ +Total line number = 3 +It costs 0.030s +``` + +然后使用脚本将DataNode移除。脚本命令: + +```Bash +# Linux / MacOS +sbin/remove-confignode.sh [confignode_id] +或 +./sbin/remove-confignode.sh [cn_internal_address:cn_internal_port] + +#Windows +sbin/remove-confignode.bat [confignode_id] +或 +./sbin/remove-confignode.bat [cn_internal_address:cn_internal_port] +``` + +### 4.2 DataNode节点维护 + +DataNode节点维护有两个常见场景: + +- 集群扩容:出于集群能力扩容等目的,添加新的DataNode进入集群 +- 集群故障恢复:一个DataNode所在机器出现故障,使得该DataNode无法正常运行,此时可以移除该DataNode,并添加新的DataNode进入集群 + +> ❗️注意,为了使集群能正常工作,在DataNode节点维护过程中以及维护完成后,正常运行的DataNode总数不得少于数据副本数(通常为2),也不得少于元数据副本数(通常为3)。 + +#### 4.2.1 添加DataNode节点 + +脚本命令: + +```Bash +# Linux / MacOS +# 首先切换到IoTDB根目录 +sbin/start-datanode.sh + +#Windows +# 首先切换到IoTDB根目录 +sbin/start-datanode.bat +``` + +说明:在添加DataNode后,随着新的写入到来(以及旧数据过期,如果设置了TTL),集群负载会逐渐向新的DataNode均衡,最终在所有节点上达到存算资源的均衡。 + +#### 4.2.2 移除DataNode节点 + +首先通过CLI连接集群,通过`show datanodes`确认想要移除的DataNode的RPC地址与端口号: + +```Bash +IoTDB> show datanodes ++------+-------+----------+-------+-------------+---------------+ +|NodeID| Status|RpcAddress|RpcPort|DataRegionNum|SchemaRegionNum| ++------+-------+----------+-------+-------------+---------------+ +| 1|Running| 0.0.0.0| 6667| 0| 0| +| 2|Running| 0.0.0.0| 6668| 1| 1| +| 3|Running| 0.0.0.0| 6669| 1| 0| ++------+-------+----------+-------+-------------+---------------+ +Total line number = 3 +It costs 0.110s +``` + +然后使用脚本将DataNode移除。脚本命令: + +```Bash +# Linux / MacOS +sbin/remove-datanode.sh [dn_rpc_address:dn_rpc_port] + +#Windows 
+sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port] +``` + +## 5 常见问题 + +1. 部署过程中多次提示激活失败 + - 使用 `ls -al` 命令:使用 `ls -al` 命令检查安装包根目录的所有者信息是否为当前用户。 + - 检查激活目录:检查 `./activation` 目录下的所有文件,所有者信息是否为当前用户。 +2. Confignode节点启动失败 + - 步骤 1: 请查看启动日志,检查是否修改了某些首次启动后不可改的参数。 + - 步骤 2: 请查看启动日志,检查是否出现其他异常。日志中若存在异常现象,请联系天谋技术支持人员咨询解决方案。 + - 步骤 3: 如果是首次部署或者数据可删除,也可按下述步骤清理环境,重新部署后,再次启动。 + - 清理环境: + + 1. 结束所有 ConfigNode 和 DataNode 进程。 + ```Bash + # 1. 停止 ConfigNode 和 DataNode 服务 + sbin/stop-standalone.sh + + # 2. 检查是否还有进程残留 + jps + # 或者 + ps -ef|grep iotdb + + # 3. 如果有进程残留,则手动kill + kill -9 + # 如果确定机器上仅有1个iotdb,可以使用下面命令清理残留进程 + ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9 + ``` + + 2. 删除 data 和 logs 目录。 + - 说明:删除 data 目录是必要的,删除 logs 目录是为了纯净日志,非必需。 + ```shell + cd /data/iotdb rm -rf data logs + ``` +## 6 附录 + +### 6.1 Confignode节点参数介绍 + +| 参数 | 描述 | 是否为必填项 | +| :--- | :------------------------------- | :----------- | +| -d | 以守护进程模式启动,即在后台运行 | 否 | + +### 6.2 Datanode节点参数介绍 + +| 缩写 | 描述 | 是否为必填项 | +| :--- | :--------------------------------------------- | :----------- | +| -v | 显示版本信息 | 否 | +| -f | 在前台运行脚本,不将其放到后台 | 否 | +| -d | 以守护进程模式启动,即在后台运行 | 否 | +| -p | 指定一个文件来存放进程ID,用于进程管理 | 否 | +| -c | 指定配置文件夹的路径,脚本会从这里加载配置文件 | 否 | +| -g | 打印垃圾回收(GC)的详细信息 | 否 | +| -H | 指定Java堆转储文件的路径,当JVM内存溢出时使用 | 否 | +| -E | 指定JVM错误日志文件的路径 | 否 | +| -D | 定义系统属性,格式为 key=value | 否 | +| -X | 直接传递 -XX 参数给 JVM | 否 | +| -h | 帮助指令 | 否 | + diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/IoTDB-Package_apache.md rename to src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/IoTDB-Package_apache.md diff --git a/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md 
b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md new file mode 100644 index 000000000..9205ed436 --- /dev/null +++ b/src/zh/UserGuide/Master/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md @@ -0,0 +1,180 @@ + +# 单机版安装部署 + +本章将介绍如何启动IoTDB单机实例,IoTDB单机实例包括 1 个ConfigNode 和1个DataNode(即通常所说的1C1D)。 + +## 1 注意事项 + +1. 安装前请确认系统已参照[系统配置](../Deployment-and-Maintenance/Environment-Requirements.md)准备完成。 +2. 推荐使用`hostname`进行IP配置,可避免后期修改主机ip导致数据库无法启动的问题。设置hostname需要在服务器上配置`/etc/hosts`,如本机ip是192.168.1.3,hostname是iotdb-1,则可以使用以下命令设置服务器的 hostname,并使用hostname配置IoTDB的 `cn_internal_address`、`dn_internal_address`。 + + ```shell + echo "192.168.1.3 iotdb-1" >> /etc/hosts + ``` + +3. 部分参数首次启动后不能修改,请参考下方的[参数配置](#2参数配置)章节进行设置。 +4. 无论是在linux还是windows中,请确保IoTDB的安装路径中不含空格和中文,避免软件运行异常。 +5. 请注意,安装部署(包括激活和使用软件)IoTDB时,您可以: + - 使用 root 用户(推荐):可以避免权限等问题。 + - 使用固定的非 root 用户: + - 使用同一用户操作:确保在启动、激活、停止等操作均保持使用同一用户,不要切换用户。 + - 避免使用 sudo:使用 sudo 命令会以 root 用户权限执行命令,可能会引起权限混淆或安全问题。 +6. 
推荐部署监控面板,可以对重要运行指标进行监控,随时掌握数据库运行状态,监控面板可以联系工作人员获取,部署监控面板步骤可以参考:[监控面板部署](../Deployment-and-Maintenance/Monitoring-panel-deployment.md) + +## 2 安装步骤 + +### 2.1 解压安装包并进入安装目录 + +```Plain +unzip timechodb-{version}-bin.zip +cd timechodb-{version}-bin +``` + +### 2.2 参数配置 + +#### 2.2.1 内存配置 + +- conf/confignode-env.sh(或 .bat) + +| **配置项** | **说明** | **默认值** | **推荐值** | 备注 | +| :---------- | :------------------------------------- | :--------- | :----------------------------------------------- | :----------- | +| MEMORY_SIZE | IoTDB ConfigNode节点可以使用的内存总量 | 空 | 可按需填写,填写后系统会根据填写的数值来分配内存 | 重启服务生效 | + +- conf/datanode-env.sh(或 .bat) + +| **配置项** | **说明** | **默认值** | **推荐值** | 备注 | +| :---------- | :----------------------------------- | :--------- | :----------------------------------------------- | :----------- | +| MEMORY_SIZE | IoTDB DataNode节点可以使用的内存总量 | 空 | 可按需填写,填写后系统会根据填写的数值来分配内存 | 重启服务生效 | + +#### 2.2.2 功能配置 + +系统实际生效的参数在文件 conf/iotdb-system.properties 中,启动需设置以下参数,可以从 conf/iotdb-system.properties.template 文件中查看全部参数 + +集群级功能配置 + +| **配置项** | **说明** | **默认值** | **推荐值** | 备注 | +| :------------------------ | :------------------------------- | :------------- | :----------------------------------------------- | :------------------------ | +| cluster_name | 集群名称 | defaultCluster | 可根据需要设置集群名称,如无特殊需要保持默认即可 | 首次启动后不可修改 | +| schema_replication_factor | 元数据副本数,单机版此处设置为 1 | 1 | 1 | 默认1,首次启动后不可修改 | +| data_replication_factor | 数据副本数,单机版此处设置为 1 | 1 | 1 | 默认1,首次启动后不可修改 | + +ConfigNode 配置 + +| **配置项** | **说明** | **默认** | 推荐值 | **备注** | +| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------- | :----------------- | +| cn_internal_address | ConfigNode在集群内部通讯使用的地址 | 127.0.0.1 | 所在服务器的IPV4地址或hostname,推荐使用hostname | 首次启动后不能修改 | +| cn_internal_port | ConfigNode在集群内部通讯使用的端口 | 10710 | 10710 | 首次启动后不能修改 | +| cn_consensus_port | ConfigNode副本组共识协议通信使用的端口 | 10720 | 10720 | 首次启动后不能修改 | +| 
cn_seed_config_node | 节点注册加入集群时连接的ConfigNode 的地址,cn_internal_address:cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | 首次启动后不能修改 | + +DataNode 配置 + +| **配置项** | **说明** | **默认** | 推荐值 | **备注** | +| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------- | :----------------- | +| dn_rpc_address | 客户端 RPC 服务的地址 | 0.0.0.0 | 0.0.0.0 | 重启服务生效 | +| dn_rpc_port | 客户端 RPC 服务的端口 | 6667 | 6667 | 重启服务生效 | +| dn_internal_address | DataNode在集群内部通讯使用的地址 | 127.0.0.1 | 所在服务器的IPV4地址或hostname,推荐使用hostname | 首次启动后不能修改 | +| dn_internal_port | DataNode在集群内部通信使用的端口 | 10730 | 10730 | 首次启动后不能修改 | +| dn_mpp_data_exchange_port | DataNode用于接收数据流使用的端口 | 10740 | 10740 | 首次启动后不能修改 | +| dn_data_region_consensus_port | DataNode用于数据副本共识协议通信使用的端口 | 10750 | 10750 | 首次启动后不能修改 | +| dn_schema_region_consensus_port | DataNode用于元数据副本共识协议通信使用的端口 | 10760 | 10760 | 首次启动后不能修改 | +| dn_seed_config_node | 节点注册加入集群时连接的ConfigNode地址,即cn_internal_address:cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | 首次启动后不能修改 | + +### 2.3 启动 ConfigNode 节点 + +进入iotdb的sbin目录下,启动confignode + +```shell +./sbin/start-confignode.sh -d #“-d”参数将在后台进行启动 +``` + +如果启动失败,请参考下方[常见问题](#常见问题)。 + +### 2.4 启动 DataNode 节点 + + 进入iotdb的sbin目录下,启动datanode: + +```shell +./sbin/start-datanode.sh -d #“-d”参数将在后台进行启动 +``` +### 2.5 验证激活 + +当看到“ClusterActivationStatus”字段状态显示为ACTIVATED表示激活成功 + +![](https://alioss.timecho.com/docs/img/%E5%8D%95%E6%9C%BA-%E9%AA%8C%E8%AF%81.png) + +## 3 常见问题 + +1. 部署过程中多次提示激活失败 + - 使用 `ls -al` 命令:使用 `ls -al` 命令检查安装包根目录的所有者信息是否为当前用户。 + - 检查激活目录:检查 `./activation` 目录下的所有文件,所有者信息是否为当前用户。 +2. Confignode节点启动失败 + - 步骤 1: 请查看启动日志,检查是否修改了某些首次启动后不可改的参数。 + - 步骤 2: 请查看启动日志,检查是否出现其他异常。日志中若存在异常现象,请联系天谋技术支持人员咨询解决方案。 + - 步骤 3: 如果是首次部署或者数据可删除,也可按下述步骤清理环境,重新部署后,再次启动。 + - 清理环境: + 1. 结束所有 ConfigNode 和 DataNode 进程。 + ```Bash + # 1. 
停止 ConfigNode 和 DataNode 服务 + sbin/stop-standalone.sh + + # 2. 检查是否还有进程残留 + jps + # 或者 + ps -ef|grep iotdb + + # 3. 如果有进程残留,则手动kill + kill -9 + # 如果确定机器上仅有1个iotdb,可以使用下面命令清理残留进程 + ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9 + ``` + + 2. 删除 data 和 logs 目录。 + - 说明:删除 data 目录是必要的,删除 logs 目录是为了纯净日志,非必需。 + ```shell + cd /data/iotdb rm -rf data logs + ``` + +## 4 附录 + +### 4.1 Confignode节点参数介绍 + +| 参数 | 描述 | 是否为必填项 | +| :--- | :------------------------------- | :----------- | +| -d | 以守护进程模式启动,即在后台运行 | 否 | + +### 4.2 Datanode节点参数介绍 + +| 缩写 | 描述 | 是否为必填项 | +| :--- | :--------------------------------------------- | :----------- | +| -v | 显示版本信息 | 否 | +| -f | 在前台运行脚本,不将其放到后台 | 否 | +| -d | 以守护进程模式启动,即在后台运行 | 否 | +| -p | 指定一个文件来存放进程ID,用于进程管理 | 否 | +| -c | 指定配置文件夹的路径,脚本会从这里加载配置文件 | 否 | +| -g | 打印垃圾回收(GC)的详细信息 | 否 | +| -H | 指定Java堆转储文件的路径,当JVM内存溢出时使用 | 否 | +| -E | 指定JVM错误日志文件的路径 | 否 | +| -D | 定义系统属性,格式为 key=value | 否 | +| -X | 直接传递 -XX 参数给 JVM | 否 | +| -h | 帮助指令 | 否 | + diff --git a/src/zh/UserGuide/V2.0.1/Tree/IoTDB-Introduction/Release-history_apache.md b/src/zh/UserGuide/Master/Table/IoTDB-Introduction/Release-history_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/IoTDB-Introduction/Release-history_apache.md rename to src/zh/UserGuide/Master/Table/IoTDB-Introduction/Release-history_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/IoTDB-Introduction/Release-history_timecho.md b/src/zh/UserGuide/Master/Table/IoTDB-Introduction/Release-history_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/IoTDB-Introduction/Release-history_timecho.md rename to src/zh/UserGuide/Master/Table/IoTDB-Introduction/Release-history_timecho.md diff --git a/src/zh/UserGuide/Master/Table/QuickStart/QuickStart_apache.md b/src/zh/UserGuide/Master/Table/QuickStart/QuickStart_apache.md new file mode 100644 index 000000000..3dab8fe7f --- /dev/null +++ b/src/zh/UserGuide/Master/Table/QuickStart/QuickStart_apache.md @@ -0,0 
+1,78 @@ + + +# 快速上手 + +本篇文档将帮助您了解快速入门 IoTDB 的方法。 + +## 如何安装部署? + +本篇文档将帮助您快速安装部署 IoTDB,您可以通过以下文档的链接快速定位到所需要查看的内容: + +1. 准备所需机器资源:IoTDB 的部署和运行需要考虑多个方面的机器资源配置。具体资源配置可查看 [资源规划](../Deployment-and-Maintenance/Database-Resources.md) + +2. 完成系统配置准备:IoTDB 的系统配置涉及多个方面,关键的系统配置介绍可查看 [系统配置](../Deployment-and-Maintenance/Environment-Requirements.md) + +3. 获取安装包:您可以在[ Apache IoTDB 官网](https://iotdb.apache.org/zh/Download/)获取获取 IoTDB 安装包。具体安装包结构可查看:[安装包获取](../Deployment-and-Maintenance/IoTDB-Package_apache.md) + +4. 安装数据库:您可以根据实际部署架构选择以下教程进行安装部署: + + - 单机版:[单机版](../Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md) + + - 集群版:[集群版](../Deployment-and-Maintenance/Cluster-Deployment_apache.md) + +> ❗️注意:目前我们仍然推荐直接在物理机/虚拟机上安装部署,如需要 docker 部署,可参考:[Docker 部署](../Deployment-and-Maintenance/Docker-Deployment_apache.md) + +## 如何使用? + +1. 数据库建模设计:数据库建模是创建数据库系统的重要步骤,它涉及到设计数据的结构和关系,以确保数据的组织方式能够满足特定应用的需求,下面的文档将会帮助您快速了解 IoTDB 的建模设计: + + - 时序概念介绍:[时序数据模型](../Background-knowledge/Navigating_Time_Series_Data.md) + + - 建模设计介绍:[建模方案设计](../Background-knowledge/Data-Model-and-Terminology.md) + + - 数据库介绍:[数据库管理](../Basic-Concept/Database-Management.md) + + - 表介绍:[表管理](../Basic-Concept/Table-Management.md) + + +2. 数据写入&更新:在数据写入&更新方面,IoTDB 提供了多种方式来插入实时数据,基本的数据写入&更新操作请查看 [数据写入&更新](../Basic-Concept/Write-Updata-Data.md) + +3. 数据查询:IoTDB 提供了丰富的数据查询功能,数据查询的基本介绍请查看 [数据查询](../Basic-Concept/Query-Data.md) + +4. 数据删除:IoTDB 提供了两种删除方式,分别为SQL语句删除与过期自动删除(TTL) + + - SQL语句删除:基本介绍请查看 [数据删除](../Basic-Concept/Delete-Data.md) + - 过期自动删除(TTL):基本介绍请查看 [过期自动删除](../Basic-Concept/TTL-Delete-Data.md) + +5. 其他进阶功能:除了数据库常见的写入、查询等功能外,IoTDB 还支持“数据同步”等功能,具体使用方法可参见具体文档: + + - 数据同步:[数据同步](../User-Manual/Data-Sync_apache.md) + +6. 应用编程接口: IoTDB 提供了多种应用编程接口(API),以便于开发者在应用程序中与 IoTDB 进行交互,目前支持[ Java 原生接口](../API/Programming-Java-Native-API.md)、[Python 原生接口](../API/Programming-Python-Native-API.md)、[JDBC](../API/Programming-JDBC.md)等,更多编程接口可参见官网【应用编程接口】其他章节 + +## 想了解更多技术细节? 
+ +如果您想了解 IoTDB 的更多技术内幕,可以移步至下面的文档: + + - 数据分区和负载均衡:IoTDB 基于时序数据特性,精心设计了数据分区策略和负载均衡算法,提升了集群的可用性和性能,想了解更多请查看 [数据分区和负载均衡](../Technical-Insider/Cluster-data-partitioning.md) + + - 压缩&编码:IoTDB 通过多样化的编码和压缩技术,针对不同数据类型优化存储效率,想了解更多请查看 [压缩&编码](../Technical-Insider/Encoding-and-Compression.md) diff --git a/src/zh/UserGuide/Master/Table/QuickStart/QuickStart_timecho.md b/src/zh/UserGuide/Master/Table/QuickStart/QuickStart_timecho.md index f030bca42..28912cd34 100644 --- a/src/zh/UserGuide/Master/Table/QuickStart/QuickStart_timecho.md +++ b/src/zh/UserGuide/Master/Table/QuickStart/QuickStart_timecho.md @@ -52,11 +52,13 @@ 1. 数据库建模设计:数据库建模是创建数据库系统的重要步骤,它涉及到设计数据的结构和关系,以确保数据的组织方式能够满足特定应用的需求,下面的文档将会帮助您快速了解 IoTDB 的建模设计: - - 时序概念介绍:[时序数据模型](../Basic-Concept/Navigating_Time_Series_Data.md) + - 时序概念介绍:[时序数据模型](../Background-knowledge/Navigating_Time_Series_Data.md) - - 建模设计介绍:[建模方案设计](../Basic-Concept/Data-Model-and-Terminology.md) + - 建模设计介绍:[建模方案设计](../Background-knowledge/Data-Model-and-Terminology.md) - - 数据库&表介绍:[数据库&表管理](../Basic-Concept/Database&Table-Management.md) + - 数据库介绍:[数据库管理](../Basic-Concept/Database-Management.md) + + - 表介绍:[表管理](../Basic-Concept/Table-Management.md) 2. 
数据写入&更新:在数据写入&更新方面,IoTDB 提供了多种方式来插入实时数据,基本的数据写入&更新操作请查看 [数据写入&更新](../Basic-Concept/Write-Updata-Data.md) diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Sample-Data.md b/src/zh/UserGuide/Master/Table/Reference/Sample-Data.md similarity index 100% rename from src/zh/UserGuide/Master/Table/Basic-Concept/Sample-Data.md rename to src/zh/UserGuide/Master/Table/Reference/Sample-Data.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Reference/Status-Codes.md b/src/zh/UserGuide/Master/Table/Reference/Status-Codes.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Reference/Status-Codes.md rename to src/zh/UserGuide/Master/Table/Reference/Status-Codes.md diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md index e8797f836..b61f74040 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Fill-Clause.md @@ -89,7 +89,7 @@ IoTDB 支持以下三种空值填充方式: ## 3 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 ### 3.1 PREVIOUS 填充: diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/From-Join-Clause.md b/src/zh/UserGuide/Master/Table/SQL-Manual/From-Join-Clause.md index 47367651f..3492401ac 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/From-Join-Clause.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/From-Join-Clause.md @@ -117,7 +117,7 @@ SELECT selectExpr [, selectExpr] ... FROM [, ] ... 
[WHE ## 4 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 ### 4.1 From 示例 diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/GroupBy-Clause.md b/src/zh/UserGuide/Master/Table/SQL-Manual/GroupBy-Clause.md index c36c93840..f253b3d87 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/GroupBy-Clause.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/GroupBy-Clause.md @@ -195,7 +195,7 @@ It costs 0.047s ## 3 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 #### 示例 1:降采样时间序列数据 diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Having-Clause.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Having-Clause.md index d02911d4a..98412d6b2 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/Having-Clause.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Having-Clause.md @@ -37,7 +37,7 @@ HAVING booleanExpression ## 2 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 #### 示例 1:过滤计数低于特定值的设备 diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Limit-Offset-Clause.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Limit-Offset-Clause.md index 4a18e7632..30d950193 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/Limit-Offset-Clause.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Limit-Offset-Clause.md @@ -48,7 +48,7 @@ OFFSET 子句与 LIMIT 子句配合使用,用于指定查询结果跳过前 OF ## 2 示例数据 
-在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 #### 示例 1:查询设备的最新行 diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/OrderBy-Clause.md b/src/zh/UserGuide/Master/Table/SQL-Manual/OrderBy-Clause.md index f5be7a4f8..aec9e0dfb 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/OrderBy-Clause.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/OrderBy-Clause.md @@ -40,7 +40,7 @@ sortItem ## 2 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 #### 示例 1: 按时间降序查询过去一小时的数据 diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause.md index ebf97c93a..78635e7da 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Select-Clause.md @@ -45,7 +45,7 @@ selectItem ## 3 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 ### 3.1 选择列表 diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/Where-Clause.md b/src/zh/UserGuide/Master/Table/SQL-Manual/Where-Clause.md index 82c4d2cc4..58c56b34b 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/Where-Clause.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/Where-Clause.md @@ -31,7 +31,7 @@ __WHERE 子句__:用于在 SQL 查询中指定筛选条件,WHERE 子句在 FRO ## 2 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB 
CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 #### 示例1:选择特定 ID 的行 diff --git a/src/zh/UserGuide/Master/Table/User-Manual/Data-Sync_apache.md b/src/zh/UserGuide/Master/Table/User-Manual/Data-Sync_apache.md new file mode 100644 index 000000000..cbad5d698 --- /dev/null +++ b/src/zh/UserGuide/Master/Table/User-Manual/Data-Sync_apache.md @@ -0,0 +1,512 @@ + + +# 数据同步 +数据同步是工业物联网的典型需求,通过数据同步机制,可实现 IoTDB 之间的数据共享,搭建完整的数据链路来满足内网外网数据互通、端边云同步、数据迁移、数据备份等需求。 + +## 1 功能概述 + +### 1.1 数据同步 + +一个数据同步任务包含 3 个阶段: + +![](https://alioss.timecho.com/docs/img/dataSync01.png) + +- 抽取(Source)阶段:该部分用于从源 IoTDB 抽取数据,在 SQL 语句中的 source 部分定义 +- 处理(Process)阶段:该部分用于处理从源 IoTDB 抽取出的数据,在 SQL 语句中的 processor 部分定义 +- 发送(Sink)阶段:该部分用于向目标 IoTDB 发送数据,在 SQL 语句中的 sink 部分定义 + +通过 SQL 语句声明式地配置 3 个部分的具体内容,可实现灵活的数据同步能力。 + +### 1.2 功能限制及说明 + +- 支持 1.x 系列版本 IoTDB 数据同步到 2.x 以及以上系列版本版本的 IoTDB。 +- 不支持 2.x 系列版本 IoTDB 数据同步到 1.x 系列版本版本的 IoTDB。 +- 在进行数据同步任务时,请避免执行任何删除操作,防止两端状态不一致。 + +## 2 使用说明 + +数据同步任务有三种状态:RUNNING、STOPPED 和 DROPPED。任务状态转换如下图所示: + +![](https://alioss.timecho.com/docs/img/Data-Sync01.png) + +创建后任务会直接启动,同时当任务发生异常停止后,系统会自动尝试重启任务。 + +提供以下 SQL 语句对同步任务进行状态管理。 + +### 2.1 创建任务 + +使用 `CREATE PIPE` 语句来创建一条数据同步任务,下列属性中`PipeId`和`sink`必填,`source`和`processor`为选填项,输入 SQL 时注意 `SOURCE`与 `SINK` 插件顺序不能替换。 + +SQL 示例如下: + +```SQL +CREATE PIPE [IF NOT EXISTS] -- PipeId 是能够唯一标定任务的名字 +-- 数据抽取插件,可选插件 +WITH SOURCE ( + [ = ,], +) +-- 数据处理插件,可选插件 +WITH PROCESSOR ( + [ = ,], +) +-- 数据连接插件,必填插件 +WITH SINK ( + [ = ,], +) +``` + +**IF NOT EXISTS 语义**:用于创建操作中,确保当指定 Pipe 不存在时,执行创建命令,防止因尝试创建已存在的 Pipe 而导致报错。 + +### 2.2 开始任务 + +创建之后,任务直接进入运行状态,不需要执行启动任务。当使用`STOP PIPE`语句停止任务时需手动使用`START PIPE`语句来启动任务,PIPE 发生异常情况停止后会自动重新启动任务,从而开始处理数据: + +```SQL +START PIPE +``` + +### 2.3 停止任务 + +停止处理数据: + +```SQL +STOP PIPE +``` + +### 2.4 删除任务 + +删除指定任务: + +```SQL +DROP PIPE [IF EXISTS] +``` + +**IF EXISTS 
语义**:用于删除操作中,确保当指定 Pipe 存在时,执行删除命令,防止因尝试删除不存在的 Pipe 而导致报错。 + +删除任务不需要先停止同步任务。 + +### 2.5 查看任务 + +查看全部任务: + +```SQL +SHOW PIPES +``` + +查看指定任务: + +```SQL +SHOW PIPE +``` + + pipe 的 show pipes 结果示例: + +```SQL ++--------------------------------+-----------------------+-------+----------+-------------+-----------------------------------------------------------+----------------+-------------------+-------------------------+ +| ID| CreationTime| State|PipeSource|PipeProcessor| PipeSink|ExceptionMessage|RemainingEventCount|EstimatedRemainingSeconds| ++--------------------------------+-----------------------+-------+----------+-------------+-----------------------------------------------------------+----------------+-------------------+-------------------------+ +|59abf95db892428b9d01c5fa318014ea|2024-06-17T14:03:44.189|RUNNING| {}| {}|{sink=iotdb-thrift-sink, sink.ip=127.0.0.1, sink.port=6668}| | 128| 1.03| ++--------------------------------+-----------------------+-------+----------+-------------+-----------------------------------------------------------+----------------+-------------------+-------------------------+ +``` + +其中各列含义如下: + +- **ID**:同步任务的唯一标识符 +- **CreationTime**:同步任务的创建的时间 +- **State**:同步任务的状态 +- **PipeSource**:同步数据流的来源 +- **PipeProcessor**:同步数据流在传输过程中的处理逻辑 +- **PipeSink**:同步数据流的目的地 +- **ExceptionMessage**:显示同步任务的异常信息 +- **RemainingEventCount(统计存在延迟)**:剩余 event 数,当前数据同步任务中的所有 event 总数,包括数据同步的 event,以及系统和用户自定义的 event。 +- **EstimatedRemainingSeconds(统计存在延迟)**:剩余时间,基于当前 event 个数和 pipe 处速率,预估完成传输的剩余时间。 + +### 同步插件 + +为了使得整体架构更加灵活以匹配不同的同步场景需求,我们支持在同步任务框架中进行插件组装。系统为您预置了一些常用插件可直接使用,同时您也可以自定义 processor 插件 和 Sink 插件,并加载至 IoTDB 系统进行使用。查看系统中的插件(含自定义与内置插件)可以用以下语句: + +```SQL +SHOW PIPEPLUGINS +``` + +返回结果如下: + +```SQL +IoTDB> SHOW PIPEPLUGINS ++------------------------------+----------+--------------------------------------------------------------------------------------------------+----------------------------------------------------+ +| PluginName|PluginType| 
ClassName| PluginJar| ++------------------------------+----------+--------------------------------------------------------------------------------------------------+----------------------------------------------------+ +| DO-NOTHING-PROCESSOR| Builtin| org.apache.iotdb.commons.pipe.plugin.builtin.processor.donothing.DoNothingProcessor| | +| DO-NOTHING-SINK| Builtin| org.apache.iotdb.commons.pipe.plugin.builtin.connector.donothing.DoNothingConnector| | +| IOTDB-SOURCE| Builtin| org.apache.iotdb.commons.pipe.plugin.builtin.extractor.iotdb.IoTDBExtractor| | +| IOTDB-THRIFT-SINK| Builtin| org.apache.iotdb.commons.pipe.plugin.builtin.connector.iotdb.thrift.IoTDBThriftConnector| | +| IOTDB-THRIFT-SSL-SINK| Builtin| org.apache.iotdb.commons.pipe.plugin.builtin.connector.iotdb.thrift.IoTDBThriftSslConnector| | ++------------------------------+----------+--------------------------------------------------------------------------------------------------+----------------------------------------------------+ + +``` + +预置插件详细介绍如下(各插件的详细参数可参考本文[参数说明](#参考参数说明)): + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
类型自定义插件插件名称介绍
source 插件不支持iotdb-source默认的 extractor 插件,用于抽取 IoTDB 历史或实时数据
processor 插件支持do-nothing-processor默认的 processor 插件,不对传入的数据做任何的处理
sink 插件支持do-nothing-sink不对发送出的数据做任何的处理
iotdb-thrift-sink默认的 sink 插件,用于 IoTDB 到 IoTDB(V2.0.0 及以上)之间的数据传输。使用 Thrift RPC 框架传输数据,多线程 async non-blocking IO 模型,传输性能高,尤其适用于目标端为分布式时的场景
iotdb-thrift-ssl-sink用于 IoTDB 与 IoTDB(V2.0.0 及以上)之间的数据传输。使用 Thrift RPC 框架传输数据,多线程 sync blocking IO 模型,适用于安全需求较高的场景
+ + +## 3 使用示例 + +### 3.1 全量数据同步 + +本例子用来演示将一个 IoTDB 的所有数据同步至另一个 IoTDB,数据链路如下图所示: + +![](https://alioss.timecho.com/docs/img/%E6%95%B0%E6%8D%AE%E5%90%8C%E6%AD%A51.png) + +在这个例子中,我们可以创建一个名为 A2B 的同步任务,用来同步 A IoTDB 到 B IoTDB 间的全量数据,这里需要用到用到 sink 的 iotdb-thrift-sink 插件(内置插件),需通过 node-urls 配置目标端 IoTDB 中 DataNode 节点的数据服务端口的 url,如下面的示例语句: + +```SQL +create pipe A2B +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +### 3.2 部分数据同步 + +本例子用来演示同步某个历史时间范围( 2023 年 8 月 23 日 8 点到 2023 年 10 月 23 日 8 点)的数据至另一个 IoTDB,数据链路如下图所示: + +![](https://alioss.timecho.com/docs/img/%E6%95%B0%E6%8D%AE%E5%90%8C%E6%AD%A51.png) + +在这个例子中,我们可以创建一个名为 A2B 的同步任务。首先我们需要在 source 中定义传输数据的范围,由于传输的是历史数据(历史数据是指同步任务创建之前存在的数据),需要配置数据的起止时间 start-time 和 end-time 以及传输的模式 mode.streaming。通过 node-urls 配置目标端 IoTDB 中 DataNode 节点的数据服务端口的 url。 + +详细语句如下: + +```SQL +create pipe A2B +WITH SOURCE ( + 'source'= 'iotdb-source', + 'mode.streaming' = 'true' -- 新插入数据(pipe创建后)的抽取模式:是否按流式抽取(false 时为批式) + 'start-time' = '2023.08.23T08:00:00+00:00', -- 同步所有数据的开始 event time,包含 start-time + 'end-time' = '2023.10.23T08:00:00+00:00' -- 同步所有数据的结束 event time,包含 end-time +) +with SINK ( + 'sink'='iotdb-thrift-async-sink', + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +### 3.3 双向数据传输 + +本例子用来演示两个 IoTDB 之间互为双活的场景,数据链路如下图所示: + +![](https://alioss.timecho.com/docs/img/1706698592139.jpg) + +在这个例子中,为了避免数据无限循环,需要将 A 和 B 上的参数`source.mode.double-living` 均设置为 `true`,表示不转发从另一 pipe 传输而来的数据。 + +详细语句如下: + +在 A IoTDB 上执行下列语句: + +```SQL +create pipe AB +with source ( + 'source.mode.double-living' ='true' --不转发由其他 Pipe 写入的数据 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +在 B IoTDB 上执行下列语句: + +```SQL +create pipe BA +with source ( + 'source.mode.double-living' ='true' --不转发由其他 Pipe 写入的数据 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = 
'127.0.0.1:6667', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` +### 3.4 边云数据传输 + +本例子用来演示多个 IoTDB 之间边云传输数据的场景,数据由 B 、C、D 集群分别都同步至 A 集群,数据链路如下图所示: + +![](https://alioss.timecho.com/docs/img/dataSync03.png) + +在这个例子中,为了将 B 、C、D 集群的数据同步至 A,在 BA 、CA、DA 之间的 pipe 需要配置database-name 和 table-name 限制范围,详细语句如下: + +在 B IoTDB 上执行下列语句,将 B 中数据同步至 A: + +```SQL +create pipe BA +with source ( + 'database-name'='db_b.*', -- 限制范围 + 'table-name'='.*', -- 可选择匹配所有 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6667', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +在 C IoTDB 上执行下列语句,将 C 中数据同步至 A: + +```SQL +create pipe CA +with source ( + 'database-name'='db_c.*', -- 限制范围 + 'table-name'='.*', -- 可选择匹配所有 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +在 D IoTDB 上执行下列语句,将 D 中数据同步至 A: + +```SQL +create pipe DA +with source ( + 'database-name'='db_d.*', -- 限制范围 + 'table-name'='.*', -- 可选择匹配所有 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6669', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +### 3.5 级联数据传输 + +本例子用来演示多个 IoTDB 之间级联传输数据的场景,数据由 A 集群同步至 B 集群,再同步至 C 集群,数据链路如下图所示: + +![](https://alioss.timecho.com/docs/img/1706698610134.jpg) + +在这个例子中,为了将 A 集群的数据同步至 C,在 BC 之间的 pipe 需要将 `source.mode.double-living` 配置为`true`,详细语句如下: + +在 A IoTDB 上执行下列语句,将 A 中数据同步至 B: + +```SQL +create pipe AB +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +在 B IoTDB 上执行下列语句,将 B 中数据同步至 C: + +```SQL +create pipe BC +with source ( + 'source.mode.double-living' ='true' --不转发由其他 Pipe 写入的数据 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6669', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +### 3.6 压缩同步 + +IoTDB 支持在同步过程中指定数据压缩方式。可通过配置 `compressor` 参数,实现数据的实时压缩和传输。`compressor`目前支持 snappy / gzip / lz4 / zstd / lzma2 5 
种可选算法,且可以选择多种压缩算法组合,按配置的顺序进行压缩。`rate-limit-bytes-per-second`(V1.3.3 及以后版本支持)每秒最大允许传输的byte数,计算压缩后的byte,若小于0则不限制。 + +如创建一个名为 A2B 的同步任务: + +```SQL +create pipe A2B +with sink ( + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url + 'compressor' = 'snappy,lz4', -- 压缩算法,可配置多个,按配置顺序依次采用 + 'rate-limit-bytes-per-second'='1048576' -- 每秒最大允许传输的byte数 +) +``` + + +### 3.7 加密同步 + +IoTDB 支持在同步过程中使用 SSL 加密,从而在不同的 IoTDB 实例之间安全地传输数据。通过配置 SSL 相关的参数,如证书地址和密码(`ssl.trust-store-path`)、(`ssl.trust-store-pwd`)可以确保数据在同步过程中被 SSL 加密所保护。 + +如创建名为 A2B 的同步任务: + +```SQL +create pipe A2B +with sink ( + 'sink'='iotdb-thrift-ssl-sink', + 'node-urls'='127.0.0.1:6667', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url + 'ssl.trust-store-path'='pki/trusted', -- 连接目标端 DataNode 所需的 trust store 证书路径 + 'ssl.trust-store-pwd'='root' -- 连接目标端 DataNode 所需的 trust store 证书密码 +) +``` + +## 参考:注意事项 + +可通过修改 IoTDB 配置文件(`iotdb-system.properties`)以调整数据同步的参数,如同步数据存储目录等。完整配置如下: + +```Properties +# pipe_receiver_file_dir +# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/${cn_system_dir}/pipe/receiver). +# If it is absolute, system will save the data in the exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# Note: If pipe_receiver_file_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: restart +# For windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# pipe_receiver_file_dir=data\\confignode\\system\\pipe\\receiver +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. 
+pipe_receiver_file_dir=data/confignode/system/pipe/receiver + +#################### +### Pipe Configuration +#################### + +# Uncomment the following field to configure the pipe lib directory. +# effectiveMode: first_start +# For Windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is +# absolute. Otherwise, it is relative. +# pipe_lib_dir=ext\\pipe +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +pipe_lib_dir=ext/pipe + +# The maximum number of threads that can be used to execute the pipe subtasks in PipeSubtaskExecutor. +# The actual value will be min(pipe_subtask_executor_max_thread_num, max(1, CPU core number / 2)). +# effectiveMode: restart +# Datatype: int +pipe_subtask_executor_max_thread_num=5 + +# The connection timeout (in milliseconds) for the thrift client. +# effectiveMode: restart +# Datatype: int +pipe_sink_timeout_ms=900000 + +# The maximum number of selectors that can be used in the sink. +# Recommend to set this value to less than or equal to pipe_sink_max_client_number. +# effectiveMode: restart +# Datatype: int +pipe_sink_selector_number=4 + +# The maximum number of clients that can be used in the sink. +# effectiveMode: restart +# Datatype: int +pipe_sink_max_client_number=16 + +# The total bytes that all pipe sinks can transfer per second. +# When given a value less than or equal to 0, it means no limit. +# default value is -1, which means no limit. 
+# effectiveMode: hot_reload +# Datatype: double +pipe_all_sinks_rate_limit_bytes_per_second=-1 +``` + +## 参考:参数说明 + +### source 参数 + +| **参数** | **描述** | **value 取值范围** | **是否必填** | **默认取值** | +| ------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------ | ------------------------------- | +| source | iotdb-source | String: iotdb-source | 必填 | - | +| mode.streaming | 此参数指定时序数据写入的捕获来源。适用于 `mode.streaming`为 `false` 模式下的场景,决定`inclusion`中`data.insert`数据的捕获来源。提供两种捕获策略:true: 动态选择捕获的类型。系统将根据下游处理速度,自适应地选择是捕获每个写入请求还是仅捕获 TsFile 文件的封口请求。当下游处理速度快时,优先捕获写入请求以减少延迟;当处理速度慢时,仅捕获文件封口请求以避免处理堆积。这种模式适用于大多数场景,能够实现处理延迟和吞吐量的最优平衡。false:固定按批捕获方式。仅捕获 TsFile 文件的封口请求,适用于资源受限的应用场景,以降低系统负载。注意,pipe 启动时捕获的快照数据只会以文件的方式供下游处理。 | Boolean: true / false | 否 | true | +| mode.strict | 在使用 time / path / database-name / table-name 参数过滤数据时,是否需要严格按照条件筛选:`true`: 严格筛选。系统将完全按照给定条件过滤筛选被捕获的数据,确保只有符合条件的数据被选中。`false`:非严格筛选。系统在筛选被捕获的数据时可能会包含一些额外的数据,适用于性能敏感的场景,可降低 CPU 和 IO 消耗。 | Boolean: true / false | 否 | true | +| mode.snapshot | 此参数决定时序数据的捕获方式,影响`inclusion`中的`data`数据。提供两种模式:`true`:静态数据捕获。启动 pipe 时,会进行一次性的数据快照捕获。当快照数据被完全消费后,**pipe 将自动终止(DROP PIPE SQL 会自动执行)**。`false`:动态数据捕获。除了在 pipe 启动时捕获快照数据外,还会持续捕获后续的数据变更。pipe 将持续运行以处理动态数据流。 | Boolean: true / false | 否 | false | +| database-name | 当用户连接指定的 sql_dialect 为 table 时可以指定。此参数决定时序数据的捕获范围,影响`inclusion`中的`data`数据。表示要过滤的数据库的名称。它可以是具体的数据库名,也可以是 Java 风格正则表达式来匹配多个数据库。默认情况下,匹配所有的库。 | String:数据库名或数据库正则模式串,可以匹配未创建的、不存在的库 | 否 | ".*" | +| table-name | 当用户连接指定的 sql_dialect 为 table 时可以指定。此参数决定时序数据的捕获范围,影响`inclusion`中的`data`数据。表示要过滤的表的名称。它可以是具体的表名,也可以是 Java 风格正则表达式来匹配多个表。默认情况下,匹配所有的表。 | String:数据表名或数据表正则模式串,可以是未创建的、不存在的表 | 否 | ".*" | +| start-time | 此参数决定时序数据的捕获范围,影响`inclusion`中的`data`数据。当数据的 event time 大于等于该参数时,数据会被筛选出来进入流处理 pipe。 | Long: [Long.MIN_VALUE, Long.MAX_VALUE] (unix 裸时间戳)或 String:IoTDB 支持的 ISO 格式时间戳 | 否 | Long.MIN_VALUE(unix 裸时间戳) | +| end-time | 
此参数决定时序数据的捕获范围,影响`inclusion`中的`data`数据。当数据的 event time 小于等于该参数时,数据会被筛选出来进入流处理 pipe。 | Long: [Long.MIN_VALUE, Long.MAX_VALUE](unix 裸时间戳)或String:IoTDB 支持的 ISO 格式时间戳 | 否 | Long.MAX_VALUE(unix 裸时间戳) | +| forwarding-pipe-requests | 是否转发由 pipe 数据同步而来的集群外的数据。一般供搭建双活集群时使用,双活集群模式下该参数为 false,以此避免无限的环形同步。 | Boolean: true / false | 否 | true | + +> 💎 **说明:数据抽取模式 mode.streaming 取值 true 和 false 的差异** +> - **true(推荐)**:该取值下,任务将对数据进行实时处理、发送,其特点是高时效、低吞吐 +> - **false**:该取值下,任务将对数据进行批量(按底层数据文件)处理、发送,其特点是低时效、高吞吐 + +### sink 参数 + +#### iotdb-thrift-sink + +| **参数** | **描述** | **value 取值范围** | **是否必填** | **默认取值** | +| --------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -------- | ------------ | +| sink | iotdb-thrift-sink 或 iotdb-thrift-async-sink | String: iotdb-thrift-sink 或 iotdb-thrift-async-sink | 必填 | - | +| node-urls | 目标端 IoTDB 任意多个 DataNode 节点的数据服务端口的 url(请注意同步任务不支持向自身服务进行转发) | String. 例:'127.0.0.1:6667,127.0.0.1:6668,127.0.0.1:6669', '127.0.0.1:6667' | 必填 | - | +| user/usename | 连接接收端使用的用户名,同步要求该用户具备相应的操作权限 | String | 选填 | root | +| password | 连接接收端使用的用户名对应的密码,同步要求该用户具备相应的操作权限 | String | 选填 | root | +| batch.enable | 是否开启日志攒批发送模式,用于提高传输吞吐,降低 IOPS | Boolean: true, false | 选填 | true | +| batch.max-delay-seconds | 在开启日志攒批发送模式时生效,表示一批数据在发送前的最长等待时间(单位:s) | Integer | 选填 | 1 | +| batch.size-bytes | 在开启日志攒批发送模式时生效,表示一批数据最大的攒批大小(单位:byte) | Long | 选填 | 16*1024*1024 | +| compressor | 所选取的 rpc 压缩算法,可配置多个,对每个请求顺序采用 | String: snappy / gzip / lz4 / zstd / lzma2 | 选填 | "" | +| compressor.zstd.level | 所选取的 rpc 压缩算法为 zstd 时,可使用该参数额外配置 zstd 算法的压缩等级 | Int: [-131072, 22] | 选填 | 3 | +| rate-limit-bytes-per-second | 每秒最大允许传输的 byte 数,计算压缩后的 byte(如压缩),若小于 0 则不限制 | Double: [Double.MIN_VALUE, Double.MAX_VALUE] | 选填 | -1 | + + +#### iotdb-thrift-ssl-sink + +| **参数** | **描述** | **value 取值范围** | **是否必填** | **默认取值** | +| --------------------------- | 
------------------------------------------------------------ | ------------------------------------------------------------ | -------- | ------------ | +| sink | iotdb-thrift-ssl-sink | String: iotdb-thrift-ssl-sink | 必填 | - | +| node-urls | 目标端 IoTDB 任意多个 DataNode 节点的数据服务端口的 url(请注意同步任务不支持向自身服务进行转发) | String. 例:'127.0.0.1:6667,127.0.0.1:6668,127.0.0.1:6669', '127.0.0.1:6667' | 必填 | - | +| user/usename | 连接接收端使用的用户名,同步要求该用户具备相应的操作权限 | String | 选填 | root | +| password | 连接接收端使用的用户名对应的密码,同步要求该用户具备相应的操作权限 | String | 选填 | root | +| batch.enable | 是否开启日志攒批发送模式,用于提高传输吞吐,降低 IOPS | Boolean: true, false | 选填 | true | +| batch.max-delay-seconds | 在开启日志攒批发送模式时生效,表示一批数据在发送前的最长等待时间(单位:s) | Integer | 选填 | 1 | +| batch.size-bytes | 在开启日志攒批发送模式时生效,表示一批数据最大的攒批大小(单位:byte) | Long | 选填 | 16*1024*1024 | +| compressor | 所选取的 rpc 压缩算法,可配置多个,对每个请求顺序采用 | String: snappy / gzip / lz4 / zstd / lzma2 | 选填 | "" | +| compressor.zstd.level | 所选取的 rpc 压缩算法为 zstd 时,可使用该参数额外配置 zstd 算法的压缩等级 | Int: [-131072, 22] | 选填 | 3 | +| rate-limit-bytes-per-second | 每秒最大允许传输的 byte 数,计算压缩后的 byte(如压缩),若小于 0 则不限制 | Double: [Double.MIN_VALUE, Double.MAX_VALUE] | 选填 | -1 | +| ssl.trust-store-path | 连接目标端 DataNode 所需的 trust store 证书路径 | String.Example: '127.0.0.1:6667,127.0.0.1:6668,127.0.0.1:6669', '127.0.0.1:6667' | 必填 | - | +| ssl.trust-store-pwd | 连接目标端 DataNode 所需的 trust store 证书密码 | Integer | 必填 | - | \ No newline at end of file diff --git a/src/zh/UserGuide/Master/Tree/Reference/Keywords.md b/src/zh/UserGuide/Master/Tree/SQL-Manual/Keywords.md similarity index 100% rename from src/zh/UserGuide/Master/Tree/Reference/Keywords.md rename to src/zh/UserGuide/Master/Tree/SQL-Manual/Keywords.md diff --git a/src/zh/UserGuide/Master/Tree/Reference/Syntax-Rule.md b/src/zh/UserGuide/Master/Tree/SQL-Manual/Syntax-Rule.md similarity index 100% rename from src/zh/UserGuide/Master/Tree/Reference/Syntax-Rule.md rename to src/zh/UserGuide/Master/Tree/SQL-Manual/Syntax-Rule.md diff --git 
a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-CSharp-Native-API.md b/src/zh/UserGuide/V1.3.3/API/Programming-CSharp-Native-API.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-CSharp-Native-API.md rename to src/zh/UserGuide/V1.3.3/API/Programming-CSharp-Native-API.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-Cpp-Native-API.md b/src/zh/UserGuide/V1.3.3/API/Programming-Cpp-Native-API.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-Cpp-Native-API.md rename to src/zh/UserGuide/V1.3.3/API/Programming-Cpp-Native-API.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-Data-Subscription.md b/src/zh/UserGuide/V1.3.3/API/Programming-Data-Subscription.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-Data-Subscription.md rename to src/zh/UserGuide/V1.3.3/API/Programming-Data-Subscription.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-Go-Native-API.md b/src/zh/UserGuide/V1.3.3/API/Programming-Go-Native-API.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-Go-Native-API.md rename to src/zh/UserGuide/V1.3.3/API/Programming-Go-Native-API.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-JDBC.md b/src/zh/UserGuide/V1.3.3/API/Programming-JDBC.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-JDBC.md rename to src/zh/UserGuide/V1.3.3/API/Programming-JDBC.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-Java-Native-API.md b/src/zh/UserGuide/V1.3.3/API/Programming-Java-Native-API.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-Java-Native-API.md rename to src/zh/UserGuide/V1.3.3/API/Programming-Java-Native-API.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-Kafka.md b/src/zh/UserGuide/V1.3.3/API/Programming-Kafka.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-Kafka.md rename to 
src/zh/UserGuide/V1.3.3/API/Programming-Kafka.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-MQTT.md b/src/zh/UserGuide/V1.3.3/API/Programming-MQTT.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-MQTT.md rename to src/zh/UserGuide/V1.3.3/API/Programming-MQTT.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-NodeJS-Native-API.md b/src/zh/UserGuide/V1.3.3/API/Programming-NodeJS-Native-API.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-NodeJS-Native-API.md rename to src/zh/UserGuide/V1.3.3/API/Programming-NodeJS-Native-API.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-ODBC.md b/src/zh/UserGuide/V1.3.3/API/Programming-ODBC.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-ODBC.md rename to src/zh/UserGuide/V1.3.3/API/Programming-ODBC.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-OPC-UA_timecho.md b/src/zh/UserGuide/V1.3.3/API/Programming-OPC-UA_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-OPC-UA_timecho.md rename to src/zh/UserGuide/V1.3.3/API/Programming-OPC-UA_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-Python-Native-API.md b/src/zh/UserGuide/V1.3.3/API/Programming-Python-Native-API.md similarity index 99% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-Python-Native-API.md rename to src/zh/UserGuide/V1.3.3/API/Programming-Python-Native-API.md index 129b94e5b..a894bc1ab 100644 --- a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-Python-Native-API.md +++ b/src/zh/UserGuide/V1.3.3/API/Programming-Python-Native-API.md @@ -29,9 +29,9 @@ 首先下载包:`pip3 install apache-iotdb` -您可以从这里得到一个使用该包进行数据读写的例子:[Session Example](https://github.com/apache/iotdb/blob/rc/2.0.1/iotdb-client/client-py/session_example.py) +您可以从这里得到一个使用该包进行数据读写的例子:[Session Example](https://github.com/apache/iotdb/blob/rc/1.3.3/iotdb-client/client-py/SessionExample.py) 
-关于对齐时间序列读写的例子:[Aligned Timeseries Session Example](https://github.com/apache/iotdb/blob/rc/2.0.1/iotdb-client/client-py/session_aligned_timeseries_example.py) +关于对齐时间序列读写的例子:[Aligned Timeseries Session Example](https://github.com/apache/iotdb/blob/rc/1.3.3/iotdb-client/client-py/SessionAlignedTimeseriesExample.py) (您需要在文件的头部添加`import iotdb`) diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/Programming-Rust-Native-API.md b/src/zh/UserGuide/V1.3.3/API/Programming-Rust-Native-API.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/API/Programming-Rust-Native-API.md rename to src/zh/UserGuide/V1.3.3/API/Programming-Rust-Native-API.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/RestServiceV1.md b/src/zh/UserGuide/V1.3.3/API/RestServiceV1.md similarity index 99% rename from src/zh/UserGuide/V2.0.1/Tree/API/RestServiceV1.md rename to src/zh/UserGuide/V1.3.3/API/RestServiceV1.md index c1d12587b..98ef9eec4 100644 --- a/src/zh/UserGuide/V2.0.1/Tree/API/RestServiceV1.md +++ b/src/zh/UserGuide/V1.3.3/API/RestServiceV1.md @@ -23,8 +23,8 @@ IoTDB 的 RESTful 服务可用于查询、写入和管理操作,它使用 OpenAPI 标准来定义接口并生成框架。 ## 开启RESTful 服务 -RESTful 服务默认情况是关闭的 - +RESTful 服务默认情况是关闭的 + 找到IoTDB安装目录下面的`conf/iotdb-system.properties`文件,将 `enable_rest_service` 设置为 `true` 以启用该模块。 ```properties diff --git a/src/zh/UserGuide/V2.0.1/Tree/API/RestServiceV2.md b/src/zh/UserGuide/V1.3.3/API/RestServiceV2.md similarity index 99% rename from src/zh/UserGuide/V2.0.1/Tree/API/RestServiceV2.md rename to src/zh/UserGuide/V1.3.3/API/RestServiceV2.md index b572379d3..51fae7854 100644 --- a/src/zh/UserGuide/V2.0.1/Tree/API/RestServiceV2.md +++ b/src/zh/UserGuide/V1.3.3/API/RestServiceV2.md @@ -24,7 +24,7 @@ IoTDB 的 RESTful 服务可用于查询、写入和管理操作,它使用 Open ## 开启RESTful 服务 RESTful 服务默认情况是关闭的 - + 找到IoTDB安装目录下面的`conf/iotdb-system.properties`文件,将 `enable_rest_service` 设置为 `true` 以启用该模块。 ```properties diff --git a/src/zh/UserGuide/V2.0.1/Table/Background-knowledge/Cluster-Concept.md 
b/src/zh/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Background-knowledge/Cluster-Concept.md rename to src/zh/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept_apache.md b/src/zh/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept_apache.md rename to src/zh/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept_timecho.md b/src/zh/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept_timecho.md rename to src/zh/UserGuide/V1.3.3/Background-knowledge/Cluster-Concept_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Background-knowledge/Data-Type.md b/src/zh/UserGuide/V1.3.3/Background-knowledge/Data-Type.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Background-knowledge/Data-Type.md rename to src/zh/UserGuide/V1.3.3/Background-knowledge/Data-Type.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Data-Model-and-Terminology.md b/src/zh/UserGuide/V1.3.3/Basic-Concept/Data-Model-and-Terminology.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Data-Model-and-Terminology.md rename to src/zh/UserGuide/V1.3.3/Basic-Concept/Data-Model-and-Terminology.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Navigating_Time_Series_Data.md b/src/zh/UserGuide/V1.3.3/Basic-Concept/Navigating_Time_Series_Data.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Navigating_Time_Series_Data.md rename to src/zh/UserGuide/V1.3.3/Basic-Concept/Navigating_Time_Series_Data.md diff --git 
a/src/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata.md b/src/zh/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata.md similarity index 100% rename from src/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata.md rename to src/zh/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata_apache.md b/src/zh/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata_apache.md rename to src/zh/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata_timecho.md b/src/zh/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata_timecho.md similarity index 99% rename from src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata_timecho.md rename to src/zh/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata_timecho.md index 5ba32f82a..3c1ed63c8 100644 --- a/src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Operate-Metadata_timecho.md +++ b/src/zh/UserGuide/V1.3.3/Basic-Concept/Operate-Metadata_timecho.md @@ -19,8 +19,7 @@ --> -# 测点管理 - +# 测点管理 ## 数据库管理 数据库(Database)可以被视为关系数据库中的Database。 diff --git a/src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Query-Data.md b/src/zh/UserGuide/V1.3.3/Basic-Concept/Query-Data.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Query-Data.md rename to src/zh/UserGuide/V1.3.3/Basic-Concept/Query-Data.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Write-Delete-Data.md b/src/zh/UserGuide/V1.3.3/Basic-Concept/Write-Delete-Data.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Basic-Concept/Write-Delete-Data.md rename to src/zh/UserGuide/V1.3.3/Basic-Concept/Write-Delete-Data.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/AINode_Deployment_timecho.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/AINode_Deployment_timecho.md similarity index 100% 
rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/AINode_Deployment_timecho.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/AINode_Deployment_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Cluster-Deployment_apache.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Cluster-Deployment_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Cluster-Deployment_apache.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Cluster-Deployment_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Cluster-Deployment_timecho.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Cluster-Deployment_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Cluster-Deployment_timecho.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Cluster-Deployment_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Database-Resources.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Database-Resources.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Database-Resources.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Database-Resources.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Docker-Deployment_apache.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Docker-Deployment_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Docker-Deployment_apache.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Docker-Deployment_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Docker-Deployment_timecho.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Docker-Deployment_timecho.md similarity index 100% rename from 
src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Docker-Deployment_timecho.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Docker-Deployment_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Environment-Requirements.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Environment-Requirements.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Environment-Requirements.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Environment-Requirements.md diff --git a/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package_apache.md new file mode 100644 index 000000000..80e7cb01b --- /dev/null +++ b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package_apache.md @@ -0,0 +1,44 @@ + +# 安装包获取 +## 安装包获取方式 + +安装包可直接在Apache IoTDB官网获取:https://iotdb.apache.org/zh/Download/ + +## 安装包结构 + +解压后安装包(`apache-iotdb--all-bin.zip`),安装包解压后目录结构如下: + +| **目录** | **类型** | **说明** | +| ---------------- | -------- | ------------------------------------------------------------ | +| conf | 文件夹 | 配置文件目录,包含 ConfigNode、DataNode、JMX 和 logback 等配置文件 | +| data | 文件夹 | 默认的数据文件目录,包含 ConfigNode 和 DataNode 的数据文件。(启动程序后才会生成该目录) | +| lib | 文件夹 | IoTDB可执行库文件目录 | +| licenses | 文件夹 | 开源社区证书文件目录 | +| logs | 文件夹 | 默认的日志文件目录,包含 ConfigNode 和 DataNode 的日志文件(启动程序后才会生成该目录) | +| sbin | 文件夹 | 主要脚本目录,包含启、停等脚本等 | +| tools | 文件夹 | 系统周边工具目录 | +| ext | 文件夹 | pipe,trigger,udf插件的相关文件(需要使用时用户自行创建) | +| LICENSE | 文件 | 证书 | +| 
NOTICE | 文件 | 提示 | +| README_ZH\.md | 文件 | markdown格式的中文版说明 | +| README\.md | 文件 | 使用说明 | +| RELEASE_NOTES\.md | 文件 | 版本说明 | \ No newline at end of file diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/IoTDB-Package_timecho.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/IoTDB-Package_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Monitoring-panel-deployment.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Monitoring-panel-deployment.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Monitoring-panel-deployment.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Monitoring-panel-deployment.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Slow-Query-Management.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Slow-Query-Management.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Slow-Query-Management.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Slow-Query-Management.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md similarity index 100% rename from 
src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/workbench-deployment_timecho.md b/src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/workbench-deployment_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Deployment-and-Maintenance/workbench-deployment_timecho.md rename to src/zh/UserGuide/V1.3.3/Deployment-and-Maintenance/workbench-deployment_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/DBeaver.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/DBeaver.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/DBeaver.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/DBeaver.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/DataEase.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/DataEase.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/DataEase.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/DataEase.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Flink-IoTDB.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Flink-IoTDB.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Flink-IoTDB.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Flink-IoTDB.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Flink-TsFile.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Flink-TsFile.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Flink-TsFile.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Flink-TsFile.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Grafana-Connector.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Grafana-Connector.md similarity index 100% rename 
from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Grafana-Connector.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Grafana-Connector.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Grafana-Plugin.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Grafana-Plugin.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Grafana-Plugin.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Grafana-Plugin.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Hive-TsFile.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Hive-TsFile.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Hive-TsFile.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Hive-TsFile.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Ignition-IoTDB-plugin_timecho.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Ignition-IoTDB-plugin_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Ignition-IoTDB-plugin_timecho.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Ignition-IoTDB-plugin_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Kubernetes_apache.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Kubernetes_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Kubernetes_apache.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Kubernetes_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Kubernetes_timecho.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Kubernetes_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Kubernetes_timecho.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Kubernetes_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/NiFi-IoTDB.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/NiFi-IoTDB.md 
similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/NiFi-IoTDB.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/NiFi-IoTDB.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Spark-IoTDB.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Spark-IoTDB.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Spark-IoTDB.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Spark-IoTDB.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Spark-TsFile.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Spark-TsFile.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Spark-TsFile.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Spark-TsFile.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Telegraf.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Telegraf.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Telegraf.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Telegraf.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Thingsboard.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Thingsboard.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Thingsboard.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Thingsboard.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Zeppelin-IoTDB_apache.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Zeppelin-IoTDB_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Zeppelin-IoTDB_apache.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Zeppelin-IoTDB_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Zeppelin-IoTDB_timecho.md b/src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Zeppelin-IoTDB_timecho.md similarity index 100% rename from 
src/zh/UserGuide/V2.0.1/Tree/Ecosystem-Integration/Zeppelin-IoTDB_timecho.md rename to src/zh/UserGuide/V1.3.3/Ecosystem-Integration/Zeppelin-IoTDB_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/FAQ/Frequently-asked-questions.md b/src/zh/UserGuide/V1.3.3/FAQ/Frequently-asked-questions.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/FAQ/Frequently-asked-questions.md rename to src/zh/UserGuide/V1.3.3/FAQ/Frequently-asked-questions.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/IoTDB-Introduction/IoTDB-Introduction_apache.md b/src/zh/UserGuide/V1.3.3/IoTDB-Introduction/IoTDB-Introduction_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/IoTDB-Introduction/IoTDB-Introduction_apache.md rename to src/zh/UserGuide/V1.3.3/IoTDB-Introduction/IoTDB-Introduction_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Table/IoTDB-Introduction/IoTDB-Introduction_timecho.md b/src/zh/UserGuide/V1.3.3/IoTDB-Introduction/IoTDB-Introduction_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/IoTDB-Introduction/IoTDB-Introduction_timecho.md rename to src/zh/UserGuide/V1.3.3/IoTDB-Introduction/IoTDB-Introduction_timecho.md diff --git a/src/zh/UserGuide/V1.3.3/IoTDB-Introduction/Release-history_apache.md b/src/zh/UserGuide/V1.3.3/IoTDB-Introduction/Release-history_apache.md new file mode 100644 index 000000000..bab81abd0 --- /dev/null +++ b/src/zh/UserGuide/V1.3.3/IoTDB-Introduction/Release-history_apache.md @@ -0,0 +1,162 @@ + +# 发版历史 + +## V1.3.3 + +> 发版时间:2024.11.20 +> + +V1.3.3主要新增 String、Blob、Date、Timestamp 数据类型、增加数据订阅、DataNode 主动监听并加载 TsFile,同时增加可观测性指标、发送端支持传文件至指定目录后,接收端自动加载到 IoTDB、配置文件整合、客户端查询请求负载均衡等功能,对数据库监控、性能、稳定性进行了全方位提升,并修复部分产品 bug 和性能问题。具体发布内容如下: + +- 存储模块:新增 String、Blob、Date、Timestamp 数据类型 +- 存储模块:合并模块内存控制性能提升 +- 查询模块:新增客户端查询请求负载均衡优化 +- 查询模块:新增活跃元数据统计查询 +- 查询模块:Filter 性能优化,提升聚合查询和 where 条件查询的速度 +- 数据同步:发送端支持传文件至指定目录后,接收端自动加载到 IoTDB +- 数据同步:接收端新增数据类型请求的自动转换机制 +- 数据订阅:新增数据订阅能力,支持以数据点或 TsFile 文件方式订阅数据库数据 +- 
数据加载:DataNode 主动监听并加载 TsFile,同时增加可观测性指标 +- 流处理:Alter Pipe 支持 Alter Source 的能力 +- 系统模块:优化配置文件,原有配置文件三合一,降低用户操作成本 +- 系统模块:新增配置项设置接口 +- 系统模块:优化重启恢复性能,减少启动时间 +- 脚本与工具:新增元数据导入导出脚本 +- 脚本与工具:新增对 Kubernetes Helm 的支持 + +## V1.3.2 + +> 发版时间:2024.7.1 +> + +V1.3.2主要新增 explain analyze 语句分析单个 SQL 查询耗时、新增 UDAF 用户自定义聚合函数框架、元数据同步、统计指定路径下数据点数、SQL 语句导入导出脚本等功能,同时集群管理工具支持滚动升级、上传插件到整个集群,对数据库监控、性能、稳定性进行了全方位提升,并修复部分产品 bug 和性能问题。具体发布内容如下: + +- 存储模块:insertRecords 接口写入性能提升 +- 查询模块:新增 Explain Analyze 语句(监控单条 SQL 执行各阶段耗时) +- 查询模块:新增 UDAF 用户自定义聚合函数框架 +- 查询模块:新增 MaxBy/MinBy 函数,支持获取最大/小值的同时返回对应时间戳 +- 查询模块:值过滤查询性能提升 +- 数据同步:路径匹配支持通配符 +- 数据同步:支持元数据同步(含时间序列及相关属性、权限等设置) +- 流处理:增加 Alter Pipe 语句,支持热更新 Pipe 任务的插件 +- 系统模块:系统数据点数统计增加对 load TsFile 导入数据的统计 +- 脚本与工具:新增本地升级备份工具(通过硬链接对原有数据进行备份) +- 脚本与工具:新增 export-data/import-data 脚本,支持将数据导出为 CSV、TsFile 格式或 SQL 语句 +- 脚本与工具:Windows 环境支持通过窗口名区分 ConfigNode、DataNode、Cli + +## V1.3.1 + +> 发版时间:2024.4.22 +> + +V1.3.1主要新增一键启停集群脚本、一键收集实例信息脚本、多种内置函数等新特性,优化了原有数据同步、日志输出策略、查询执行过程,提升系统可观测性,并修复部分产品 bug 和性能问题。具体发布内容如下: + +- 增加一键启停集群脚本(start-all/stop-all.sh & start-all/stop-all.bat) +- 增加一键收集实例信息脚本(collect-info.sh & collect-info.bat) +- 新增标准差、方差内置聚合函数 +- 新增 tsfile 修复命令 +- Fill 子句支持设置填充超时阈值,超过时间阈值不填充 +- 数据同步简化时间范围指定方式,直接设置起止时间 +- 系统可观测性提升(增加集群节点的散度监控、分布式任务调度框架可观测性) +- 日志默认输出策略优化 +- Load TsFile 完善内存控制,覆盖全流程 +- Rest 接口(V2 版)增加列类型返回 +- 优化查询执行过程 +- 客户端自动拉取可用 DataNode 列表 + +## V1.3.0 + +> 发版时间:2024.1.1 +> + + +V1.3.0主要新增SSL通讯加密、数据同步监控项统计等新特性,优化了原有权限模块的语法和逻辑、metrics算法库性能、python客户端写入性能以及在部分查询场景下的查询效率,修复部分产品 bug 和性能问题。具体发布内容如下: + +- 安全模块:优化权限模块,支持时间序列粒度的权限控制 +- 安全模块:客户端服务器支持 SSL 通讯加密 +- 查询模块:计算类型视图支持 LAST 查询 +- 流处理:新增 pipe 相关监控指标 +- 存储模块:支持负数时间戳写入 +- 脚本与工具:load 脚本导入数据纳入数据点数监控项统计 +- 客户端模块:优化 python 客户端的性能 +- 查询模块优化 show path 返回时间长的问题 +- 查询模块:优化 explain 语句的展示结果,使展示结果对齐 +- 系统模块:环境配置脚本中增加统一内存配置项 MEMORY_SIZE +- 系统模块:配置项 target_config_node_list 更名为 seed_config_node +- 系统模块:配置项 storage_query_schema_consensus_free_memory_proportion 更名为 datanode_memory_proportion + +## V1.2.0 + +> 
发版时间:2023.6.30 +> + + +V1.2.0主要增加了流处理框架、动态模板、substring/replace/round内置查询函数等新特性,增强了show region、show timeseries、show variable等内置语句功能和Session接口,同时优化了内置监控项及其实现,修复部分产品bug和性能问题。 + +- 流处理:新增流处理框架 +- 元数据模块:新增模板动态扩充功能 +- 存储模块:新增SPRINTZ和RLBE编码以及LZMA2压缩算法 +- 查询模块:新增cast、round、substr、replace内置标量函数 +- 查询模块:新增time_duration、mode内置聚合函数 +- 查询模块:SQL语句支持case when语法 +- 查询模块:SQL语句支持order by表达式 +- 接口模块:Python API支持连接分布式多个节点 +- 接口模块:Python客户端支持写入重定向 +- 接口模块:Session API增加用模板批量创建序列接口 + +## V1.1.0 + +> 发版时间:2023-04-03 +> + +V1.1.0主要改进增加了部分新特性,如支持 GROUP BY VARIATION、GROUP BY CONDITION 等分段方式、增加 DIFF、COUNT_IF 等实用函数,引入 pipeline 执行引擎进一步提升查询速度等。同时修复对齐序列 last 查询 order by timeseries、LIMIT&OFFSET 不生效、重启后元数据模版错误、删除所有 database 后创建序列错误等相关问题。 + +- 查询模块:align by device 语句支持 order by time +- 查询模块:支持 Show Queries 命令 +- 查询模块:支持 kill query 命令 +- 系统模块:show regions 支持指定特定的 database +- 系统模块:新增 SQL show variables, 可以展示当前集群参数 +- 查询模块:聚合查询支持 GROUP BY VARIATION +- 查询模块:SELECT INTO 支持特定的数据类型强转 +- 查询模块:实现内置标量函数 DIFF +- 系统模块:show regions 显示创建时间 +- 查询模块:实现内置聚合函数 COUNT_IF +- 查询模块:聚合查询支持 GROUP BY CONDITION +- 系统模块:支持修改 dn_rpc_port 和 dn_rpc_address + +## V1.0.0 + +> 发版时间:2022.12.03 +> + +V1.0.0主要修复分区计算及查询执行时的相关问题,历史快照未删除,数据查询及 SessionPool 内存使用上的相关问题等;同时改进增加部分新特性,如支持 show variables、explain align by device 等命令,完善 ExportCSV/ExportTsFile/MQTT 等功能,完善集群的启停流程、更改 IoTDB 集群默认的内部端口、新增用于区分集群的 cluster_name 属性等。 + +- 系统模块:支持分布式高可用架构 +- 系统模块:支持多副本存储 +- 系统模块:启动节点时,如果端口已被占用,则终止启动流程 +- 系统模块:支持集群管理sql +- 系统模块:支持对Confignode、Datanode进行启动、停止、移除等功能管理 +- 系统模块:可配置共识协议框架及多种共识协议:Simple、IoTConsensus、Ratis +- 系统模块:支持数据、元数据、Confignode的多副本管理 +- 查询模块:支持大规模并行处理框架MPP,提供分布式读写能力 +- 流处理模块:支持流处理框架 +- 流处理模块:支持集群间数据同步功能 \ No newline at end of file diff --git a/src/zh/UserGuide/V1.3.3/IoTDB-Introduction/Release-history_timecho.md b/src/zh/UserGuide/V1.3.3/IoTDB-Introduction/Release-history_timecho.md new file mode 100644 index 000000000..cff39be78 --- /dev/null +++ b/src/zh/UserGuide/V1.3.3/IoTDB-Introduction/Release-history_timecho.md @@ -0,0 +1,219 @@ + +# 
发版历史 + +## TimechoDB(数据库内核) +### V1.3.4.1 +> 发版时间:2025.01.08 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.4.1版本新增模式匹配函数、持续优化数据订阅机制,提升稳定性、import-data/export-data 脚本扩展支持新数据类型,import-data/export-data 脚本合并同时兼容 TsFile、CSV 和 SQL 三种类型数据的导入导出等功能,同时对数据库监控、性能、稳定性进行了全方位提升。具体发布内容如下: + +- 查询模块:用户可通过配置项控制 UDF、PipePlugin、Trigger 和 AINode 通过 URI 加载 jar 包 +- 系统模块:UDF 函数拓展,新增 pattern_match 模式匹配函数 +- 数据同步:支持在发送端指定接收端鉴权信息 +- 生态集成:支持 Kubernetes Operator +- 脚本与工具:import-data/export-data 脚本扩展,支持新数据类型(字符串、大二进制对象、日期、时间戳) +- 脚本与工具:import-data/export-data 脚本迭代,同时兼容 TsFile、CSV 和 SQL 三种类型数据的导入导出 + +### V1.3.3.3 + +> 发版时间:2024.10.31 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.3.3版本增加优化重启恢复性能,减少启动时间、DataNode 主动监听并加载 TsFile,同时增加可观测性指标、发送端支持传文件至指定目录后,接收端自动加载到IoTDB、Alter Pipe 支持 Alter Source 的能力等功能,同时对数据库监控、性能、稳定性进行了全方位提升。具体发布内容如下: + +- 数据同步:接收端支持对不一致数据类型的自动转换 +- 数据同步:接收端增强可观测性,支持多个内部接口的 ops/latency 统计 +- 数据同步:opc-ua-sink 插件支持 CS 模式访问和非匿名访问方式 +- 数据订阅: SDK 支持 create if not exists 和 drop if exists 接口 +- 流处理:Alter Pipe 支持 Alter Source 的能力 +- 系统模块:新增 rest 模块的耗时监控 +- 脚本与工具:支持加载自动加载指定目录的TsFile文件 +- 脚本与工具:import-tsfile脚本扩展,支持脚本与iotdb server不在同一服务器运行 +- 脚本与工具:新增对Kubernetes Helm的支持 +- 脚本与工具:Python 客户端支持新数据类型(字符串、大二进制对象、日期、时间戳) + +### V1.3.3.2 + +> 发版时间:2024.8.15 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.3.2版本支持输出读取mods文件的耗时、输入最大顺乱序归并排序内存 以及dispatch 耗时、通过参数配置对时间分区原点的调整、支持根据 pipe 历史数据处理结束标记自动结束订阅,同时合并了模块内存控制性能提升,具体发布内容如下: + +- 查询模块:Explain Analyze 功能支持输出读取mods文件的耗时 +- 查询模块:Explain Analyze 功能支持输入最大顺乱序归并排序内存以及 dispatch 耗时 +- 存储模块:新增合并目标文件拆分功能,增加配置文件参数 +- 系统模块:支持通过参数配置对时间分区原点的调整 +- 流处理:数据订阅支持根据 pipe 历史数据处理结束标记自动结束订阅 +- 数据同步:RPC 压缩支持指定压缩等级 +- 脚本与工具:数据/元数据导出只过滤 root.__system,不对root.__systema 等开头的数据进行过滤 + +### V1.3.3.1 + +> 发版时间:2024.7.12 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.3.1版本多级存储增加限流机制、数据同步支持在发送端 sink 指定接收端使用用户名密码密码鉴权,优化了数据同步接收端一些不明确的WARN日志、重启恢复性能,减少启动时间,同时对脚本内容进行了合并,具体发布内容如下: + +- 查询模块:Filter 性能优化,提升聚合查询和where条件查询的速度 +- 查询模块:Java Session客户端查询 sql 请求均分到所有节点 +- 
系统模块:将"iotdb-confignode.properties、iotdb-datanode.properties、iotdb-common.properties"配置文件合并为" iotdb-system.properties" +- 存储模块:多级存储增加限流机制 +- 数据同步:数据同步支持在发送端 sink 指定接收端使用用户名密码密码鉴权 +- 系统模块:优化重启恢复性能,减少启动时间 + +### V1.3.2.2 + +> 发版时间:2024.6.4 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.2.2 版本新增 explain analyze 语句分析单个 SQL 查询耗时、新增 UDAF 用户自定义聚合函数框架、支持磁盘空间到达设置阈值自动删除数据、元数据同步、统计指定路径下数据点数、SQL 语句导入导出脚本等功能,同时集群管理工具支持滚动升级、上传插件到整个集群,同时对数据库监控、性能、稳定性进行了全方位提升。具体发布内容如下: + +- 存储模块:insertRecords 接口写入性能提升 +- 存储模块:新增 SpaceTL 功能,支持磁盘空间到达设置阈值自动删除数据 +- 查询模块:新增 Explain Analyze 语句(监控单条 SQL 执行各阶段耗时) +- 查询模块:新增 UDAF 用户自定义聚合函数框架 +- 查询模块:UDF 新增包络解调分析 +- 查询模块:新增 MaxBy/MinBy 函数,支持获取最大/小值的同时返回对应时间戳 +- 查询模块:值过滤查询性能提升 +- 数据同步:路径匹配支持通配符 +- 数据同步:支持元数据同步(含时间序列及相关属性、权限等设置) +- 流处理:增加 Alter Pipe 语句,支持热更新 Pipe 任务的插件 +- 系统模块:系统数据点数统计增加对 load TsFile 导入数据的统计 +- 脚本与工具:新增本地升级备份工具(通过硬链接对原有数据进行备份) +- 脚本与工具:新增 export-data/import-data 脚本,支持将数据导出为 CSV、TsFile 格式或 SQL 语句 +- 脚本与工具:Windows 环境支持通过窗口名区分 ConfigNode、DataNode、Cli + +### V1.3.1.4 + +> 发版时间:2024.4.23 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.1 版本增加系统激活情况查看、内置方差/标准差聚合函数、内置Fill语句支持超时时间设置、tsfile修复命令等功能,增加一键收集实例信息脚本、一键启停集群等脚本,并对视图、流处理等功能进行优化,提升使用易用度和版本性能。具体发布内容如下: + +- 查询模块:Fill 子句支持设置填充超时阈值,超过时间阈值不填充 +- 查询模块:Rest 接口(V2 版)增加列类型返回 +- 数据同步:数据同步简化时间范围指定方式,直接设置起止时间 +- 数据同步:数据同步支持 SSL 传输协议(iotdb-thrift-ssl-sink 插件) +- 系统模块:支持使用 SQL 查询集群激活信息 +- 系统模块:多级存储增加迁移时传输速率控制 +- 系统模块:系统可观测性提升(增加集群节点的散度监控、分布式任务调度框架可观测性) +- 系统模块:日志默认输出策略优化 +- 脚本与工具:增加一键启停集群脚本(start-all/stop-all.sh & start-all/stop-all.bat) +- 脚本与工具:增加一键收集实例信息脚本(collect-info.sh & collect-info.bat) + +### V1.3.0.4 + +> 发版时间:2024.1.3 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.0.4 发布了全新内生机器学习框架 AINode,全面升级权限模块支持序列粒度授予权限,并对视图、流处理等功能进行诸多细节优化,进一步提升了产品的使用易用度,并增强了版本稳定性和各方面性能。具体发布内容如下: + +- 查询模块:新增 AINode 内生机器学习模块 +- 查询模块:优化 show path 语句返回时间长的问题 +- 安全模块:升级权限模块,支持时间序列粒度的权限设置 +- 安全模块:支持客户端与服务器 SSL 通讯加密 +- 流处理:流处理模块新增多种 metrics 监控项 +- 查询模块:非可写视图序列支持 LAST 查询 +- 系统模块:优化数据点监控项统计准确性 + +### V1.2.0.1 + +> 发版时间:2023.6.30 +> +> 下载地址:请联系天谋工作人员进行下载 + 
+V1.2.0.1主要增加了流处理框架、动态模板、substring/replace/round内置查询函数等新特性,增强了show region、show timeseries、show variable等内置语句功能和Session接口,同时优化了内置监控项及其实现,修复部分产品bug和性能问题。 + +- 流处理:新增流处理框架 +- 元数据模块:新增模板动态扩充功能 +- 存储模块:新增SPRINTZ和RLBE编码以及LZMA2压缩算法 +- 查询模块:新增cast、round、substr、replace内置标量函数 +- 查询模块:新增time_duration、mode内置聚合函数 +- 查询模块:SQL语句支持case when语法 +- 查询模块:SQL语句支持order by表达式 +- 接口模块:Python API支持连接分布式多个节点 +- 接口模块:Python客户端支持写入重定向 +- 接口模块:Session API增加用模板批量创建序列接口 + +### V1.1.0.1 + +> 发版时间:2023-04-03 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.1.0.1主要改进增加了部分新特性,如支持 GROUP BY VARIATION、GROUP BY CONDITION 等分段方式、增加 DIFF、COUNT_IF 等实用函数,引入 pipeline 执行引擎进一步提升查询速度等。同时修复对齐序列 last 查询 order by timeseries、LIMIT&OFFSET 不生效、重启后元数据模版错误、删除所有 database 后创建序列错误等相关问题。 + +- 查询模块:align by device 语句支持 order by time +- 查询模块:支持 Show Queries 命令 +- 查询模块:支持 kill query 命令 +- 系统模块:show regions 支持指定特定的 database +- 系统模块:新增 SQL show variables, 可以展示当前集群参数 +- 查询模块:聚合查询支持 GROUP BY VARIATION +- 查询模块:SELECT INTO 支持特定的数据类型强转 +- 查询模块:实现内置标量函数 DIFF +- 系统模块:show regions 显示创建时间 +- 查询模块:实现内置聚合函数 COUNT_IF +- 查询模块:聚合查询支持 GROUP BY CONDITION +- 系统模块:支持修改 dn_rpc_port 和 dn_rpc_address + +### V1.0.0.1 + +> 发版时间:2022.12.03 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.0.0.1主要修复分区计算及查询执行时的相关问题,历史快照未删除,数据查询及 SessionPool 内存使用上的相关问题等;同时改进增加部分新特性,如支持 show variables、explain align by device 等命令,完善 ExportCSV/ExportTsFile/MQTT 等功能,完善集群的启停流程、更改 IoTDB 集群默认的内部端口、新增用于区分集群的 cluster_name 属性等。 + +- 系统模块:支持分布式高可用架构 +- 系统模块:支持多副本存储 +- 系统模块:启动节点时,如果端口已被占用,则终止启动流程 +- 系统模块:支持集群管理sql +- 系统模块:支持对Confignode、Datanode进行启动、停止、移除等功能管理 +- 系统模块:可配置共识协议框架及多种共识协议:Simple、IoTConsensus、Ratis +- 系统模块:支持数据、元数据、Confignode的多副本管理 +- 查询模块:支持大规模并行处理框架MPP,提供分布式读写能力 +- 流处理模块:支持流处理框架 +- 流处理模块:支持集群间数据同步功能 + +## Workbench(控制台工具) + +| **控制台版本号** | **版本说明** | **可支持IoTDB版本** | +| ---------------- | ------------------------------------------------------------ | ------------------- | +| V1.5.1 | 新增AI分析功能以及模式匹配功能 | V1.3.2及以上版本 | +| V1.4.0 | 新增树模型展示及英文版 | V1.3.2及以上版本 | +| V1.3.1 | 分析功能新增分析方式,优化导入模版等功能 | V1.3.2及以上版本 | +| 
V1.3.0 | 新增数据库配置功能,优化部分版本细节 | V1.3.2及以上版本 | +| V1.2.6 | 优化各模块权限控制功能 | V1.3.1及以上版本 | +| V1.2.5 | 可视化功能新增“常用模版”概念,所有界面优化补充页面缓存等功能 | V1.3.0及以上版本 | +| V1.2.4 | 计算功能新增“导入、导出”功能,测点列表新增“时间对齐”字段 | V1.2.2及以上版本 | +| V1.2.3 | 首页新增“激活详情”,新增分析等功能 | V1.2.2及以上版本 | +| V1.2.2 | 优化“测点描述”展示内容等功能 | V1.2.2及以上版本 | +| V1.2.1 | 数据同步界面新增“监控面板”,优化Prometheus提示信息 | V1.2.2及以上版本 | +| V1.2.0 | 全新Workbench版本升级 | V1.2.0及以上版本 | \ No newline at end of file diff --git a/src/zh/UserGuide/V2.0.1/Table/IoTDB-Introduction/Scenario.md b/src/zh/UserGuide/V1.3.3/IoTDB-Introduction/Scenario.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/IoTDB-Introduction/Scenario.md rename to src/zh/UserGuide/V1.3.3/IoTDB-Introduction/Scenario.md diff --git a/src/zh/UserGuide/V2.0.1/Table/QuickStart/QuickStart.md b/src/zh/UserGuide/V1.3.3/QuickStart/QuickStart.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/QuickStart/QuickStart.md rename to src/zh/UserGuide/V1.3.3/QuickStart/QuickStart.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/QuickStart/QuickStart_apache.md b/src/zh/UserGuide/V1.3.3/QuickStart/QuickStart_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/QuickStart/QuickStart_apache.md rename to src/zh/UserGuide/V1.3.3/QuickStart/QuickStart_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/QuickStart/QuickStart_timecho.md b/src/zh/UserGuide/V1.3.3/QuickStart/QuickStart_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/QuickStart/QuickStart_timecho.md rename to src/zh/UserGuide/V1.3.3/QuickStart/QuickStart_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Reference/Common-Config-Manual.md b/src/zh/UserGuide/V1.3.3/Reference/Common-Config-Manual.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Reference/Common-Config-Manual.md rename to src/zh/UserGuide/V1.3.3/Reference/Common-Config-Manual.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Reference/ConfigNode-Config-Manual.md 
b/src/zh/UserGuide/V1.3.3/Reference/ConfigNode-Config-Manual.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Reference/ConfigNode-Config-Manual.md rename to src/zh/UserGuide/V1.3.3/Reference/ConfigNode-Config-Manual.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual.md b/src/zh/UserGuide/V1.3.3/Reference/DataNode-Config-Manual.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual.md rename to src/zh/UserGuide/V1.3.3/Reference/DataNode-Config-Manual.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual_apache.md b/src/zh/UserGuide/V1.3.3/Reference/DataNode-Config-Manual_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual_apache.md rename to src/zh/UserGuide/V1.3.3/Reference/DataNode-Config-Manual_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual_timecho.md b/src/zh/UserGuide/V1.3.3/Reference/DataNode-Config-Manual_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Reference/DataNode-Config-Manual_timecho.md rename to src/zh/UserGuide/V1.3.3/Reference/DataNode-Config-Manual_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Reference/Keywords.md b/src/zh/UserGuide/V1.3.3/Reference/Keywords.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Reference/Keywords.md rename to src/zh/UserGuide/V1.3.3/Reference/Keywords.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Reference/Modify-Config-Manual.md b/src/zh/UserGuide/V1.3.3/Reference/Modify-Config-Manual.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Reference/Modify-Config-Manual.md rename to src/zh/UserGuide/V1.3.3/Reference/Modify-Config-Manual.md diff --git a/src/zh/UserGuide/V1.3.3/Reference/Status-Codes.md b/src/zh/UserGuide/V1.3.3/Reference/Status-Codes.md new file mode 100644 index 000000000..d941f7862 --- /dev/null +++ 
b/src/zh/UserGuide/V1.3.3/Reference/Status-Codes.md @@ -0,0 +1,178 @@ + + +# 状态码 + +IoTDB 引入了**状态码**这一概念。例如,因为 IoTDB 需要在写入数据之前首先注册时间序列,一种可能的解决方案是: + +``` +try { + writeData(); +} catch (SQLException e) { + // the most case is that the time series does not exist + if (e.getMessage().contains("exist")) { + //However, using the content of the error message is not so efficient + registerTimeSeries(); + //write data once again + writeData(); + } +} + +``` + +利用状态码,我们就可以不必写诸如`if (e.getErrorMessage().contains("exist"))`的代码, +只需要使用`e.getStatusType().getCode() == TSStatusCode.TIME_SERIES_NOT_EXIST_ERROR.getStatusCode()`。 + +这里是状态码和相对应信息的列表: + +| 状态码 | 状态类型 | 状态信息 | +|:-----|:---------------------------------------|:--------------------------| +| 200 | SUCCESS_STATUS | 成功状态 | +| 201 | INCOMPATIBLE_VERSION | 版本不兼容 | +| 202 | CONFIGURATION_ERROR | 配置文件有错误项 | +| 203 | START_UP_ERROR | 启动错误 | +| 204 | SHUT_DOWN_ERROR | 关机错误 | +| 300 | UNSUPPORTED_OPERATION | 不支持的操作 | +| 301 | EXECUTE_STATEMENT_ERROR | 执行语句错误 | +| 302 | MULTIPLE_ERROR | 多行语句执行错误 | +| 303 | ILLEGAL_PARAMETER | 参数错误 | +| 304 | OVERLAP_WITH_EXISTING_TASK | 与正在执行的其他操作冲突 | +| 305 | INTERNAL_SERVER_ERROR | 服务器内部错误 | +| 306 | DISPATCH_ERROR | 分发错误 | +| 400 | REDIRECTION_RECOMMEND | 推荐客户端重定向 | +| 500 | DATABASE_NOT_EXIST | 数据库不存在 | +| 501 | DATABASE_ALREADY_EXISTS | 数据库已存在 | +| 502 | SERIES_OVERFLOW | 序列数量超过阈值 | +| 503 | TIMESERIES_ALREADY_EXIST | 时间序列已存在 | +| 504 | TIMESERIES_IN_BLACK_LIST | 时间序列正在删除 | +| 505 | ALIAS_ALREADY_EXIST | 路径别名已经存在 | +| 506 | PATH_ALREADY_EXIST | 路径已经存在 | +| 507 | METADATA_ERROR | 处理元数据错误 | +| 508 | PATH_NOT_EXIST | 路径不存在 | +| 509 | ILLEGAL_PATH | 路径不合法 | +| 510 | CREATE_TEMPLATE_ERROR | 创建物理量模板失败 | +| 511 | DUPLICATED_TEMPLATE | 元数据模板重复 | +| 512 | UNDEFINED_TEMPLATE | 元数据模板未定义 | +| 513 | TEMPLATE_NOT_SET | 元数据模板未设置 | +| 514 | DIFFERENT_TEMPLATE | 元数据模板不一致 | +| 515 | TEMPLATE_IS_IN_USE | 元数据模板正在使用 | +| 516 | TEMPLATE_INCOMPATIBLE | 元数据模板不兼容 | +| 517 | SEGMENT_NOT_FOUND | 未找到 Segment | +| 518 | 
PAGE_OUT_OF_SPACE | PBTreeFile 中 Page 空间不够 | +| 519 | RECORD_DUPLICATED | 记录重复 | +| 520 | SEGMENT_OUT_OF_SPACE | PBTreeFile 中 segment 空间不够 | +| 521 | PBTREE_FILE_NOT_EXISTS | PBTreeFile 不存在 | +| 522 | OVERSIZE_RECORD | 记录大小超过元数据文件页面大小 | +| 523 | PBTREE_FILE_REDO_LOG_BROKEN | PBTreeFile 的 redo 日志损坏 | +| 524 | TEMPLATE_NOT_ACTIVATED | 元数据模板未激活 | +| 526 | SCHEMA_QUOTA_EXCEEDED | 集群元数据超过配额上限 | +| 527 | MEASUREMENT_ALREADY_EXISTS_IN_TEMPLATE | 元数据模板中已存在物理量 | +| 600 | SYSTEM_READ_ONLY | IoTDB 系统只读 | +| 601 | STORAGE_ENGINE_ERROR | 存储引擎相关错误 | +| 602 | STORAGE_ENGINE_NOT_READY | 存储引擎还在恢复中,还不能接受读写操作 | +| 603 | DATAREGION_PROCESS_ERROR | DataRegion 相关错误 | +| 604 | TSFILE_PROCESSOR_ERROR | TsFile 处理器相关错误 | +| 605 | WRITE_PROCESS_ERROR | 写入相关错误 | +| 606 | WRITE_PROCESS_REJECT | 写入拒绝错误 | +| 607 | OUT_OF_TTL | 插入时间少于 TTL 时间边界 | +| 608 | COMPACTION_ERROR | 合并错误 | +| 609 | ALIGNED_TIMESERIES_ERROR | 对齐时间序列错误 | +| 610 | WAL_ERROR | WAL 异常 | +| 611 | DISK_SPACE_INSUFFICIENT | 磁盘空间不足 | +| 700 | SQL_PARSE_ERROR | SQL 语句分析错误 | +| 701 | SEMANTIC_ERROR | SQL 语义错误 | +| 702 | GENERATE_TIME_ZONE_ERROR | 生成时区错误 | +| 703 | SET_TIME_ZONE_ERROR | 设置时区错误 | +| 704 | QUERY_NOT_ALLOWED | 查询语句不允许 | +| 705 | LOGICAL_OPERATOR_ERROR | 逻辑符相关错误 | +| 706 | LOGICAL_OPTIMIZE_ERROR | 逻辑优化相关错误 | +| 707 | UNSUPPORTED_FILL_TYPE | 不支持的填充类型 | +| 708 | QUERY_PROCESS_ERROR | 查询处理相关错误 | +| 709 | MPP_MEMORY_NOT_ENOUGH | MPP 框架中任务执行内存不足 | +| 710 | CLOSE_OPERATION_ERROR | 关闭操作错误 | +| 711 | TSBLOCK_SERIALIZE_ERROR | TsBlock 序列化错误 | +| 712 | INTERNAL_REQUEST_TIME_OUT | MPP 操作超时 | +| 713 | INTERNAL_REQUEST_RETRY_ERROR | 内部操作重试失败 | +| 714 | NO_SUCH_QUERY | 查询不存在 | +| 715 | QUERY_WAS_KILLED | 查询执行时被终止 | +| 800 | UNINITIALIZED_AUTH_ERROR | 授权模块未初始化 | +| 801 | WRONG_LOGIN_PASSWORD | 用户名或密码错误 | +| 802 | NOT_LOGIN | 没有登录 | +| 803 | NO_PERMISSION | 没有操作权限 | +| 804 | USER_NOT_EXIST | 用户不存在 | +| 805 | USER_ALREADY_EXIST | 用户已存在 | +| 806 | USER_ALREADY_HAS_ROLE | 用户拥有对应角色 | +| 807 | USER_NOT_HAS_ROLE | 用户未拥有对应角色 | +| 808 | 
ROLE_NOT_EXIST | 角色不存在 | +| 809 | ROLE_ALREADY_EXIST | 角色已存在 | +| 810 | ALREADY_HAS_PRIVILEGE | 已拥有对应权限 | +| 811 | NOT_HAS_PRIVILEGE | 未拥有对应权限 | +| 812 | CLEAR_PERMISSION_CACHE_ERROR | 清空权限缓存失败 | +| 813 | UNKNOWN_AUTH_PRIVILEGE | 未知权限 | +| 814 | UNSUPPORTED_AUTH_OPERATION | 不支持的权限操作 | +| 815 | AUTH_IO_EXCEPTION | 权限模块IO异常 | +| 900 | MIGRATE_REGION_ERROR | Region 迁移失败 | +| 901 | CREATE_REGION_ERROR | 创建 region 失败 | +| 902 | DELETE_REGION_ERROR | 删除 region 失败 | +| 903 | PARTITION_CACHE_UPDATE_ERROR | 更新分区缓存失败 | +| 904 | CONSENSUS_NOT_INITIALIZED | 共识层未初始化,不能提供服务 | +| 905 | REGION_LEADER_CHANGE_ERROR | Region leader 迁移失败 | +| 906 | NO_AVAILABLE_REGION_GROUP | 无法找到可用的 Region 副本组 | +| 907 | LACK_DATA_PARTITION_ALLOCATION | 调用创建数据分区方法的返回结果里缺少信息 | +| 1000 | DATANODE_ALREADY_REGISTERED | DataNode 在集群中已经注册 | +| 1001 | NO_ENOUGH_DATANODE | DataNode 数量不足,无法移除节点或创建副本 | +| 1002 | ADD_CONFIGNODE_ERROR | 新增 ConfigNode 失败 | +| 1003 | REMOVE_CONFIGNODE_ERROR | 移除 ConfigNode 失败 | +| 1004 | DATANODE_NOT_EXIST | 此 DataNode 不存在 | +| 1005 | DATANODE_STOP_ERROR | DataNode 关闭失败 | +| 1006 | REMOVE_DATANODE_ERROR | 移除 datanode 失败 | +| 1007 | REGISTER_DATANODE_WITH_WRONG_ID | 注册的 DataNode 中有错误的注册id | +| 1008 | CAN_NOT_CONNECT_DATANODE | 连接 DataNode 失败 | +| 1100 | LOAD_FILE_ERROR | 加载文件错误 | +| 1101 | LOAD_PIECE_OF_TSFILE_ERROR | 加载 TsFile 片段异常 | +| 1102 | DESERIALIZE_PIECE_OF_TSFILE_ERROR | 反序列化 TsFile 片段异常 | +| 1103 | SYNC_CONNECTION_ERROR | 同步连接错误 | +| 1104 | SYNC_FILE_REDIRECTION_ERROR | 同步文件时重定向异常 | +| 1105 | SYNC_FILE_ERROR | 同步文件异常 | +| 1106 | CREATE_PIPE_SINK_ERROR | 创建 PIPE Sink 失败 | +| 1107 | PIPE_ERROR | PIPE 异常 | +| 1108 | PIPESERVER_ERROR | PIPE server 异常 | +| 1109 | VERIFY_METADATA_ERROR | 校验元数据失败 | +| 1200 | UDF_LOAD_CLASS_ERROR | UDF 加载类异常 | +| 1201 | UDF_DOWNLOAD_ERROR | 无法从 ConfigNode 下载 UDF | +| 1202 | CREATE_UDF_ON_DATANODE_ERROR | 在 DataNode 创建 UDF 失败 | +| 1203 | DROP_UDF_ON_DATANODE_ERROR | 在 DataNode 卸载 UDF 失败 | +| 1300 | CREATE_TRIGGER_ERROR | ConfigNode 创建 Trigger 失败 | 
+| 1301 | DROP_TRIGGER_ERROR | ConfigNode 删除 Trigger 失败 | +| 1302 | TRIGGER_FIRE_ERROR | 触发器执行错误 | +| 1303 | TRIGGER_LOAD_CLASS_ERROR | 触发器加载类异常 | +| 1304 | TRIGGER_DOWNLOAD_ERROR | 从 ConfigNode 下载触发器异常 | +| 1305 | CREATE_TRIGGER_INSTANCE_ERROR | 创建触发器实例异常 | +| 1306 | ACTIVE_TRIGGER_INSTANCE_ERROR | 激活触发器实例异常 | +| 1307 | DROP_TRIGGER_INSTANCE_ERROR | 删除触发器实例异常 | +| 1308 | UPDATE_TRIGGER_LOCATION_ERROR | 更新有状态的触发器所在 DataNode 异常 | +| 1400 | NO_SUCH_CQ | CQ 任务不存在 | +| 1401 | CQ_ALREADY_ACTIVE | CQ 任务已激活 | +| 1402 | CQ_AlREADY_EXIST | CQ 任务已存在 | +| 1403 | CQ_UPDATE_LAST_EXEC_TIME_ERROR | CQ 更新上一次执行时间失败 | + +> 在最新版本中,我们重构了 IoTDB 的异常类。通过将错误信息统一提取到异常类中,并为所有异常添加不同的错误代码,从而当捕获到异常并引发更高级别的异常时,错误代码将保留并传递,以便用户了解详细的错误原因。 +除此之外,我们添加了一个基础异常类“ProcessException”,由所有异常扩展。 diff --git a/src/zh/UserGuide/latest/Reference/Syntax-Rule.md b/src/zh/UserGuide/V1.3.3/Reference/Syntax-Rule.md similarity index 100% rename from src/zh/UserGuide/latest/Reference/Syntax-Rule.md rename to src/zh/UserGuide/V1.3.3/Reference/Syntax-Rule.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries_apache.md b/src/zh/UserGuide/V1.3.3/Reference/UDF-Libraries_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries_apache.md rename to src/zh/UserGuide/V1.3.3/Reference/UDF-Libraries_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/Function-and-Expression.md b/src/zh/UserGuide/V1.3.3/SQL-Manual/Function-and-Expression.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/Function-and-Expression.md rename to src/zh/UserGuide/V1.3.3/SQL-Manual/Function-and-Expression.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/Operator-and-Expression.md b/src/zh/UserGuide/V1.3.3/SQL-Manual/Operator-and-Expression.md similarity index 99% rename from src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/Operator-and-Expression.md rename to src/zh/UserGuide/V1.3.3/SQL-Manual/Operator-and-Expression.md index 1d02a15bb..df99144bf 100644 --- 
a/src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/Operator-and-Expression.md +++ b/src/zh/UserGuide/V1.3.3/SQL-Manual/Operator-and-Expression.md @@ -234,7 +234,7 @@ OR, |, || | EQUAL_SIZE_BUCKET_OUTLIER_SAMPLE | INT32 / INT64 / FLOAT / DOUBLE | `proportion`取值范围为`(0, 1]`,默认为`0.1`
`type`取值为`avg`或`stendis`或`cos`或`prenextdis`,默认为`avg`
`number`取值应大于0,默认`3`| INT32 / INT64 / FLOAT / DOUBLE | 返回符合采样比例和桶内采样个数的等分桶离群值采样 | | M4 | INT32 / INT64 / FLOAT / DOUBLE | 包含固定点数的窗口和滑动时间窗口使用不同的属性参数。包含固定点数的窗口使用属性`windowSize`和`slidingStep`。滑动时间窗口使用属性`timeInterval`、`slidingStep`、`displayWindowBegin`和`displayWindowEnd`。更多细节见下文。 | INT32 / INT64 / FLOAT / DOUBLE | 返回每个窗口内的第一个点(`first`)、最后一个点(`last`)、最小值点(`bottom`)、最大值点(`top`)。在一个窗口内的聚合点输出之前,M4会将它们按照时间戳递增排序并且去重。 | -详细说明及示例见文档 [采样函数](../SQL-Manual/Function-and-Expression.md#采样函数)。 +详细说明及示例见文档 [采样函数](./Function-and-Expression.md#采样函数)。 ### 时间序列处理函数 | 函数名 | 输入序列类型 | 参数 | 输出序列类型 | 功能描述 | diff --git a/src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/SQL-Manual.md b/src/zh/UserGuide/V1.3.3/SQL-Manual/SQL-Manual.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/SQL-Manual.md rename to src/zh/UserGuide/V1.3.3/SQL-Manual/SQL-Manual.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries.md b/src/zh/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries.md rename to src/zh/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Reference/UDF-Libraries_apache.md b/src/zh/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries_apache.md similarity index 99% rename from src/zh/UserGuide/V2.0.1/Tree/Reference/UDF-Libraries_apache.md rename to src/zh/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries_apache.md index 7112666cf..b35e35e1a 100644 --- a/src/zh/UserGuide/V2.0.1/Tree/Reference/UDF-Libraries_apache.md +++ b/src/zh/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries_apache.md @@ -27,20 +27,20 @@ ## 安装步骤 1. 
请获取与 IoTDB 版本兼容的 UDF 函数库 JAR 包的压缩包。 - | UDF 函数库版本 | 支持的 IoTDB 版本 | 下载链接 | + | UDF 安装包 | 支持的 IoTDB 版本 | 下载链接 | | --------------- | ----------------- | ------------------------------------------------------------ | - | UDF-1.3.3.zip | V1.3.3及以上 | 请联系天谋商务获取 | - | UDF-1.3.2.zip | V1.0.0~V1.3.2 | 请联系天谋商务获取 | + | apache-UDF-1.3.3.zip | V1.3.3及以上 | 请联系天谋商务获取 | + | apache-UDF-1.3.2.zip | V1.0.0~V1.3.2 | 请联系天谋商务获取| 2. 将获取的压缩包中的 library-udf.jar 文件放置在 IoTDB 集群所有节点的 `/ext/udf` 的目录下 -3. 在 IoTDB 的 SQL 命令行终端(CLI)或可视化控制台(Workbench)的 SQL 操作界面中,执行下述相应的函数注册语句。 +3. 在 IoTDB 的 SQL 命令行终端(CLI)的 SQL 操作界面中,执行下述相应的函数注册语句。 4. 批量注册:两种注册方式:注册脚本 或 SQL汇总语句 - 注册脚本 - 将压缩包中的注册脚本(register-UDF.sh 或 register-UDF.bat)按需复制到 IoTDB 的 tools 目录下,修改脚本中的参数(默认为host=127.0.0.1,rpcPort=6667,user=root,pass=root); - 启动 IoTDB 服务,运行注册脚本批量注册 UDF - SQL汇总语句 - - 打开压缩包中的SQl文件,复制全部 SQL 语句,在 IoTDB 的 SQL 命令行终端(CLI)或可视化控制台(Workbench)的 SQL 操作界面中,执行全部 SQl 语句批量注册 UDF + - 打开压缩包中的SQl文件,复制全部 SQL 语句,在 IoTDB 的 SQL 命令行终端(CLI)的 SQL 操作界面中,执行全部 SQl 语句批量注册 UDF ## 数据质量 diff --git a/src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries_timecho.md b/src/zh/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/SQL-Manual/UDF-Libraries_timecho.md rename to src/zh/UserGuide/V1.3.3/SQL-Manual/UDF-Libraries_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Technical-Insider/Cluster-data-partitioning.md b/src/zh/UserGuide/V1.3.3/Technical-Insider/Cluster-data-partitioning.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Technical-Insider/Cluster-data-partitioning.md rename to src/zh/UserGuide/V1.3.3/Technical-Insider/Cluster-data-partitioning.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Technical-Insider/Encoding-and-Compression.md b/src/zh/UserGuide/V1.3.3/Technical-Insider/Encoding-and-Compression.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Technical-Insider/Encoding-and-Compression.md rename to 
src/zh/UserGuide/V1.3.3/Technical-Insider/Encoding-and-Compression.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Technical-Insider/Publication.md b/src/zh/UserGuide/V1.3.3/Technical-Insider/Publication.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Technical-Insider/Publication.md rename to src/zh/UserGuide/V1.3.3/Technical-Insider/Publication.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Tools-System/Benchmark.md b/src/zh/UserGuide/V1.3.3/Tools-System/Benchmark.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Tools-System/Benchmark.md rename to src/zh/UserGuide/V1.3.3/Tools-System/Benchmark.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Tools-System/CLI.md b/src/zh/UserGuide/V1.3.3/Tools-System/CLI.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Tools-System/CLI.md rename to src/zh/UserGuide/V1.3.3/Tools-System/CLI.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Tools-System/Data-Export-Tool.md b/src/zh/UserGuide/V1.3.3/Tools-System/Data-Export-Tool.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Tools-System/Data-Export-Tool.md rename to src/zh/UserGuide/V1.3.3/Tools-System/Data-Export-Tool.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Tools-System/Data-Import-Tool.md b/src/zh/UserGuide/V1.3.3/Tools-System/Data-Import-Tool.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Tools-System/Data-Import-Tool.md rename to src/zh/UserGuide/V1.3.3/Tools-System/Data-Import-Tool.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Tools-System/Maintenance-Tool_apache.md b/src/zh/UserGuide/V1.3.3/Tools-System/Maintenance-Tool_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Tools-System/Maintenance-Tool_apache.md rename to src/zh/UserGuide/V1.3.3/Tools-System/Maintenance-Tool_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Tools-System/Maintenance-Tool_timecho.md b/src/zh/UserGuide/V1.3.3/Tools-System/Maintenance-Tool_timecho.md similarity index 100% rename from 
src/zh/UserGuide/V2.0.1/Tree/Tools-System/Maintenance-Tool_timecho.md rename to src/zh/UserGuide/V1.3.3/Tools-System/Maintenance-Tool_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Tools-System/Monitor-Tool_apache.md b/src/zh/UserGuide/V1.3.3/Tools-System/Monitor-Tool_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Tools-System/Monitor-Tool_apache.md rename to src/zh/UserGuide/V1.3.3/Tools-System/Monitor-Tool_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Tools-System/Monitor-Tool_timecho.md b/src/zh/UserGuide/V1.3.3/Tools-System/Monitor-Tool_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Tools-System/Monitor-Tool_timecho.md rename to src/zh/UserGuide/V1.3.3/Tools-System/Monitor-Tool_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/Tools-System/Workbench_timecho.md b/src/zh/UserGuide/V1.3.3/Tools-System/Workbench_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Tools-System/Workbench_timecho.md rename to src/zh/UserGuide/V1.3.3/Tools-System/Workbench_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/AINode_timecho.md b/src/zh/UserGuide/V1.3.3/User-Manual/AINode_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/AINode_timecho.md rename to src/zh/UserGuide/V1.3.3/User-Manual/AINode_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Audit-Log_timecho.md b/src/zh/UserGuide/V1.3.3/User-Manual/Audit-Log_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Audit-Log_timecho.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Audit-Log_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Authority-Management.md b/src/zh/UserGuide/V1.3.3/User-Manual/Authority-Management.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Authority-Management.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Authority-Management.md diff --git 
a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Data-Recovery.md b/src/zh/UserGuide/V1.3.3/User-Manual/Data-Recovery.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Data-Recovery.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Data-Recovery.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Data-Sync_apache.md b/src/zh/UserGuide/V1.3.3/User-Manual/Data-Sync_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Data-Sync_apache.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Data-Sync_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Data-Sync_timecho.md b/src/zh/UserGuide/V1.3.3/User-Manual/Data-Sync_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Data-Sync_timecho.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Data-Sync_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Data-subscription.md b/src/zh/UserGuide/V1.3.3/User-Manual/Data-subscription.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Data-subscription.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Data-subscription.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Database-Programming.md b/src/zh/UserGuide/V1.3.3/User-Manual/Database-Programming.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Database-Programming.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Database-Programming.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/IoTDB-View_timecho.md b/src/zh/UserGuide/V1.3.3/User-Manual/IoTDB-View_timecho.md similarity index 99% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/IoTDB-View_timecho.md rename to src/zh/UserGuide/V1.3.3/User-Manual/IoTDB-View_timecho.md index 482317b37..2181ae4d4 100644 --- a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/IoTDB-View_timecho.md +++ b/src/zh/UserGuide/V1.3.3/User-Manual/IoTDB-View_timecho.md @@ -361,7 +361,6 @@ WHERE temperature01 < temperature02 
此外,对于别名序列,如果用户想要得到时间序列的tag、attributes等信息,则需要先查询视图列的映射,找到对应的时间序列,再向时间序列查询tag、attributes等信息。查询视图列的映射的方法将会在3.5部分说明。 - ### 视图修改 视图支持的修改操作包括:修改计算逻辑,修改标签/属性,以及删除。 diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Load-Balance.md b/src/zh/UserGuide/V1.3.3/User-Manual/Load-Balance.md similarity index 99% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Load-Balance.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Load-Balance.md index 99730de97..a9093e0be 100644 --- a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Load-Balance.md +++ b/src/zh/UserGuide/V1.3.3/User-Manual/Load-Balance.md @@ -102,9 +102,10 @@ IoTDB 是一个分布式数据库,数据的均衡分布对集群的磁盘空 - **阻塞写入**: IoTConsensus 的 region 迁移不直接阻塞写入,但由于过程中需要阻塞 WAL 文件的清理,如果 WAL 文件堆积达到阈值`wal_throttle_threshold_in_byte`,那么当前 DataNode 会暂停写入,直到 WAL 文件恢复到阈值以下。 - + 如果迁移过程中由于 WAL 达到阈值造成写入报错(例如报错信息为 The write is rejected because the wal directory size has reached the threshold),可以将`wal_throttle_threshold_in_byte`调大到 500GB 或更大以允许继续写入。使用 SQL 语句: ```plain IoTDB> set configuration "wal_throttle_threshold_in_byte"="536870912000" Msg: The statement is executed successfully. 
- ``` \ No newline at end of file + ``` + diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Query-Performance-Analysis.md b/src/zh/UserGuide/V1.3.3/User-Manual/Query-Performance-Analysis.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Query-Performance-Analysis.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Query-Performance-Analysis.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Streaming_apache.md b/src/zh/UserGuide/V1.3.3/User-Manual/Streaming_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Streaming_apache.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Streaming_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Streaming_timecho.md b/src/zh/UserGuide/V1.3.3/User-Manual/Streaming_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Streaming_timecho.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Streaming_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Tiered-Storage_timecho.md b/src/zh/UserGuide/V1.3.3/User-Manual/Tiered-Storage_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Tiered-Storage_timecho.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Tiered-Storage_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/Trigger.md b/src/zh/UserGuide/V1.3.3/User-Manual/Trigger.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/Trigger.md rename to src/zh/UserGuide/V1.3.3/User-Manual/Trigger.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/UDF-development.md b/src/zh/UserGuide/V1.3.3/User-Manual/UDF-development.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/UDF-development.md rename to src/zh/UserGuide/V1.3.3/User-Manual/UDF-development.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/User-defined-function_apache.md b/src/zh/UserGuide/V1.3.3/User-Manual/User-defined-function_apache.md similarity 
index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/User-defined-function_apache.md rename to src/zh/UserGuide/V1.3.3/User-Manual/User-defined-function_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/User-defined-function_timecho.md b/src/zh/UserGuide/V1.3.3/User-Manual/User-defined-function_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/User-defined-function_timecho.md rename to src/zh/UserGuide/V1.3.3/User-Manual/User-defined-function_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/User-Manual/White-List_timecho.md b/src/zh/UserGuide/V1.3.3/User-Manual/White-List_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/User-Manual/White-List_timecho.md rename to src/zh/UserGuide/V1.3.3/User-Manual/White-List_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/UserGuideReadme.md b/src/zh/UserGuide/V1.3.3/UserGuideReadme.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/UserGuideReadme.md rename to src/zh/UserGuide/V1.3.3/UserGuideReadme.md diff --git a/src/zh/UserGuide/V2.0.1/Table/API/Programming-JDBC.md b/src/zh/UserGuide/V2.0.1-Table/API/Programming-JDBC.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/API/Programming-JDBC.md rename to src/zh/UserGuide/V2.0.1-Table/API/Programming-JDBC.md diff --git a/src/zh/UserGuide/V2.0.1/Table/API/Programming-Java-Native-API.md b/src/zh/UserGuide/V2.0.1-Table/API/Programming-Java-Native-API.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/API/Programming-Java-Native-API.md rename to src/zh/UserGuide/V2.0.1-Table/API/Programming-Java-Native-API.md diff --git a/src/zh/UserGuide/V2.0.1/Table/API/Programming-Python-Native-API.md b/src/zh/UserGuide/V2.0.1-Table/API/Programming-Python-Native-API.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/API/Programming-Python-Native-API.md rename to src/zh/UserGuide/V2.0.1-Table/API/Programming-Python-Native-API.md diff --git 
a/src/zh/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept.md b/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Cluster-Concept.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Background-knowledge/Cluster-Concept.md rename to src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Cluster-Concept.md diff --git a/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Cluster-Concept_apache.md b/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Cluster-Concept_apache.md new file mode 100644 index 000000000..de55e7f1d --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Cluster-Concept_apache.md @@ -0,0 +1,107 @@ + + +# 常见概念 + +## 数据模型相关概念 + +| 概念 | 含义 | +| ----------------------- | ------------------------------------------------------------ | +| 数据模型(sql_dialect) | IoTDB 支持两种时序数据模型(SQL语法),管理的对象均为设备和测点树:以层级路径的方式管理数据,一条路径对应一个设备的一个测点表:以关系表的方式管理数据,一张表对应一类设备 | +| 元数据(Schema) | 元数据是数据库的数据模型信息,即树形结构或表结构。包括测点的名称、数据类型等定义。 | +| 设备(Device) | 对应一个实际场景中的物理设备,通常包含多个测点。 | +| 测点(Timeseries) | 又名:物理量、时间序列、时间线、点位、信号量、指标、测量值等。是多个数据点按时间戳递增排列形成的一个时间序列。通常一个测点代表一个采集点位,能够定期采集所在环境的物理量。 | +| 编码(Encoding) | 编码是一种压缩技术,将数据以二进制的形式进行表示,可以提高存储效率。IoTDB 支持多种针对不同类型的数据的编码方法,详细信息请查看:[压缩和编码](../Technical-Insider/Encoding-and-Compression.md) | +| 压缩(Compression) | IoTDB 在数据编码后,使用压缩技术进一步压缩二进制数据,提升存储效率。IoTDB 支持多种压缩方法,详细信息请查看:[压缩和编码](../Technical-Insider/Encoding-and-Compression.md) | + +## 分布式相关概念 + +下图展示了一个常见的 IoTDB 3C3D(3 个 ConfigNode、3 个 DataNode)的集群部署模式: + + + +IoTDB 的集群包括如下常见概念: + +- 节点(ConfigNode、DataNode、AINode) +- Region(SchemaRegion、DataRegion) +- 多副本 + +下文将对以上概念进行介绍。 + + +### 节点 + +IoTDB 集群包括三种节点(进程):ConfigNode(管理节点),DataNode(数据节点)和 AINode(分析节点),如下所示: + +- ConfigNode:管理集群的节点信息、配置信息、用户权限、元数据、分区信息等,负责分布式操作的调度和负载均衡,所有 ConfigNode 之间互为全量备份,如上图中的 ConfigNode-1,ConfigNode-2 和 ConfigNode-3 所示。 +- DataNode:服务客户端请求,负责数据的存储和计算,如上图中的 DataNode-1,DataNode-2 和 DataNode-3 所示。 +- AINode:负责提供机器学习能力,支持注册已训练好的机器学习模型,并通过 SQL 调用模型进行推理,目前已内置自研时序大模型和常见的机器学习算法(如预测与异常检测)。 + +### 数据分区 
+ +在 IoTDB 中,元数据和数据都被分为小的分区,即 Region,由集群的各个 DataNode 进行管理。 + +- SchemaRegion:元数据分区,管理一部分设备和测点的元数据。不同 DataNode 相同 RegionID 的 SchemaRegion 互为副本,如上图中 SchemaRegion-1 拥有三个副本,分别放置于 DataNode-1,DataNode-2 和 DataNode-3。 +- DataRegion:数据分区,管理一部分设备的一段时间的数据。不同 DataNode 相同 RegionID 的 DataRegion 互为副本,如上图中 DataRegion-2 拥有两个副本,分别放置于 DataNode-1 和 DataNode-2。 +- 具体分区算法可参考:[数据分区](../Technical-Insider/Cluster-data-partitioning.md) + +### 多副本 + +数据和元数据的副本数可配置,不同部署模式下的副本数推荐如下配置,其中多副本时可提供高可用服务。 + +| 类别 | 配置项 | 单机推荐配置 | 集群推荐配置 | +| :----- | :------------------------ | :----------- | :----------- | +| 元数据 | schema_replication_factor | 1 | 3 | +| 数据 | data_replication_factor | 1 | 2 | + + +## 部署相关概念 + +IoTDB 有两种运行模式:单机模式、集群模式。 + +### 单机模式 + +IoTDB单机实例包括 1 个ConfigNode、1个DataNode,即1C1D; + +- **特点**:便于开发者安装部署,部署和维护成本较低,操作方便。 +- **适用场景**:资源有限或对高可用要求不高的场景,例如边缘端服务器。 +- **部署方法**:[单机版部署](../Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md) + + +### 集群模式 + +IoTDB 集群实例为 3 个ConfigNode 和不少于 3 个 DataNode,通常为 3 个 DataNode,即3C3D;当部分节点出现故障时,剩余节点仍然能对外提供服务,保证数据库服务的高可用性,且可随节点增加提升数据库性能。 + +- **特点**:具有高可用性、高扩展性,可通过增加 DataNode 提高系统性能。 +- **适用场景**:需要提供高可用和可靠性的企业级应用场景。 +- **部署方法**:[集群版部署](../Deployment-and-Maintenance/Cluster-Deployment_apache.md) + +### 特点总结 + +| 维度 | 单机模式 | 集群模式 | +| ------------ | ---------------------------- | ------------------------ | +| 适用场景 | 边缘侧部署、对高可用要求不高 | 高可用性业务、容灾场景等 | +| 所需机器数量 | 1 | ≥3 | +| 安全可靠性 | 无法容忍单点故障 | 高,可容忍单点故障 | +| 扩展性 | 可扩展 DataNode 提升性能 | 可扩展 DataNode 提升性能 | +| 性能 | 可随 DataNode 数量扩展 | 可随 DataNode 数量扩展 | + +- 单机模式和集群模式,部署步骤类似(逐个增加 ConfigNode 和 DataNode),仅副本数和可提供服务的最少节点数不同。 \ No newline at end of file diff --git a/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Cluster-Concept_timecho.md b/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Cluster-Concept_timecho.md new file mode 100644 index 000000000..77edf9ebb --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Cluster-Concept_timecho.md @@ -0,0 +1,116 @@ + + +# 常见概念 + +## 数据模型相关概念 + +| 
概念 | 含义 | +| ----------------------- | ------------------------------------------------------------ | +| 数据模型(sql_dialect) | IoTDB 支持两种时序数据模型(SQL语法),管理的对象均为设备和测点树:以层级路径的方式管理数据,一条路径对应一个设备的一个测点表:以关系表的方式管理数据,一张表对应一类设备 | +| 元数据(Schema) | 元数据是数据库的数据模型信息,即树形结构或表结构。包括测点的名称、数据类型等定义。 | +| 设备(Device) | 对应一个实际场景中的物理设备,通常包含多个测点。 | +| 测点(Timeseries) | 又名:物理量、时间序列、时间线、点位、信号量、指标、测量值等。是多个数据点按时间戳递增排列形成的一个时间序列。通常一个测点代表一个采集点位,能够定期采集所在环境的物理量。 | +| 编码(Encoding) | 编码是一种压缩技术,将数据以二进制的形式进行表示,可以提高存储效率。IoTDB 支持多种针对不同类型的数据的编码方法,详细信息请查看:[压缩和编码](../Technical-Insider/Encoding-and-Compression.md) | +| 压缩(Compression) | IoTDB 在数据编码后,使用压缩技术进一步压缩二进制数据,提升存储效率。IoTDB 支持多种压缩方法,详细信息请查看:[压缩和编码](../Technical-Insider/Encoding-and-Compression.md) | + +## 分布式相关概念 + +下图展示了一个常见的 IoTDB 3C3D(3 个 ConfigNode、3 个 DataNode)的集群部署模式: + + + +IoTDB 的集群包括如下常见概念: + +- 节点(ConfigNode、DataNode、AINode) +- Region(SchemaRegion、DataRegion) +- 多副本 + +下文将对以上概念进行介绍。 + + +### 节点 + +IoTDB 集群包括三种节点(进程):ConfigNode(管理节点),DataNode(数据节点)和 AINode(分析节点),如下所示: + +- ConfigNode:管理集群的节点信息、配置信息、用户权限、元数据、分区信息等,负责分布式操作的调度和负载均衡,所有 ConfigNode 之间互为全量备份,如上图中的 ConfigNode-1,ConfigNode-2 和 ConfigNode-3 所示。 +- DataNode:服务客户端请求,负责数据的存储和计算,如上图中的 DataNode-1,DataNode-2 和 DataNode-3 所示。 +- AINode:负责提供机器学习能力,支持注册已训练好的机器学习模型,并通过 SQL 调用模型进行推理,目前已内置自研时序大模型和常见的机器学习算法(如预测与异常检测)。 + +### 数据分区 + +在 IoTDB 中,元数据和数据都被分为小的分区,即 Region,由集群的各个 DataNode 进行管理。 + +- SchemaRegion:元数据分区,管理一部分设备和测点的元数据。不同 DataNode 相同 RegionID 的 SchemaRegion 互为副本,如上图中 SchemaRegion-1 拥有三个副本,分别放置于 DataNode-1,DataNode-2 和 DataNode-3。 +- DataRegion:数据分区,管理一部分设备的一段时间的数据。不同 DataNode 相同 RegionID 的 DataRegion 互为副本,如上图中 DataRegion-2 拥有两个副本,分别放置于 DataNode-1 和 DataNode-2。 +- 具体分区算法可参考:[数据分区](../Technical-Insider/Cluster-data-partitioning.md) + +### 多副本 + +数据和元数据的副本数可配置,不同部署模式下的副本数推荐如下配置,其中多副本时可提供高可用服务。 + +| 类别 | 配置项 | 单机推荐配置 | 集群推荐配置 | +| :----- | :------------------------ | :----------- | :----------- | +| 元数据 | schema_replication_factor | 1 | 3 | +| 数据 | data_replication_factor | 1 | 2 | + + +## 部署相关概念 + 
+IoTDB 有三种运行模式:单机模式、集群模式和双活模式。 + +### 单机模式 + +IoTDB单机实例包括 1 个ConfigNode、1个DataNode,即1C1D; + +- **特点**:便于开发者安装部署,部署和维护成本较低,操作方便。 +- **适用场景**:资源有限或对高可用要求不高的场景,例如边缘端服务器。 +- **部署方法**:[单机版部署](../Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md) + +### 双活模式 + +双活版部署为 TimechoDB 企业版功能,是指两个独立的实例进行双向同步,能同时对外提供服务。当一台停机重启后,另一个实例会将缺失数据断点续传。 + +> IoTDB 双活实例通常为2个单机节点,即2套1C1D。每个实例也可以为集群。 + +- **特点**:资源占用最低的高可用解决方案。 +- **适用场景**:资源有限(仅有两台服务器),但希望获得高可用能力。 +- **部署方法**:[双活版部署](../Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md) + +### 集群模式 + +IoTDB 集群实例为 3 个ConfigNode 和不少于 3 个 DataNode,通常为 3 个 DataNode,即3C3D;当部分节点出现故障时,剩余节点仍然能对外提供服务,保证数据库服务的高可用性,且可随节点增加提升数据库性能。 + +- **特点**:具有高可用性、高扩展性,可通过增加 DataNode 提高系统性能。 +- **适用场景**:需要提供高可用和可靠性的企业级应用场景。 +- **部署方法**:[集群版部署](../Deployment-and-Maintenance/Cluster-Deployment_timecho.md) + +### 特点总结 + +| 维度 | 单机模式 | 双活模式 | 集群模式 | +| ------------ | ---------------------------- | ------------------------ | ------------------------ | +| 适用场景 | 边缘侧部署、对高可用要求不高 | 高可用性业务、容灾场景等 | 高可用性业务、容灾场景等 | +| 所需机器数量 | 1 | 2 | ≥3 | +| 安全可靠性 | 无法容忍单点故障 | 高,可容忍单点故障 | 高,可容忍单点故障 | +| 扩展性 | 可扩展 DataNode 提升性能 | 每个实例可按需扩展 | 可扩展 DataNode 提升性能 | +| 性能 | 可随 DataNode 数量扩展 | 与其中一个实例性能相同 | 可随 DataNode 数量扩展 | + +- 单机模式和集群模式,部署步骤类似(逐个增加 ConfigNode 和 DataNode),仅副本数和可提供服务的最少节点数不同。 \ No newline at end of file diff --git a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Data-Model-and-Terminology.md b/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Data-Model-and-Terminology.md similarity index 99% rename from src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Data-Model-and-Terminology.md rename to src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Data-Model-and-Terminology.md index 3f2e5c275..bf396459a 100644 --- a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Data-Model-and-Terminology.md +++ b/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Data-Model-and-Terminology.md @@ -25,7 +25,7 @@ ## 1 时序数据模型 
-在构建IoTDB建模方案前,需要先了解时序数据和时序数据模型,详细内容见此页面:[时序数据模型](../Basic-Concept/Navigating_Time_Series_Data.md) +在构建IoTDB建模方案前,需要先了解时序数据和时序数据模型,详细内容见此页面:[时序数据模型](../Background-knowledge/Navigating_Time_Series_Data.md) ## 2 IoTDB 的两种时序模型 diff --git a/src/zh/UserGuide/V2.0.1/Table/Background-knowledge/Data-Type.md b/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Data-Type.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Background-knowledge/Data-Type.md rename to src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Data-Type.md diff --git a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Navigating_Time_Series_Data.md b/src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Navigating_Time_Series_Data.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Navigating_Time_Series_Data.md rename to src/zh/UserGuide/V2.0.1-Table/Background-knowledge/Navigating_Time_Series_Data.md diff --git a/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Database-Management.md b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Database-Management.md new file mode 100644 index 000000000..4806f6996 --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Database-Management.md @@ -0,0 +1,175 @@ + + +# 数据库管理 + +## 1. 数据库管理 + +### 1.1 创建数据库 + +用于创建数据库。 + +**语法:** + +```SQL + CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? +``` + +**说明:** + +1. 数据库名称,具有以下特性: + - 大小写不敏感 + - 名称的长度不得超过 64 个字符。 + - 名称中包含下划线(_)、数字(非开头)、英文字母可以直接创建 + - 名称中包含特殊字符(如`)、中文字符、数字开头时,必须用双引号 "" 括起来。 + +2. 
WITH properties 子句可配置如下属性: + +> 注:属性的大小写不敏感,且暂不支持修改,有关详细信息[大小写敏感规则](../SQL-Manual/Identifier.md#大小写敏感性)。 + +| 属性 | 含义 | 默认值 | +| ------------------------- | ---------------------------------------- | --------- | +| `TTL` | 数据自动过期删除,单位 ms | INF | +| `TIME_PARTITION_INTERVAL` | 数据库的时间分区间隔,单位 ms | 604800000 | +| `SCHEMA_REGION_GROUP_NUM` | 数据库的元数据副本组数量,一般不需要修改 | 1 | +| `DATA_REGION_GROUP_NUM` | 数据库的数据副本组数量,一般不需要修改 | 2 | + +**示例:** + +```SQL +CREATE DATABASE database1; +CREATE DATABASE IF NOT EXISTS database1; + +// 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年。 +CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); +``` + +### 1.2 使用数据库 + +用于指定当前数据库作为表的命名空间。 + +**语法:** + +```SQL +USE +``` + +**示例:** + +```SQL +USE database1 +``` + +### 1.3 查看当前数据库 + +返回当前会话所连接的数据库名称,若未执行过 `use`语句指定数据库,则默认为 `null`。 + +**语法:** + +```SQL +SHOW CURRENT_DATABASE +``` + +**示例:** + +```SQL +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| null| ++---------------+ + +IoTDB> USE test; + +IoTDB> SHOW CURRENT_DATABASE; ++---------------+ +|CurrentDatabase| ++---------------+ +| iot_database| ++---------------+ +``` + +### 1.4 查看所有数据库 + +用于查看所有数据库和数据库的属性信息。 + +**语法:** + +```SQL +SHOW DATABASES (DETAILS)? 
+``` + +**语句返回列含义如下:** + +| 列名 | 含义 | +| ----------------------- | ------------------------------------------------------------ | +| database | database名称。 | +| TTL | 数据保留周期。如果在创建数据库的时候指定TTL,则TTL对该数据库下所有表的TTL生效。也可以再通过 [create table](#创建表) 、[alter table](#修改表) 来设置或更新表的TTL时间。 | +| SchemaReplicationFactor | 元数据副本数,用于确保元数据的高可用性。可以在`iotdb-system.properties`中修改`schema_replication_factor`配置项。 | +| DataReplicationFactor | 数据副本数,用于确保数据的高可用性。可以在`iotdb-system.properties`中修改`data_replication_factor`配置项。 | +| TimePartitionInterval | 时间分区间隔,决定了数据在磁盘上按多长时间进行目录分组,通常采用默认1周即可。 | +| SchemaRegionGroupNum | 使用`DETAILS`语句会返回此列,展示数据库的元数据副本组数量,一般不需要修改 | +| DataRegionGroupNum | 使用`DETAILS`语句会返回此列,展示数据库的数据副本组数量,一般不需要修改 | + +**示例:** + +```SQL +IoTDB> show databases ++---------+-------+-----------------------+---------------------+---------------------+ +| Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| ++---------+-------+-----------------------+---------------------+---------------------+ +|test_prop| 300| 3| 2| 100000| +| test2| 300| 3| 2| 604800000| ++---------+-------+-----------------------+---------------------+---------------------+ +IoTDB> show databases details ++---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ +| Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum| DataRegionGroupNum| ++---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ +|test_prop| 300| 3| 2| 100000| 1| 2| +| test2| 300| 3| 2| 604800000| 1| 2| ++---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ +``` + +### 1.5 修改数据库 + +暂不支持,V2.0.2.1后支持 + +### 1.6 删除数据库 + +用于删除数据库。 + +**语法:** + +```SQL +DROP DATABASE (IF EXISTS)? +``` + +**说明:** + +1. 数据库已被设置为当前使用(use)的数据库,仍然可以被删除(drop)。 +2. 
删除数据库将导致所选数据库及其内所有表连同其存储的数据一并被删除。 + +**示例:** + +```SQL +DROP DATABASE IF EXISTS database1 +``` diff --git a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Delete-Data.md b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Delete-Data.md similarity index 95% rename from src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Delete-Data.md rename to src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Delete-Data.md index d1b96f221..444f4e971 100644 --- a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Delete-Data.md +++ b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Delete-Data.md @@ -57,7 +57,7 @@ ID_CONDITION: ### 1.2 示例: -可以在[示例数据页面](../Basic-Concept/Sample-Data.md)中导入示例数据。可以使用这些数据来测试和执行示例中的SQL语句。 +可以在[示例数据页面](../Reference/Sample-Data.md)中导入示例数据。可以使用这些数据来测试和执行示例中的SQL语句。 #### 1.2.1 删除全表数据 diff --git a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Query-Data.md b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Query-Data.md similarity index 97% rename from src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Query-Data.md rename to src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Query-Data.md index 169f3590b..b28ebbbce 100644 --- a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Query-Data.md +++ b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Query-Data.md @@ -63,7 +63,7 @@ IoTDB 查询语法提供以下子句: ### 3.1 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 ### 3.2 原始数据查询 diff --git a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/TTL-Delete-Data.md b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/TTL-Delete-Data.md similarity index 94% rename from src/zh/UserGuide/V2.0.1/Table/Basic-Concept/TTL-Delete-Data.md rename to src/zh/UserGuide/V2.0.1-Table/Basic-Concept/TTL-Delete-Data.md index 86fb6560d..729649cd4 100644 --- a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/TTL-Delete-Data.md +++ 
b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/TTL-Delete-Data.md @@ -40,7 +40,7 @@ IoTDB支持表级的数据自动过期删除(TTL)设置,允许系统自动 ### 2.1 为表设置 TTL -如果在建表时通过sql语句设置了表的 TTL,则会以表的ttl为准。建表语句详情可见:[数据库&表管理](../Basic-Concept//Database&Table-Management.md) +如果在建表时通过sql语句设置了表的 TTL,则会以表的ttl为准。建表语句详情可见:[表管理](../Basic-Concept/Table-Management.md) 示例1:创建表时设置 TTL @@ -64,7 +64,7 @@ ALTER TABLE tableB set properties TTL=DEFAULT ### 2.2 为数据库设置 TTL -没有设置表的TTL,则会继承database的ttl。建数据库语句详情可见:[数据库&表管理](../Basic-Concept/Database&Table-Management.md) +没有设置表的TTL,则会继承database的ttl。建数据库语句详情可见:[数据库管理](../Basic-Concept/Database-Management.md) 示例4:数据库设置为 ttl =3600000,将生成一个ttl=3600000的表: @@ -100,7 +100,7 @@ ALTER TABLE tableB set properties TTL='INF' ## 4. 查看 TTL 信息 -使用 "SHOW DATABASES" 和 "SHOW TABLES" 命令可以直接显示数据库和表的 TTL 详情。数据库和表管理语句详情可见:[数据库&表管理](../Basic-Concept/Database&Table-Management.md) +使用 "SHOW DATABASES" 和 "SHOW TABLES" 命令可以直接显示数据库和表的 TTL 详情。数据库和表管理语句详情可见:[数据库管理](../Basic-Concept/Database-Management.md)、[表管理](../Basic-Concept/Table-Management.md) > 注意,树模型数据库的TTL也将显示。 diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Database&Table-Management.md b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Table-Management.md similarity index 60% rename from src/zh/UserGuide/Master/Table/Basic-Concept/Database&Table-Management.md rename to src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Table-Management.md index 77559792b..c2b59707a 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Database&Table-Management.md +++ b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Table-Management.md @@ -19,166 +19,13 @@ --> -# 数据库&表管理 +# 表管理 -## 1. 数据库管理 +## 1. 表管理 -### 1.1 创建数据库 +### 1.1 创建表 -用于创建数据库。 - -**语法:** - -```SQL - CREATE DATABASE (IF NOT EXISTS)? (WITH properties)? -``` - -**说明:** - -1. 数据库名称,具有以下特性: - - 大小写不敏感 - - 名称的长度不得超过 64 个字符。 - - 名称中包含下划线(_)、数字(非开头)、英文字母可以直接创建 - - 名称中包含特殊字符(如`)、中文字符、数字开头时,必须用双引号 "" 括起来。 - -2. 
WITH properties 子句可配置如下属性: - -> 注:属性的大小写不敏感,且暂不支持修改,有关详细信息[大小写敏感规则](../SQL-Manual/Identifier.md#大小写敏感性)。 - -| 属性 | 含义 | 默认值 | -| ------------------------- | ---------------------------------------- | --------- | -| `TTL` | 数据自动过期删除,单位 ms | INF | -| `TIME_PARTITION_INTERVAL` | 数据库的时间分区间隔,单位 ms | 604800000 | -| `SCHEMA_REGION_GROUP_NUM` | 数据库的元数据副本组数量,一般不需要修改 | 1 | -| `DATA_REGION_GROUP_NUM` | 数据库的数据副本组数量,一般不需要修改 | 2 | - -**示例:** - -```SQL -CREATE DATABASE database1; -CREATE DATABASE IF NOT EXISTS database1; - -// 创建一个名为 database1 的数据库,并将数据库的TTL时间设置为1年。 -CREATE DATABASE IF NOT EXISTS database1 with(TTL=31536000000); -``` - -### 1.2 使用数据库 - -用于指定当前数据库作为表的命名空间。 - -**语法:** - -```SQL -USE -``` - -**示例:** - -```SQL -USE database1 -``` - -### 1.3 查看当前数据库 - -返回当前会话所连接的数据库名称,若未执行过 `use`语句指定数据库,则默认为 `null`。 - -**语法:** - -```SQL -SHOW CURRENT_DATABASE -``` - -**示例:** - -```SQL -IoTDB> SHOW CURRENT_DATABASE; -+---------------+ -|CurrentDatabase| -+---------------+ -| null| -+---------------+ - -IoTDB> USE test; - -IoTDB> SHOW CURRENT_DATABASE; -+---------------+ -|CurrentDatabase| -+---------------+ -| iot_database| -+---------------+ -``` - -### 1.4 查看所有数据库 - -用于查看所有数据库和数据库的属性信息。 - -**语法:** - -```SQL -SHOW DATABASES (DETAILS)? 
-``` - -**语句返回列含义如下:** - -| 列名 | 含义 | -| ----------------------- | ------------------------------------------------------------ | -| database | database名称。 | -| TTL | 数据保留周期。如果在创建数据库的时候指定TTL,则TTL对该数据库下所有表的TTL生效。也可以再通过 [create table](#创建表) 、[alter table](#修改表) 来设置或更新表的TTL时间。 | -| SchemaReplicationFactor | 元数据副本数,用于确保元数据的高可用性。可以在`iotdb-system.properties`中修改`schema_replication_factor`配置项。 | -| DataReplicationFactor | 数据副本数,用于确保数据的高可用性。可以在`iotdb-system.properties`中修改`data_replication_factor`配置项。 | -| TimePartitionInterval | 时间分区间隔,决定了数据在磁盘上按多长时间进行目录分组,通常采用默认1周即可。 | -| SchemaRegionGroupNum | 使用`DETAILS`语句会返回此列,展示数据库的元数据副本组数量,一般不需要修改 | -| DataRegionGroupNum | 使用`DETAILS`语句会返回此列,展示数据库的数据副本组数量,一般不需要修改 | - -**示例:** - -```SQL -IoTDB> show databases -+---------+-------+-----------------------+---------------------+---------------------+ -| Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval| -+---------+-------+-----------------------+---------------------+---------------------+ -|test_prop| 300| 3| 2| 100000| -| test2| 300| 3| 2| 604800000| -+---------+-------+-----------------------+---------------------+---------------------+ -IoTDB> show databases details -+---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ -| Database|TTL(ms)|SchemaReplicationFactor|DataReplicationFactor|TimePartitionInterval|SchemaRegionGroupNum| DataRegionGroupNum| -+---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ -|test_prop| 300| 3| 2| 100000| 1| 2| -| test2| 300| 3| 2| 604800000| 1| 2| -+---------+-------+-----------------------+---------------------+---------------------+-----------------------+-----------------------+ -``` - -### 1.5 修改数据库 - -暂不支持,V2.0.2.1后支持 - -### 1.6 删除数据库 - -用于删除数据库。 - -**语法:** - -```SQL -DROP DATABASE (IF EXISTS)? -``` - -**说明:** - -1. 数据库已被设置为当前使用(use)的数据库,仍然可以被删除(drop)。 -2. 
删除数据库将导致所选数据库及其内所有表连同其存储的数据一并被删除。 - -**示例:** - -```SQL -DROP DATABASE IF EXISTS database1 -``` - -## 2. 表管理 - -### 2.1 创建表 - -#### 2.1.1 通过 Create 语句手动创建表 +#### 1.1.1 通过 Create 语句手动创建表 用于在当前数据库中创建表,也可以对任何指定数据库创建表,格式为“数据库名.表名”。 @@ -247,7 +94,7 @@ CREATE TABLE tableC ( ) with (TTL=DEFAULT); ``` -#### 2.1.2 通过 Session 写入自动创建表 +#### 1.1.2 通过 Session 写入自动创建表 在通过 Session 进行数据写入时,IoTDB 能够根据写入请求中的信息自动构建表结构,无需用户事先手动创建表即可直接执行数据写入操作。 @@ -323,7 +170,7 @@ IoTDB> desc table1 +-----------+---------+-----------+ ``` -### 2.2 查看表 +### 1.2 查看表 用于查看该数据库中或指定数据库中的所有表和表库的属性信息。 @@ -362,7 +209,7 @@ IoTDB> show tables details from test_db +---------+-------+----------+ ``` -### 2.3 查看表的列 +### 1.3 查看表的列 用于查看表的列名、数据类型、类别、状态。 @@ -403,7 +250,7 @@ IoTDB> desc tableB details +----------+---------+-----------+----------+ ``` -### 2.4 修改表 +### 1.4 修改表 用于修改表,包括添加列、删除列以及设置表的属性。 @@ -428,7 +275,7 @@ ALTER TABLE tableB ADD COLUMN IF NOT EXISTS a TAG ALTER TABLE tableB set properties TTL=3600 ``` -### 2.5 删除表 +### 1.5 删除表 用于删除表。 diff --git a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Write-Updata-Data.md b/src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Write-Updata-Data.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Write-Updata-Data.md rename to src/zh/UserGuide/V2.0.1-Table/Basic-Concept/Write-Updata-Data.md diff --git a/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md new file mode 100644 index 000000000..36afa05ae --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Cluster-Deployment_apache.md @@ -0,0 +1,326 @@ + +# 集群版安装部署 + +本小节描述如何手动部署包括3个ConfigNode和3个DataNode的实例,即通常所说的3C3D集群。 + +
+ +
+ +## 1 注意事项 + +1. 安装前请确认系统已参照[系统配置](../Deployment-and-Maintenance/Environment-Requirements.md)准备完成。 + +2. 推荐使用`hostname`进行IP配置,可避免后期修改主机ip导致数据库无法启动的问题。设置hostname需要在服务器上配`/etc/hosts`,如本机ip是11.101.17.224,hostname是iotdb-1,则可以使用以下命令设置服务器的 hostname,并使用hostname配置IoTDB的`cn_internal_address`、`dn_internal_address`。 + + ```shell + echo "11.101.17.224 iotdb-1" >> /etc/hosts + ``` + +3. 有些参数首次启动后不能修改,请参考下方的[参数配置](#参数配置)章节来进行设置。 + +4. 无论是在linux还是windows中,请确保IoTDB的安装路径中不含空格和中文,避免软件运行异常。 + +5. 请注意,安装部署(包括激活和使用软件)IoTDB时,您可以: + +- 使用 root 用户(推荐):可以避免权限等问题。 + +- 使用固定的非 root 用户: + + - 使用同一用户操作:确保在启动、激活、停止等操作均保持使用同一用户,不要切换用户。 + + - 避免使用 sudo:使用 sudo 命令会以 root 用户权限执行命令,可能会引起权限混淆或安全问题。 + +6. 推荐部署监控面板,可以对重要运行指标进行监控,随时掌握数据库运行状态,监控面板可以联系商务获取,部署监控面板步骤可以参考:[监控面板部署](./Monitoring-panel-deployment.md) + +## 2 准备步骤 + +1. 准备IoTDB数据库安装包 :timechodb-{version}-bin.zip(安装包获取见:[链接](./IoTDB-Package_apache.md)) +2. 按环境要求配置好操作系统环境(系统环境配置见:[链接](./Environment-Requirements.md)) + +## 3 安装步骤 + +假设现在有3台linux服务器,IP地址和服务角色分配如下: + +| 节点ip | 主机名 | 服务 | +| ------------- | ------- | -------------------- | +| 11.101.17.224 | iotdb-1 | ConfigNode、DataNode | +| 11.101.17.225 | iotdb-2 | ConfigNode、DataNode | +| 11.101.17.226 | iotdb-3 | ConfigNode、DataNode | + +### 3.1 设置主机名 + +在3台机器上分别配置主机名,设置主机名需要在目标服务器上配置/etc/hosts,使用如下命令: + +```shell +echo "11.101.17.224 iotdb-1" >> /etc/hosts +echo "11.101.17.225 iotdb-2" >> /etc/hosts +echo "11.101.17.226 iotdb-3" >> /etc/hosts +``` + +### 3.2 参数配置 + +解压安装包并进入安装目录 + +```shell +unzip timechodb-{version}-bin.zip +cd timechodb-{version}-bin +``` + +#### 3.2.1 环境脚本配置 + +- ./conf/confignode-env.sh配置 + +| **配置项** | **说明** | **默认值** | **推荐值** | 备注 | +| :---------- | :------------------------------------- | :--------- | :----------------------------------------------- | :----------- | +| MEMORY_SIZE | IoTDB ConfigNode节点可以使用的内存总量 | 空 | 可按需填写,填写后系统会根据填写的数值来分配内存 | 重启服务生效 | + +- ./conf/datanode-env.sh配置 + +| **配置项** | **说明** | **默认值** | **推荐值** | 备注 | +| :---------- | 
:----------------------------------- | :--------- | :----------------------------------------------- | :----------- | +| MEMORY_SIZE | IoTDB DataNode节点可以使用的内存总量 | 空 | 可按需填写,填写后系统会根据填写的数值来分配内存 | 重启服务生效 | + +#### 3.2.2 通用配置(./conf/iotdb-system.properties) + +- 集群配置 + +| 配置项 | 说明 | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 | +| ------------------------- | ---------------------------------------- | -------------- | -------------- | -------------- | +| cluster_name | 集群名称 | defaultCluster | defaultCluster | defaultCluster | +| schema_replication_factor | 元数据副本数,DataNode数量不应少于此数目 | 3 | 3 | 3 | +| data_replication_factor | 数据副本数,DataNode数量不应少于此数目 | 2 | 2 | 2 | + +#### 3.2.3 ConfigNode 配置 + +| 配置项 | 说明 | 默认 | 推荐值 | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 | 备注 | +| ------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------- | ------------- | ------------- | ------------- | ------------------ | +| cn_internal_address | ConfigNode在集群内部通讯使用的地址 | 127.0.0.1 | 所在服务器的IPV4地址或hostname,推荐使用hostname | iotdb-1 | iotdb-2 | iotdb-3 | 首次启动后不能修改 | +| cn_internal_port | ConfigNode在集群内部通讯使用的端口 | 10710 | 10710 | 10710 | 10710 | 10710 | 首次启动后不能修改 | +| cn_consensus_port | ConfigNode副本组共识协议通信使用的端口 | 10720 | 10720 | 10720 | 10720 | 10720 | 首次启动后不能修改 | +| cn_seed_config_node | 节点注册加入集群时连接的ConfigNode 的地址,cn_internal_address:cn_internal_port | 127.0.0.1:10710 | 第一个CongfigNode的cn_internal_address:cn_internal_port | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | 首次启动后不能修改 | + +#### 3.2.4 DataNode 配置 + +| 配置项 | 说明 | 默认 | 推荐值 | 11.101.17.224 | 11.101.17.225 | 11.101.17.226 | 备注 | +| ------------------------------- | ------------------------------------------------------------ | --------------- | ------------------------------------------------------- | ------------- | ------------- | ------------- | ------------------ | +| dn_rpc_address | 客户端 RPC 服务的地址 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 0.0.0.0 | 
0.0.0.0 | 重启服务生效 | +| dn_rpc_port | 客户端 RPC 服务的端口 | 6667 | 6667 | 6667 | 6667 | 6667 | 重启服务生效 | +| dn_internal_address | DataNode在集群内部通讯使用的地址 | 127.0.0.1 | 所在服务器的IPV4地址或hostname,推荐使用hostname | iotdb-1 | iotdb-2 | iotdb-3 | 首次启动后不能修改 | +| dn_internal_port | DataNode在集群内部通信使用的端口 | 10730 | 10730 | 10730 | 10730 | 10730 | 首次启动后不能修改 | +| dn_mpp_data_exchange_port | DataNode用于接收数据流使用的端口 | 10740 | 10740 | 10740 | 10740 | 10740 | 首次启动后不能修改 | +| dn_data_region_consensus_port | DataNode用于数据副本共识协议通信使用的端口 | 10750 | 10750 | 10750 | 10750 | 10750 | 首次启动后不能修改 | +| dn_schema_region_consensus_port | DataNode用于元数据副本共识协议通信使用的端口 | 10760 | 10760 | 10760 | 10760 | 10760 | 首次启动后不能修改 | +| dn_seed_config_node | 节点注册加入集群时连接的ConfigNode地址,即cn_internal_address:cn_internal_port | 127.0.0.1:10710 | 第一个CongfigNode的cn_internal_address:cn_internal_port | iotdb-1:10710 | iotdb-1:10710 | iotdb-1:10710 | 首次启动后不能修改 | + +> ❗️注意:VSCode Remote等编辑器无自动保存配置功能,请确保修改的文件被持久化保存,否则配置项无法生效 + +### 3.3 启动ConfigNode节点 + +先启动第一个iotdb-1的confignode, 保证种子confignode节点先启动,然后依次启动第2和第3个confignode节点 + +```shell +cd sbin +./start-confignode.sh -d #“-d”参数将在后台进行启动 +``` + +如果启动失败,请参考下[常见问题](#常见问题) + +### 3.4 启动DataNode 节点 + + 分别进入iotdb的sbin目录下,依次启动3个datanode节点: + +```shell +cd sbin +./start-datanode.sh -d #-d参数将在后台进行启动 +``` + +### 3.5 验证激活 + +当看到“Result”字段状态显示为success表示激活成功 + +![](https://alioss.timecho.com/docs/img/%E9%9B%86%E7%BE%A4-%E9%AA%8C%E8%AF%81.png) + +## 4 节点维护步骤 + +### 4.1 ConfigNode节点维护 + +ConfigNode节点维护分为ConfigNode添加和移除两种操作,有两个常见使用场景: + +- 集群扩展:如集群中只有1个ConfigNode时,希望增加ConfigNode以提升ConfigNode节点高可用性,则可以添加2个ConfigNode,使得集群中有3个ConfigNode。 +- 集群故障恢复:1个ConfigNode所在机器发生故障,使得该ConfigNode无法正常运行,此时可以移除该ConfigNode,然后添加一个新的ConfigNode进入集群。 + +> ❗️注意,在完成ConfigNode节点维护后,需要保证集群中有1或者3个正常运行的ConfigNode。2个ConfigNode不具备高可用性,超过3个ConfigNode会导致性能损失。 + +#### 4.1.1 添加ConfigNode节点 + +脚本命令: + +```shell +# Linux / MacOS +# 首先切换到IoTDB根目录 +sbin/start-confignode.sh + +# Windows +# 首先切换到IoTDB根目录 +sbin/start-confignode.bat +``` + +#### 4.1.2 
移除ConfigNode节点
+
+首先通过CLI连接集群,通过`show confignodes`确认想要移除ConfigNode的内部地址与端口号:
+
+```shell
+IoTDB> show confignodes
++------+-------+---------------+------------+--------+
+|NodeID| Status|InternalAddress|InternalPort|    Role|
++------+-------+---------------+------------+--------+
+|     0|Running|      127.0.0.1|       10710|  Leader|
+|     1|Running|      127.0.0.1|       10711|Follower|
+|     2|Running|      127.0.0.1|       10712|Follower|
++------+-------+---------------+------------+--------+
+Total line number = 3
+It costs 0.030s
+```
+
+然后使用脚本将ConfigNode移除。脚本命令:
+
+```Bash
+# Linux / MacOS
+sbin/remove-confignode.sh [confignode_id]
+或
+./sbin/remove-confignode.sh [cn_internal_address:cn_internal_port]
+
+#Windows
+sbin/remove-confignode.bat [confignode_id]
+或
+./sbin/remove-confignode.bat [cn_internal_address:cn_internal_port]
+```
+
+### 4.2 DataNode节点维护
+
+DataNode节点维护有两个常见场景:
+
+- 集群扩容:出于集群能力扩容等目的,添加新的DataNode进入集群
+- 集群故障恢复:一个DataNode所在机器出现故障,使得该DataNode无法正常运行,此时可以移除该DataNode,并添加新的DataNode进入集群
+
+> ❗️注意,为了使集群能正常工作,在DataNode节点维护过程中以及维护完成后,正常运行的DataNode总数不得少于数据副本数(通常为2),也不得少于元数据副本数(通常为3)。
+
+#### 4.2.1 添加DataNode节点
+
+脚本命令:
+
+```Bash
+# Linux / MacOS
+# 首先切换到IoTDB根目录
+sbin/start-datanode.sh
+
+#Windows
+# 首先切换到IoTDB根目录
+sbin/start-datanode.bat
+```
+
+说明:在添加DataNode后,随着新的写入到来(以及旧数据过期,如果设置了TTL),集群负载会逐渐向新的DataNode均衡,最终在所有节点上达到存算资源的均衡。
+
+#### 4.2.2 移除DataNode节点
+
+首先通过CLI连接集群,通过`show datanodes`确认想要移除的DataNode的RPC地址与端口号:
+
+```Bash
+IoTDB> show datanodes
++------+-------+----------+-------+-------------+---------------+
+|NodeID| Status|RpcAddress|RpcPort|DataRegionNum|SchemaRegionNum|
++------+-------+----------+-------+-------------+---------------+
+|     1|Running|   0.0.0.0|   6667|            0|              0|
+|     2|Running|   0.0.0.0|   6668|            1|              1|
+|     3|Running|   0.0.0.0|   6669|            1|              0|
++------+-------+----------+-------+-------------+---------------+
+Total line number = 3
+It costs 0.110s
+```
+
+然后使用脚本将DataNode移除。脚本命令:
+
+```Bash
+# Linux / MacOS
+sbin/remove-datanode.sh [dn_rpc_address:dn_rpc_port]
+
+#Windows
+sbin/remove-datanode.bat [dn_rpc_address:dn_rpc_port] +``` + +## 5 常见问题 + +1. 部署过程中多次提示激活失败 + - 使用 `ls -al` 命令:使用 `ls -al` 命令检查安装包根目录的所有者信息是否为当前用户。 + - 检查激活目录:检查 `./activation` 目录下的所有文件,所有者信息是否为当前用户。 +2. Confignode节点启动失败 + - 步骤 1: 请查看启动日志,检查是否修改了某些首次启动后不可改的参数。 + - 步骤 2: 请查看启动日志,检查是否出现其他异常。日志中若存在异常现象,请联系天谋技术支持人员咨询解决方案。 + - 步骤 3: 如果是首次部署或者数据可删除,也可按下述步骤清理环境,重新部署后,再次启动。 + - 清理环境: + + 1. 结束所有 ConfigNode 和 DataNode 进程。 + ```Bash + # 1. 停止 ConfigNode 和 DataNode 服务 + sbin/stop-standalone.sh + + # 2. 检查是否还有进程残留 + jps + # 或者 + ps -ef|gerp iotdb + + # 3. 如果有进程残留,则手动kill + kill -9 + # 如果确定机器上仅有1个iotdb,可以使用下面命令清理残留进程 + ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9 + ``` + + 2. 删除 data 和 logs 目录。 + - 说明:删除 data 目录是必要的,删除 logs 目录是为了纯净日志,非必需。 + ```shell + cd /data/iotdb rm -rf data logs + ``` +## 6 附录 + +### 6.1 Confignode节点参数介绍 + +| 参数 | 描述 | 是否为必填项 | +| :--- | :------------------------------- | :----------- | +| -d | 以守护进程模式启动,即在后台运行 | 否 | + +### 6.2 Datanode节点参数介绍 + +| 缩写 | 描述 | 是否为必填项 | +| :--- | :--------------------------------------------- | :----------- | +| -v | 显示版本信息 | 否 | +| -f | 在前台运行脚本,不将其放到后台 | 否 | +| -d | 以守护进程模式启动,即在后台运行 | 否 | +| -p | 指定一个文件来存放进程ID,用于进程管理 | 否 | +| -c | 指定配置文件夹的路径,脚本会从这里加载配置文件 | 否 | +| -g | 打印垃圾回收(GC)的详细信息 | 否 | +| -H | 指定Java堆转储文件的路径,当JVM内存溢出时使用 | 否 | +| -E | 指定JVM错误日志文件的路径 | 否 | +| -D | 定义系统属性,格式为 key=value | 否 | +| -X | 直接传递 -XX 参数给 JVM | 否 | +| -h | 帮助指令 | 否 | + diff --git a/src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md rename to src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Cluster-Deployment_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Database-Resources.md 
b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Database-Resources.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Database-Resources.md rename to src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Database-Resources.md diff --git a/src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Docker-Deployment_apache.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Docker-Deployment_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Docker-Deployment_apache.md rename to src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Docker-Deployment_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md rename to src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Docker-Deployment_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md rename to src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Dual-Active-Deployment_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Environment-Requirements.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Environment-Requirements.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Environment-Requirements.md rename to src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Environment-Requirements.md diff --git 
a/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md new file mode 100644 index 000000000..80e7cb01b --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/IoTDB-Package_apache.md @@ -0,0 +1,44 @@ +
+# 安装包获取
+## 安装包获取方式
+
+安装包可直接在Apache IoTDB官网获取:https://iotdb.apache.org/zh/Download/
+
+## 安装包结构
+
+解压后安装包(`apache-iotdb-{version}-all-bin.zip`),安装包解压后目录结构如下:
+
+| **目录** | **类型** | **说明** |
+| ---------------- | -------- | ------------------------------------------------------------ |
+| conf | 文件夹 | 配置文件目录,包含 ConfigNode、DataNode、JMX 和 logback 等配置文件 |
+| data | 文件夹 | 默认的数据文件目录,包含 ConfigNode 和 DataNode 的数据文件。(启动程序后才会生成该目录) |
+| lib | 文件夹 | IoTDB可执行库文件目录 |
+| licenses | 文件夹 | 开源社区证书文件目录 |
+| logs | 文件夹 | 默认的日志文件目录,包含 ConfigNode 和 DataNode 的日志文件(启动程序后才会生成该目录) |
+| sbin | 文件夹 | 主要脚本目录,包含启、停等脚本等 |
+| tools | 文件夹 | 系统周边工具目录 |
+| ext | 文件夹 | pipe,trigger,udf插件的相关文件(需要使用时用户自行创建) |
+| LICENSE | 文件 | 证书 |
+| NOTICE | 文件 | 提示 |
+| README_ZH\.md | 文件 | markdown格式的中文版说明 |
+| README\.md | 文件 | 使用说明 |
+| RELEASE_NOTES\.md | 文件 | 版本说明 |
\ No newline at end of file
diff --git a/src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md rename to src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/IoTDB-Package_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md rename to src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Monitoring-panel-deployment.md diff --git 
a/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md new file mode 100644 index 000000000..9205ed436 --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md @@ -0,0 +1,180 @@ + +# 单机版安装部署 + +本章将介绍如何启动IoTDB单机实例,IoTDB单机实例包括 1 个ConfigNode 和1个DataNode(即通常所说的1C1D)。 + +## 1 注意事项 + +1. 安装前请确认系统已参照[系统配置](../Deployment-and-Maintenance/Environment-Requirements.md)准备完成。 +2. 推荐使用`hostname`进行IP配置,可避免后期修改主机ip导致数据库无法启动的问题。设置hostname需要在服务器上配置`/etc/hosts`,如本机ip是192.168.1.3,hostname是iotdb-1,则可以使用以下命令设置服务器的 hostname,并使用hostname配置IoTDB的 `cn_internal_address`、`dn_internal_address`。 + + ```shell + echo "192.168.1.3 iotdb-1" >> /etc/hosts + ``` + +3. 部分参数首次启动后不能修改,请参考下方的[参数配置](#2参数配置)章节进行设置。 +4. 无论是在linux还是windows中,请确保IoTDB的安装路径中不含空格和中文,避免软件运行异常。 +5. 请注意,安装部署(包括激活和使用软件)IoTDB时,您可以: + - 使用 root 用户(推荐):可以避免权限等问题。 + - 使用固定的非 root 用户: + - 使用同一用户操作:确保在启动、激活、停止等操作均保持使用同一用户,不要切换用户。 + - 避免使用 sudo:使用 sudo 命令会以 root 用户权限执行命令,可能会引起权限混淆或安全问题。 +6. 
推荐部署监控面板,可以对重要运行指标进行监控,随时掌握数据库运行状态,监控面板可以联系工作人员获取,部署监控面板步骤可以参考:[监控面板部署](../Deployment-and-Maintenance/Monitoring-panel-deployment.md) + +## 2 安装步骤 + +### 2.1 解压安装包并进入安装目录 + +```Plain +unzip timechodb-{version}-bin.zip +cd timechodb-{version}-bin +``` + +### 2.2 参数配置 + +#### 2.2.1 内存配置 + +- conf/confignode-env.sh(或 .bat) + +| **配置项** | **说明** | **默认值** | **推荐值** | 备注 | +| :---------- | :------------------------------------- | :--------- | :----------------------------------------------- | :----------- | +| MEMORY_SIZE | IoTDB ConfigNode节点可以使用的内存总量 | 空 | 可按需填写,填写后系统会根据填写的数值来分配内存 | 重启服务生效 | + +- conf/datanode-env.sh(或 .bat) + +| **配置项** | **说明** | **默认值** | **推荐值** | 备注 | +| :---------- | :----------------------------------- | :--------- | :----------------------------------------------- | :----------- | +| MEMORY_SIZE | IoTDB DataNode节点可以使用的内存总量 | 空 | 可按需填写,填写后系统会根据填写的数值来分配内存 | 重启服务生效 | + +#### 2.2.2 功能配置 + +系统实际生效的参数在文件 conf/iotdb-system.properties 中,启动需设置以下参数,可以从 conf/iotdb-system.properties.template 文件中查看全部参数 + +集群级功能配置 + +| **配置项** | **说明** | **默认值** | **推荐值** | 备注 | +| :------------------------ | :------------------------------- | :------------- | :----------------------------------------------- | :------------------------ | +| cluster_name | 集群名称 | defaultCluster | 可根据需要设置集群名称,如无特殊需要保持默认即可 | 首次启动后不可修改 | +| schema_replication_factor | 元数据副本数,单机版此处设置为 1 | 1 | 1 | 默认1,首次启动后不可修改 | +| data_replication_factor | 数据副本数,单机版此处设置为 1 | 1 | 1 | 默认1,首次启动后不可修改 | + +ConfigNode 配置 + +| **配置项** | **说明** | **默认** | 推荐值 | **备注** | +| :------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------- | :----------------- | +| cn_internal_address | ConfigNode在集群内部通讯使用的地址 | 127.0.0.1 | 所在服务器的IPV4地址或hostname,推荐使用hostname | 首次启动后不能修改 | +| cn_internal_port | ConfigNode在集群内部通讯使用的端口 | 10710 | 10710 | 首次启动后不能修改 | +| cn_consensus_port | ConfigNode副本组共识协议通信使用的端口 | 10720 | 10720 | 首次启动后不能修改 | +| 
cn_seed_config_node | 节点注册加入集群时连接的ConfigNode 的地址,cn_internal_address:cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | 首次启动后不能修改 | + +DataNode 配置 + +| **配置项** | **说明** | **默认** | 推荐值 | **备注** | +| :------------------------------ | :----------------------------------------------------------- | :-------------- | :----------------------------------------------- | :----------------- | +| dn_rpc_address | 客户端 RPC 服务的地址 | 0.0.0.0 | 0.0.0.0 | 重启服务生效 | +| dn_rpc_port | 客户端 RPC 服务的端口 | 6667 | 6667 | 重启服务生效 | +| dn_internal_address | DataNode在集群内部通讯使用的地址 | 127.0.0.1 | 所在服务器的IPV4地址或hostname,推荐使用hostname | 首次启动后不能修改 | +| dn_internal_port | DataNode在集群内部通信使用的端口 | 10730 | 10730 | 首次启动后不能修改 | +| dn_mpp_data_exchange_port | DataNode用于接收数据流使用的端口 | 10740 | 10740 | 首次启动后不能修改 | +| dn_data_region_consensus_port | DataNode用于数据副本共识协议通信使用的端口 | 10750 | 10750 | 首次启动后不能修改 | +| dn_schema_region_consensus_port | DataNode用于元数据副本共识协议通信使用的端口 | 10760 | 10760 | 首次启动后不能修改 | +| dn_seed_config_node | 节点注册加入集群时连接的ConfigNode地址,即cn_internal_address:cn_internal_port | 127.0.0.1:10710 | cn_internal_address:cn_internal_port | 首次启动后不能修改 | + +### 2.3 启动 ConfigNode 节点 + +进入iotdb的sbin目录下,启动confignode + +```shell +./sbin/start-confignode.sh -d #“-d”参数将在后台进行启动 +``` + +如果启动失败,请参考下方[常见问题](#常见问题)。 + +### 2.4 启动 DataNode 节点 + + 进入iotdb的sbin目录下,启动datanode: + +```shell +./sbin/start-datanode.sh -d #“-d”参数将在后台进行启动 +``` +### 2.5 验证激活 + +当看到“ClusterActivationStatus”字段状态显示为ACTIVATED表示激活成功 + +![](https://alioss.timecho.com/docs/img/%E5%8D%95%E6%9C%BA-%E9%AA%8C%E8%AF%81.png) + +## 3 常见问题 + +1. 部署过程中多次提示激活失败 + - 使用 `ls -al` 命令:使用 `ls -al` 命令检查安装包根目录的所有者信息是否为当前用户。 + - 检查激活目录:检查 `./activation` 目录下的所有文件,所有者信息是否为当前用户。 +2. Confignode节点启动失败 + - 步骤 1: 请查看启动日志,检查是否修改了某些首次启动后不可改的参数。 + - 步骤 2: 请查看启动日志,检查是否出现其他异常。日志中若存在异常现象,请联系天谋技术支持人员咨询解决方案。 + - 步骤 3: 如果是首次部署或者数据可删除,也可按下述步骤清理环境,重新部署后,再次启动。 + - 清理环境: + 1. 结束所有 ConfigNode 和 DataNode 进程。 + ```Bash + # 1. 
停止 ConfigNode 和 DataNode 服务 + sbin/stop-standalone.sh + + # 2. 检查是否还有进程残留 + jps + # 或者 + ps -ef|gerp iotdb + + # 3. 如果有进程残留,则手动kill + kill -9 + # 如果确定机器上仅有1个iotdb,可以使用下面命令清理残留进程 + ps -ef|grep iotdb|grep -v grep|tr -s ' ' ' ' |cut -d ' ' -f2|xargs kill -9 + ``` + + 2. 删除 data 和 logs 目录。 + - 说明:删除 data 目录是必要的,删除 logs 目录是为了纯净日志,非必需。 + ```shell + cd /data/iotdb rm -rf data logs + ``` + +## 4 附录 + +### 4.1 Confignode节点参数介绍 + +| 参数 | 描述 | 是否为必填项 | +| :--- | :------------------------------- | :----------- | +| -d | 以守护进程模式启动,即在后台运行 | 否 | + +### 4.2 Datanode节点参数介绍 + +| 缩写 | 描述 | 是否为必填项 | +| :--- | :--------------------------------------------- | :----------- | +| -v | 显示版本信息 | 否 | +| -f | 在前台运行脚本,不将其放到后台 | 否 | +| -d | 以守护进程模式启动,即在后台运行 | 否 | +| -p | 指定一个文件来存放进程ID,用于进程管理 | 否 | +| -c | 指定配置文件夹的路径,脚本会从这里加载配置文件 | 否 | +| -g | 打印垃圾回收(GC)的详细信息 | 否 | +| -H | 指定Java堆转储文件的路径,当JVM内存溢出时使用 | 否 | +| -E | 指定JVM错误日志文件的路径 | 否 | +| -D | 定义系统属性,格式为 key=value | 否 | +| -X | 直接传递 -XX 参数给 JVM | 否 | +| -h | 帮助指令 | 否 | + diff --git a/src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md b/src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md rename to src/zh/UserGuide/V2.0.1-Table/Deployment-and-Maintenance/Stand-Alone-Deployment_timecho.md diff --git a/src/zh/UserGuide/V2.0.1/Table/IoTDB-Introduction/IoTDB-Introduction_apache.md b/src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/IoTDB-Introduction_apache.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/IoTDB-Introduction/IoTDB-Introduction_apache.md rename to src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/IoTDB-Introduction_apache.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/IoTDB-Introduction/IoTDB-Introduction_timecho.md b/src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/IoTDB-Introduction_timecho.md similarity index 100% rename 
from src/zh/UserGuide/V2.0.1/Tree/IoTDB-Introduction/IoTDB-Introduction_timecho.md rename to src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/IoTDB-Introduction_timecho.md diff --git a/src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/Release-history_apache.md b/src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/Release-history_apache.md new file mode 100644 index 000000000..bab81abd0 --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/Release-history_apache.md @@ -0,0 +1,162 @@ + +# 发版历史 + +## V1.3.3 + +> 发版时间:2024.11.20 +> + +V1.3.3主要新增 String、Blob、Date、Timestamp 数据类型、增加数据订阅、DataNode 主动监听并加载 TsFile,同时增加可观测性指标、发送端支持传文件至指定目录后,接收端自动加载到 IoTDB、配置文件整合、客户端查询请求负载均衡等功能,对数据库监控、性能、稳定性进行了全方位提升,并修复部分产品 bug 和性能问题。具体发布内容如下: + +- 存储模块:新增 String、Blob、Date、Timestamp 数据类型 +- 存储模块:合并模块内存控制性能提升 +- 查询模块:新增客户端查询请求负载均衡优化 +- 查询模块:新增活跃元数据统计查询 +- 查询模块:Filter 性能优化,提升聚合查询和 where 条件查询的速度 +- 数据同步:发送端支持传文件至指定目录后,接收端自动加载到 IoTDB +- 数据同步:接收端新增数据类型请求的自动转换机制 +- 数据订阅:新增数据订阅能力,支持以数据点或 TsFile 文件方式订阅数据库数据 +- 数据加载:DataNode 主动监听并加载 TsFile,同时增加可观测性指标 +- 流处理:Alter Pipe 支持 Alter Source 的能力 +- 系统模块:优化配置文件,原有配置文件三合一,降低用户操作成本 +- 系统模块:新增配置项设置接口 +- 系统模块:优化重启恢复性能,减少启动时间 +- 脚本与工具:新增元数据导入导出脚本 +- 脚本与工具:新增对 Kubernetes Helm 的支持 + +## V1.3.2 + +> 发版时间:2024.7.1 +> + +V1.3.2主要新增 explain analyze 语句分析单个 SQL 查询耗时、新增 UDAF 用户自定义聚合函数框架、元数据同步、统计指定路径下数据点数、SQL 语句导入导出脚本等功能,同时集群管理工具支持滚动升级、上传插件到整个集群,对数据库监控、性能、稳定性进行了全方位提升,并修复部分产品 bug 和性能问题。具体发布内容如下: + +- 存储模块:insertRecords 接口写入性能提升 +- 查询模块:新增 Explain Analyze 语句(监控单条 SQL 执行各阶段耗时) +- 查询模块:新增 UDAF 用户自定义聚合函数框架 +- 查询模块:新增 MaxBy/MinBy 函数,支持获取最大/小值的同时返回对应时间戳 +- 查询模块:值过滤查询性能提升 +- 数据同步:路径匹配支持通配符 +- 数据同步:支持元数据同步(含时间序列及相关属性、权限等设置) +- 流处理:增加 Alter Pipe 语句,支持热更新 Pipe 任务的插件 +- 系统模块:系统数据点数统计增加对 load TsFile 导入数据的统计 +- 脚本与工具:新增本地升级备份工具(通过硬链接对原有数据进行备份) +- 脚本与工具:新增 export-data/import-data 脚本,支持将数据导出为 CSV、TsFile 格式或 SQL 语句 +- 脚本与工具:Windows 环境支持通过窗口名区分 ConfigNode、DataNode、Cli + +## V1.3.1 + +> 发版时间:2024.4.22 +> + 
+V1.3.1主要新增一键启停集群脚本、一键收集实例信息脚本、多种内置函数等新特性,优化了原有数据同步、日志输出策略、查询执行过程,提升系统可观测性,并修复部分产品 bug 和性能问题。具体发布内容如下: + +- 增加一键启停集群脚本(start-all/stop-all.sh & start-all/stop-all.bat) +- 增加一键收集实例信息脚本(collect-info.sh & collect-info.bat) +- 新增标准差、方差内置聚合函数 +- 新增 tsfile 修复命令 +- Fill 子句支持设置填充超时阈值,超过时间阈值不填充 +- 数据同步简化时间范围指定方式,直接设置起止时间 +- 系统可观测性提升(增加集群节点的散度监控、分布式任务调度框架可观测性) +- 日志默认输出策略优化 +- Load TsFile 完善内存控制,覆盖全流程 +- Rest 接口(V2 版)增加列类型返回 +- 优化查询执行过程 +- 客户端自动拉取可用 DataNode 列表 + +## V1.3.0 + +> 发版时间:2024.1.1 +> + + +V1.3.0主要新增SSL通讯加密、数据同步监控项统计等新特性,优化了原有权限模块的语法和逻辑、metrics算法库性能、python客户端写入性能以及在部分查询场景下的查询效率,修复部分产品 bug 和性能问题。具体发布内容如下: + +- 安全模块:优化权限模块,支持时间序列粒度的权限控制 +- 安全模块:客户端服务器支持 SSL 通讯加密 +- 查询模块:计算类型视图支持 LAST 查询 +- 流处理:新增 pipe 相关监控指标 +- 存储模块:支持负数时间戳写入 +- 脚本与工具:load 脚本导入数据纳入数据点数监控项统计 +- 客户端模块:优化 python 客户端的性能 +- 查询模块优化 show path 返回时间长的问题 +- 查询模块:优化 explain 语句的展示结果,使展示结果对齐 +- 系统模块:环境配置脚本中增加统一内存配置项 MEMORY_SIZE +- 系统模块:配置项 target_config_node_list 更名为 seed_config_node +- 系统模块:配置项 storage_query_schema_consensus_free_memory_proportion 更名为 datanode_memory_proportion + +## V1.2.0 + +> 发版时间:2023.6.30 +> + + +V1.2.0主要增加了流处理框架、动态模板、substring/replace/round内置查询函数等新特性,增强了show region、show timeseries、show variable等内置语句功能和Session接口,同时优化了内置监控项及其实现,修复部分产品bug和性能问题。 + +- 流处理:新增流处理框架 +- 元数据模块:新增模板动态扩充功能 +- 存储模块:新增SPRINTZ和RLBE编码以及LZMA2压缩算法 +- 查询模块:新增cast、round、substr、replace内置标量函数 +- 查询模块:新增time_duration、mode内置聚合函数 +- 查询模块:SQL语句支持case when语法 +- 查询模块:SQL语句支持order by表达式 +- 接口模块:Python API支持连接分布式多个节点 +- 接口模块:Python客户端支持写入重定向 +- 接口模块:Session API增加用模板批量创建序列接口 + +## V1.1.0 + +> 发版时间:2023-04-03 +> + +V1.1.0主要改进增加了部分新特性,如支持 GROUP BY VARIATION、GROUP BY CONDITION 等分段方式、增加 DIFF、COUNT_IF 等实用函数,引入 pipeline 执行引擎进一步提升查询速度等。同时修复对齐序列 last 查询 order by timeseries、LIMIT&OFFSET 不生效、重启后元数据模版错误、删除所有 database 后创建序列错误等相关问题。 + +- 查询模块:align by device 语句支持 order by time +- 查询模块:支持 Show Queries 命令 +- 查询模块:支持 kill query 命令 +- 系统模块:show regions 支持指定特定的 database +- 系统模块:新增 SQL show variables, 可以展示当前集群参数 +- 查询模块:聚合查询支持 GROUP BY VARIATION +- 
查询模块:SELECT INTO 支持特定的数据类型强转 +- 查询模块:实现内置标量函数 DIFF +- 系统模块:show regions 显示创建时间 +- 查询模块:实现内置聚合函数 COUNT_IF +- 查询模块:聚合查询支持 GROUP BY CONDITION +- 系统模块:支持修改 dn_rpc_port 和 dn_rpc_address + +## V1.0.0 + +> 发版时间:2022.12.03 +> + +V1.0.0主要修复分区计算及查询执行时的相关问题,历史快照未删除,数据查询及 SessionPool 内存使用上的相关问题等;同时改进增加部分新特性,如支持 show variables、explain align by device 等命令,完善 ExportCSV/ExportTsFile/MQTT 等功能,完善集群的启停流程、更改 IoTDB 集群默认的内部端口、新增用于区分集群的 cluster_name 属性等。 + +- 系统模块:支持分布式高可用架构 +- 系统模块:支持多副本存储 +- 系统模块:启动节点时,如果端口已被占用,则终止启动流程 +- 系统模块:支持集群管理sql +- 系统模块:支持对Confignode、Datanode进行启动、停止、移除等功能管理 +- 系统模块:可配置共识协议框架及多种共识协议:Simple、IoTConsensus、Ratis +- 系统模块:支持数据、元数据、Confignode的多副本管理 +- 查询模块:支持大规模并行处理框架MPP,提供分布式读写能力 +- 流处理模块:支持流处理框架 +- 流处理模块:支持集群间数据同步功能 \ No newline at end of file diff --git a/src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/Release-history_timecho.md b/src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/Release-history_timecho.md new file mode 100644 index 000000000..cff39be78 --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/Release-history_timecho.md @@ -0,0 +1,219 @@ + +# 发版历史 + +## TimechoDB(数据库内核) +### V1.3.4.1 +> 发版时间:2025.01.08 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.4.1版本新增模式匹配函数、持续优化数据订阅机制,提升稳定性、import-data/export-data 脚本扩展支持新数据类型,import-data/export-data 脚本合并同时兼容 TsFile、CSV 和 SQL 三种类型数据的导入导出等功能,同时对数据库监控、性能、稳定性进行了全方位提升。具体发布内容如下: + +- 查询模块:用户可通过配置项控制 UDF、PipePlugin、Trigger 和 AINode 通过 URI 加载 jar 包 +- 系统模块:UDF 函数拓展,新增 pattern_match 模式匹配函数 +- 数据同步:支持在发送端指定接收端鉴权信息 +- 生态集成:支持 Kubernetes Operator +- 脚本与工具:import-data/export-data 脚本扩展,支持新数据类型(字符串、大二进制对象、日期、时间戳) +- 脚本与工具:import-data/export-data 脚本迭代,同时兼容 TsFile、CSV 和 SQL 三种类型数据的导入导出 + +### V1.3.3.3 + +> 发版时间:2024.10.31 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.3.3版本增加优化重启恢复性能,减少启动时间、DataNode 主动监听并加载 TsFile,同时增加可观测性指标、发送端支持传文件至指定目录后,接收端自动加载到IoTDB、Alter Pipe 支持 Alter Source 的能力等功能,同时对数据库监控、性能、稳定性进行了全方位提升。具体发布内容如下: + +- 数据同步:接收端支持对不一致数据类型的自动转换 +- 数据同步:接收端增强可观测性,支持多个内部接口的 ops/latency 统计 +- 数据同步:opc-ua-sink 插件支持 CS 模式访问和非匿名访问方式 +- 数据订阅: SDK 
支持 create if not exists 和 drop if exists 接口 +- 流处理:Alter Pipe 支持 Alter Source 的能力 +- 系统模块:新增 rest 模块的耗时监控 +- 脚本与工具:支持加载自动加载指定目录的TsFile文件 +- 脚本与工具:import-tsfile脚本扩展,支持脚本与iotdb server不在同一服务器运行 +- 脚本与工具:新增对Kubernetes Helm的支持 +- 脚本与工具:Python 客户端支持新数据类型(字符串、大二进制对象、日期、时间戳) + +### V1.3.3.2 + +> 发版时间:2024.8.15 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.3.2版本支持输出读取mods文件的耗时、输入最大顺乱序归并排序内存 以及dispatch 耗时、通过参数配置对时间分区原点的调整、支持根据 pipe 历史数据处理结束标记自动结束订阅,同时合并了模块内存控制性能提升,具体发布内容如下: + +- 查询模块:Explain Analyze 功能支持输出读取mods文件的耗时 +- 查询模块:Explain Analyze 功能支持输入最大顺乱序归并排序内存以及 dispatch 耗时 +- 存储模块:新增合并目标文件拆分功能,增加配置文件参数 +- 系统模块:支持通过参数配置对时间分区原点的调整 +- 流处理:数据订阅支持根据 pipe 历史数据处理结束标记自动结束订阅 +- 数据同步:RPC 压缩支持指定压缩等级 +- 脚本与工具:数据/元数据导出只过滤 root.__system,不对root.__systema 等开头的数据进行过滤 + +### V1.3.3.1 + +> 发版时间:2024.7.12 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.3.1版本多级存储增加限流机制、数据同步支持在发送端 sink 指定接收端使用用户名密码密码鉴权,优化了数据同步接收端一些不明确的WARN日志、重启恢复性能,减少启动时间,同时对脚本内容进行了合并,具体发布内容如下: + +- 查询模块:Filter 性能优化,提升聚合查询和where条件查询的速度 +- 查询模块:Java Session客户端查询 sql 请求均分到所有节点 +- 系统模块:将"iotdb-confignode.properties、iotdb-datanode.properties、iotdb-common.properties"配置文件合并为" iotdb-system.properties" +- 存储模块:多级存储增加限流机制 +- 数据同步:数据同步支持在发送端 sink 指定接收端使用用户名密码密码鉴权 +- 系统模块:优化重启恢复性能,减少启动时间 + +### V1.3.2.2 + +> 发版时间:2024.6.4 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.2.2 版本新增 explain analyze 语句分析单个 SQL 查询耗时、新增 UDAF 用户自定义聚合函数框架、支持磁盘空间到达设置阈值自动删除数据、元数据同步、统计指定路径下数据点数、SQL 语句导入导出脚本等功能,同时集群管理工具支持滚动升级、上传插件到整个集群,同时对数据库监控、性能、稳定性进行了全方位提升。具体发布内容如下: + +- 存储模块:insertRecords 接口写入性能提升 +- 存储模块:新增 SpaceTL 功能,支持磁盘空间到达设置阈值自动删除数据 +- 查询模块:新增 Explain Analyze 语句(监控单条 SQL 执行各阶段耗时) +- 查询模块:新增 UDAF 用户自定义聚合函数框架 +- 查询模块:UDF 新增包络解调分析 +- 查询模块:新增 MaxBy/MinBy 函数,支持获取最大/小值的同时返回对应时间戳 +- 查询模块:值过滤查询性能提升 +- 数据同步:路径匹配支持通配符 +- 数据同步:支持元数据同步(含时间序列及相关属性、权限等设置) +- 流处理:增加 Alter Pipe 语句,支持热更新 Pipe 任务的插件 +- 系统模块:系统数据点数统计增加对 load TsFile 导入数据的统计 +- 脚本与工具:新增本地升级备份工具(通过硬链接对原有数据进行备份) +- 脚本与工具:新增 export-data/import-data 脚本,支持将数据导出为 CSV、TsFile 格式或 SQL 语句 +- 脚本与工具:Windows 环境支持通过窗口名区分 ConfigNode、DataNode、Cli + +### V1.3.1.4 + +> 
发版时间:2024.4.23 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.1 版本增加系统激活情况查看、内置方差/标准差聚合函数、内置Fill语句支持超时时间设置、tsfile修复命令等功能,增加一键收集实例信息脚本、一键启停集群等脚本,并对视图、流处理等功能进行优化,提升使用易用度和版本性能。具体发布内容如下: + +- 查询模块:Fill 子句支持设置填充超时阈值,超过时间阈值不填充 +- 查询模块:Rest 接口(V2 版)增加列类型返回 +- 数据同步:数据同步简化时间范围指定方式,直接设置起止时间 +- 数据同步:数据同步支持 SSL 传输协议(iotdb-thrift-ssl-sink 插件) +- 系统模块:支持使用 SQL 查询集群激活信息 +- 系统模块:多级存储增加迁移时传输速率控制 +- 系统模块:系统可观测性提升(增加集群节点的散度监控、分布式任务调度框架可观测性) +- 系统模块:日志默认输出策略优化 +- 脚本与工具:增加一键启停集群脚本(start-all/stop-all.sh & start-all/stop-all.bat) +- 脚本与工具:增加一键收集实例信息脚本(collect-info.sh & collect-info.bat) + +### V1.3.0.4 + +> 发版时间:2024.1.3 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.3.0.4 发布了全新内生机器学习框架 AINode,全面升级权限模块支持序列粒度授予权限,并对视图、流处理等功能进行诸多细节优化,进一步提升了产品的使用易用度,并增强了版本稳定性和各方面性能。具体发布内容如下: + +- 查询模块:新增 AINode 内生机器学习模块 +- 查询模块:优化 show path 语句返回时间长的问题 +- 安全模块:升级权限模块,支持时间序列粒度的权限设置 +- 安全模块:支持客户端与服务器 SSL 通讯加密 +- 流处理:流处理模块新增多种 metrics 监控项 +- 查询模块:非可写视图序列支持 LAST 查询 +- 系统模块:优化数据点监控项统计准确性 + +### V1.2.0.1 + +> 发版时间:2023.6.30 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.2.0.1主要增加了流处理框架、动态模板、substring/replace/round内置查询函数等新特性,增强了show region、show timeseries、show variable等内置语句功能和Session接口,同时优化了内置监控项及其实现,修复部分产品bug和性能问题。 + +- 流处理:新增流处理框架 +- 元数据模块:新增模板动态扩充功能 +- 存储模块:新增SPRINTZ和RLBE编码以及LZMA2压缩算法 +- 查询模块:新增cast、round、substr、replace内置标量函数 +- 查询模块:新增time_duration、mode内置聚合函数 +- 查询模块:SQL语句支持case when语法 +- 查询模块:SQL语句支持order by表达式 +- 接口模块:Python API支持连接分布式多个节点 +- 接口模块:Python客户端支持写入重定向 +- 接口模块:Session API增加用模板批量创建序列接口 + +### V1.1.0.1 + +> 发版时间:2023-04-03 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.1.0.1主要改进增加了部分新特性,如支持 GROUP BY VARIATION、GROUP BY CONDITION 等分段方式、增加 DIFF、COUNT_IF 等实用函数,引入 pipeline 执行引擎进一步提升查询速度等。同时修复对齐序列 last 查询 order by timeseries、LIMIT&OFFSET 不生效、重启后元数据模版错误、删除所有 database 后创建序列错误等相关问题。 + +- 查询模块:align by device 语句支持 order by time +- 查询模块:支持 Show Queries 命令 +- 查询模块:支持 kill query 命令 +- 系统模块:show regions 支持指定特定的 database +- 系统模块:新增 SQL show variables, 可以展示当前集群参数 +- 查询模块:聚合查询支持 GROUP BY VARIATION +- 查询模块:SELECT INTO 支持特定的数据类型强转 +- 查询模块:实现内置标量函数 DIFF +- 系统模块:show regions 
显示创建时间 +- 查询模块:实现内置聚合函数 COUNT_IF +- 查询模块:聚合查询支持 GROUP BY CONDITION +- 系统模块:支持修改 dn_rpc_port 和 dn_rpc_address + +### V1.0.0.1 + +> 发版时间:2022.12.03 +> +> 下载地址:请联系天谋工作人员进行下载 + +V1.0.0.1主要修复分区计算及查询执行时的相关问题,历史快照未删除,数据查询及 SessionPool 内存使用上的相关问题等;同时改进增加部分新特性,如支持 show variables、explain align by device 等命令,完善 ExportCSV/ExportTsFile/MQTT 等功能,完善集群的启停流程、更改 IoTDB 集群默认的内部端口、新增用于区分集群的 cluster_name 属性等。 + +- 系统模块:支持分布式高可用架构 +- 系统模块:支持多副本存储 +- 系统模块:启动节点时,如果端口已被占用,则终止启动流程 +- 系统模块:支持集群管理sql +- 系统模块:支持对Confignode、Datanode进行启动、停止、移除等功能管理 +- 系统模块:可配置共识协议框架及多种共识协议:Simple、IoTConsensus、Ratis +- 系统模块:支持数据、元数据、Confignode的多副本管理 +- 查询模块:支持大规模并行处理框架MPP,提供分布式读写能力 +- 流处理模块:支持流处理框架 +- 流处理模块:支持集群间数据同步功能 + +## Workbench(控制台工具) + +| **控制台版本号** | **版本说明** | **可支持IoTDB版本** | +| ---------------- | ------------------------------------------------------------ | ------------------- | +| V1.5.1 | 新增AI分析功能以及模式匹配功能 | V1.3.2及以上版本 | +| V1.4.0 | 新增树模型展示及英文版 | V1.3.2及以上版本 | +| V1.3.1 | 分析功能新增分析方式,优化导入模版等功能 | V1.3.2及以上版本 | +| V1.3.0 | 新增数据库配置功能,优化部分版本细节 | V1.3.2及以上版本 | +| V1.2.6 | 优化各模块权限控制功能 | V1.3.1及以上版本 | +| V1.2.5 | 可视化功能新增“常用模版”概念,所有界面优化补充页面缓存等功能 | V1.3.0及以上版本 | +| V1.2.4 | 计算功能新增“导入、导出”功能,测点列表新增“时间对齐”字段 | V1.2.2及以上版本 | +| V1.2.3 | 首页新增“激活详情”,新增分析等功能 | V1.2.2及以上版本 | +| V1.2.2 | 优化“测点描述”展示内容等功能 | V1.2.2及以上版本 | +| V1.2.1 | 数据同步界面新增“监控面板”,优化Prometheus提示信息 | V1.2.2及以上版本 | +| V1.2.0 | 全新Workbench版本升级 | V1.2.0及以上版本 | \ No newline at end of file diff --git a/src/zh/UserGuide/V2.0.1/Tree/IoTDB-Introduction/Scenario.md b/src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/Scenario.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/IoTDB-Introduction/Scenario.md rename to src/zh/UserGuide/V2.0.1-Table/IoTDB-Introduction/Scenario.md diff --git a/src/zh/UserGuide/V2.0.1/Tree/QuickStart/QuickStart.md b/src/zh/UserGuide/V2.0.1-Table/QuickStart/QuickStart.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/QuickStart/QuickStart.md rename to src/zh/UserGuide/V2.0.1-Table/QuickStart/QuickStart.md 
diff --git a/src/zh/UserGuide/V2.0.1-Table/QuickStart/QuickStart_apache.md b/src/zh/UserGuide/V2.0.1-Table/QuickStart/QuickStart_apache.md new file mode 100644 index 000000000..53491c8d5 --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/QuickStart/QuickStart_apache.md @@ -0,0 +1,77 @@ + + +# 快速上手 + +本篇文档将帮助您了解快速入门 IoTDB 的方法。 + +## 如何安装部署? + +本篇文档将帮助您快速安装部署 IoTDB,您可以通过以下文档的链接快速定位到所需要查看的内容: + +1. 准备所需机器资源:IoTDB 的部署和运行需要考虑多个方面的机器资源配置。具体资源配置可查看 [资源规划](../Deployment-and-Maintenance/Database-Resources.md) + +2. 完成系统配置准备:IoTDB 的系统配置涉及多个方面,关键的系统配置介绍可查看 [系统配置](../Deployment-and-Maintenance/Environment-Requirements.md) + +3. 获取安装包:您可以在[ Apache IoTDB 官网](https://iotdb.apache.org/zh/Download/)获取获取 IoTDB 安装包。具体安装包结构可查看:[安装包获取](../Deployment-and-Maintenance/IoTDB-Package_apache.md) + +4. 安装数据库:您可以根据实际部署架构选择以下教程进行安装部署: + + - 单机版:[单机版](../Deployment-and-Maintenance/Stand-Alone-Deployment_apache.md) + + - 集群版:[集群版](../Deployment-and-Maintenance/Cluster-Deployment_apache.md) + +> ❗️注意:目前我们仍然推荐直接在物理机/虚拟机上安装部署,如需要 docker 部署,可参考:[Docker 部署](../Deployment-and-Maintenance/Docker-Deployment_apache.md) + +## 如何使用? + +1. 数据库建模设计:数据库建模是创建数据库系统的重要步骤,它涉及到设计数据的结构和关系,以确保数据的组织方式能够满足特定应用的需求,下面的文档将会帮助您快速了解 IoTDB 的建模设计: + + - 时序概念介绍:[时序数据模型](../Background-knowledge/Navigating_Time_Series_Data.md) + + - 建模设计介绍:[建模方案设计](../Background-knowledge/Data-Model-and-Terminology.md) + + - 数据库介绍:[数据库管理](../Basic-Concept/Database-Management.md) + + - 表介绍:[表管理](../Basic-Concept/Table-Management.md) + +2. 数据写入&更新:在数据写入&更新方面,IoTDB 提供了多种方式来插入实时数据,基本的数据写入&更新操作请查看 [数据写入&更新](../Basic-Concept/Write-Updata-Data.md) + +3. 数据查询:IoTDB 提供了丰富的数据查询功能,数据查询的基本介绍请查看 [数据查询](../Basic-Concept/Query-Data.md) + +4. 数据删除:IoTDB 提供了两种删除方式,分别为SQL语句删除与过期自动删除(TTL) + + - SQL语句删除:基本介绍请查看 [数据删除](../Basic-Concept/Delete-Data.md) + - 过期自动删除(TTL):基本介绍请查看 [过期自动删除](../Basic-Concept/TTL-Delete-Data.md) + +5. 其他进阶功能:除了数据库常见的写入、查询等功能外,IoTDB 还支持“数据同步”等功能,具体使用方法可参见具体文档: + + - 数据同步:[数据同步](../User-Manual/Data-Sync_apache.md) + +6. 
应用编程接口: IoTDB 提供了多种应用编程接口(API),以便于开发者在应用程序中与 IoTDB 进行交互,目前支持[ Java 原生接口](../API/Programming-Java-Native-API.md)、[Python 原生接口](../API/Programming-Python-Native-API.md)、[JDBC](../API/Programming-JDBC.md)等,更多编程接口可参见官网【应用编程接口】其他章节 + +## 想了解更多技术细节? + +如果您想了解 IoTDB 的更多技术内幕,可以移步至下面的文档: + + - 数据分区和负载均衡:IoTDB 基于时序数据特性,精心设计了数据分区策略和负载均衡算法,提升了集群的可用性和性能,想了解更多请查看 [数据分区和负载均衡](../Technical-Insider/Cluster-data-partitioning.md) + + - 压缩&编码:IoTDB 通过多样化的编码和压缩技术,针对不同数据类型优化存储效率,想了解更多请查看 [压缩&编码](../Technical-Insider/Encoding-and-Compression.md) diff --git a/src/zh/UserGuide/V2.0.1/Table/QuickStart/QuickStart_timecho.md b/src/zh/UserGuide/V2.0.1-Table/QuickStart/QuickStart_timecho.md similarity index 93% rename from src/zh/UserGuide/V2.0.1/Table/QuickStart/QuickStart_timecho.md rename to src/zh/UserGuide/V2.0.1-Table/QuickStart/QuickStart_timecho.md index f030bca42..28912cd34 100644 --- a/src/zh/UserGuide/V2.0.1/Table/QuickStart/QuickStart_timecho.md +++ b/src/zh/UserGuide/V2.0.1-Table/QuickStart/QuickStart_timecho.md @@ -52,11 +52,13 @@ 1. 数据库建模设计:数据库建模是创建数据库系统的重要步骤,它涉及到设计数据的结构和关系,以确保数据的组织方式能够满足特定应用的需求,下面的文档将会帮助您快速了解 IoTDB 的建模设计: - - 时序概念介绍:[时序数据模型](../Basic-Concept/Navigating_Time_Series_Data.md) + - 时序概念介绍:[时序数据模型](../Background-knowledge/Navigating_Time_Series_Data.md) - - 建模设计介绍:[建模方案设计](../Basic-Concept/Data-Model-and-Terminology.md) + - 建模设计介绍:[建模方案设计](../Background-knowledge/Data-Model-and-Terminology.md) - - 数据库&表介绍:[数据库&表管理](../Basic-Concept/Database&Table-Management.md) + - 数据库介绍:[数据库管理](../Basic-Concept/Database-Management.md) + + - 表介绍:[表管理](../Basic-Concept/Table-Management.md) 2. 
数据写入&更新:在数据写入&更新方面,IoTDB 提供了多种方式来插入实时数据,基本的数据写入&更新操作请查看 [数据写入&更新](../Basic-Concept/Write-Updata-Data.md) diff --git a/src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Sample-Data.md b/src/zh/UserGuide/V2.0.1-Table/Reference/Sample-Data.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Basic-Concept/Sample-Data.md rename to src/zh/UserGuide/V2.0.1-Table/Reference/Sample-Data.md diff --git a/src/zh/UserGuide/V2.0.1-Table/Reference/Status-Codes.md b/src/zh/UserGuide/V2.0.1-Table/Reference/Status-Codes.md new file mode 100644 index 000000000..d941f7862 --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/Reference/Status-Codes.md @@ -0,0 +1,178 @@ + + +# 状态码 + +IoTDB 引入了**状态码**这一概念。例如,因为 IoTDB 需要在写入数据之前首先注册时间序列,一种可能的解决方案是: + +``` +try { + writeData(); +} catch (SQLException e) { + // the most case is that the time series does not exist + if (e.getMessage().contains("exist")) { + //However, using the content of the error message is not so efficient + registerTimeSeries(); + //write data once again + writeData(); + } +} + +``` + +利用状态码,我们就可以不必写诸如`if (e.getErrorMessage().contains("exist"))`的代码, +只需要使用`e.getStatusType().getCode() == TSStatusCode.TIME_SERIES_NOT_EXIST_ERROR.getStatusCode()`。 + +这里是状态码和相对应信息的列表: + +| 状态码 | 状态类型 | 状态信息 | +|:-----|:---------------------------------------|:--------------------------| +| 200 | SUCCESS_STATUS | 成功状态 | +| 201 | INCOMPATIBLE_VERSION | 版本不兼容 | +| 202 | CONFIGURATION_ERROR | 配置文件有错误项 | +| 203 | START_UP_ERROR | 启动错误 | +| 204 | SHUT_DOWN_ERROR | 关机错误 | +| 300 | UNSUPPORTED_OPERATION | 不支持的操作 | +| 301 | EXECUTE_STATEMENT_ERROR | 执行语句错误 | +| 302 | MULTIPLE_ERROR | 多行语句执行错误 | +| 303 | ILLEGAL_PARAMETER | 参数错误 | +| 304 | OVERLAP_WITH_EXISTING_TASK | 与正在执行的其他操作冲突 | +| 305 | INTERNAL_SERVER_ERROR | 服务器内部错误 | +| 306 | DISPATCH_ERROR | 分发错误 | +| 400 | REDIRECTION_RECOMMEND | 推荐客户端重定向 | +| 500 | DATABASE_NOT_EXIST | 数据库不存在 | +| 501 | DATABASE_ALREADY_EXISTS | 数据库已存在 | +| 502 | SERIES_OVERFLOW | 序列数量超过阈值 | +| 503 | 
TIMESERIES_ALREADY_EXIST | 时间序列已存在 | +| 504 | TIMESERIES_IN_BLACK_LIST | 时间序列正在删除 | +| 505 | ALIAS_ALREADY_EXIST | 路径别名已经存在 | +| 506 | PATH_ALREADY_EXIST | 路径已经存在 | +| 507 | METADATA_ERROR | 处理元数据错误 | +| 508 | PATH_NOT_EXIST | 路径不存在 | +| 509 | ILLEGAL_PATH | 路径不合法 | +| 510 | CREATE_TEMPLATE_ERROR | 创建物理量模板失败 | +| 511 | DUPLICATED_TEMPLATE | 元数据模板重复 | +| 512 | UNDEFINED_TEMPLATE | 元数据模板未定义 | +| 513 | TEMPLATE_NOT_SET | 元数据模板未设置 | +| 514 | DIFFERENT_TEMPLATE | 元数据模板不一致 | +| 515 | TEMPLATE_IS_IN_USE | 元数据模板正在使用 | +| 516 | TEMPLATE_INCOMPATIBLE | 元数据模板不兼容 | +| 517 | SEGMENT_NOT_FOUND | 未找到 Segment | +| 518 | PAGE_OUT_OF_SPACE | PBTreeFile 中 Page 空间不够 | +| 519 | RECORD_DUPLICATED | 记录重复 | +| 520 | SEGMENT_OUT_OF_SPACE | PBTreeFile 中 segment 空间不够 | +| 521 | PBTREE_FILE_NOT_EXISTS | PBTreeFile 不存在 | +| 522 | OVERSIZE_RECORD | 记录大小超过元数据文件页面大小 | +| 523 | PBTREE_FILE_REDO_LOG_BROKEN | PBTreeFile 的 redo 日志损坏 | +| 524 | TEMPLATE_NOT_ACTIVATED | 元数据模板未激活 | +| 526 | SCHEMA_QUOTA_EXCEEDED | 集群元数据超过配额上限 | +| 527 | MEASUREMENT_ALREADY_EXISTS_IN_TEMPLATE | 元数据模板中已存在物理量 | +| 600 | SYSTEM_READ_ONLY | IoTDB 系统只读 | +| 601 | STORAGE_ENGINE_ERROR | 存储引擎相关错误 | +| 602 | STORAGE_ENGINE_NOT_READY | 存储引擎还在恢复中,还不能接受读写操作 | +| 603 | DATAREGION_PROCESS_ERROR | DataRegion 相关错误 | +| 604 | TSFILE_PROCESSOR_ERROR | TsFile 处理器相关错误 | +| 605 | WRITE_PROCESS_ERROR | 写入相关错误 | +| 606 | WRITE_PROCESS_REJECT | 写入拒绝错误 | +| 607 | OUT_OF_TTL | 插入时间少于 TTL 时间边界 | +| 608 | COMPACTION_ERROR | 合并错误 | +| 609 | ALIGNED_TIMESERIES_ERROR | 对齐时间序列错误 | +| 610 | WAL_ERROR | WAL 异常 | +| 611 | DISK_SPACE_INSUFFICIENT | 磁盘空间不足 | +| 700 | SQL_PARSE_ERROR | SQL 语句分析错误 | +| 701 | SEMANTIC_ERROR | SQL 语义错误 | +| 702 | GENERATE_TIME_ZONE_ERROR | 生成时区错误 | +| 703 | SET_TIME_ZONE_ERROR | 设置时区错误 | +| 704 | QUERY_NOT_ALLOWED | 查询语句不允许 | +| 705 | LOGICAL_OPERATOR_ERROR | 逻辑符相关错误 | +| 706 | LOGICAL_OPTIMIZE_ERROR | 逻辑优化相关错误 | +| 707 | UNSUPPORTED_FILL_TYPE | 不支持的填充类型 | +| 708 | QUERY_PROCESS_ERROR | 查询处理相关错误 | +| 709 | 
MPP_MEMORY_NOT_ENOUGH | MPP 框架中任务执行内存不足 | +| 710 | CLOSE_OPERATION_ERROR | 关闭操作错误 | +| 711 | TSBLOCK_SERIALIZE_ERROR | TsBlock 序列化错误 | +| 712 | INTERNAL_REQUEST_TIME_OUT | MPP 操作超时 | +| 713 | INTERNAL_REQUEST_RETRY_ERROR | 内部操作重试失败 | +| 714 | NO_SUCH_QUERY | 查询不存在 | +| 715 | QUERY_WAS_KILLED | 查询执行时被终止 | +| 800 | UNINITIALIZED_AUTH_ERROR | 授权模块未初始化 | +| 801 | WRONG_LOGIN_PASSWORD | 用户名或密码错误 | +| 802 | NOT_LOGIN | 没有登录 | +| 803 | NO_PERMISSION | 没有操作权限 | +| 804 | USER_NOT_EXIST | 用户不存在 | +| 805 | USER_ALREADY_EXIST | 用户已存在 | +| 806 | USER_ALREADY_HAS_ROLE | 用户拥有对应角色 | +| 807 | USER_NOT_HAS_ROLE | 用户未拥有对应角色 | +| 808 | ROLE_NOT_EXIST | 角色不存在 | +| 809 | ROLE_ALREADY_EXIST | 角色已存在 | +| 810 | ALREADY_HAS_PRIVILEGE | 已拥有对应权限 | +| 811 | NOT_HAS_PRIVILEGE | 未拥有对应权限 | +| 812 | CLEAR_PERMISSION_CACHE_ERROR | 清空权限缓存失败 | +| 813 | UNKNOWN_AUTH_PRIVILEGE | 未知权限 | +| 814 | UNSUPPORTED_AUTH_OPERATION | 不支持的权限操作 | +| 815 | AUTH_IO_EXCEPTION | 权限模块IO异常 | +| 900 | MIGRATE_REGION_ERROR | Region 迁移失败 | +| 901 | CREATE_REGION_ERROR | 创建 region 失败 | +| 902 | DELETE_REGION_ERROR | 删除 region 失败 | +| 903 | PARTITION_CACHE_UPDATE_ERROR | 更新分区缓存失败 | +| 904 | CONSENSUS_NOT_INITIALIZED | 共识层未初始化,不能提供服务 | +| 905 | REGION_LEADER_CHANGE_ERROR | Region leader 迁移失败 | +| 906 | NO_AVAILABLE_REGION_GROUP | 无法找到可用的 Region 副本组 | +| 907 | LACK_DATA_PARTITION_ALLOCATION | 调用创建数据分区方法的返回结果里缺少信息 | +| 1000 | DATANODE_ALREADY_REGISTERED | DataNode 在集群中已经注册 | +| 1001 | NO_ENOUGH_DATANODE | DataNode 数量不足,无法移除节点或创建副本 | +| 1002 | ADD_CONFIGNODE_ERROR | 新增 ConfigNode 失败 | +| 1003 | REMOVE_CONFIGNODE_ERROR | 移除 ConfigNode 失败 | +| 1004 | DATANODE_NOT_EXIST | 此 DataNode 不存在 | +| 1005 | DATANODE_STOP_ERROR | DataNode 关闭失败 | +| 1006 | REMOVE_DATANODE_ERROR | 移除 datanode 失败 | +| 1007 | REGISTER_DATANODE_WITH_WRONG_ID | 注册的 DataNode 中有错误的注册id | +| 1008 | CAN_NOT_CONNECT_DATANODE | 连接 DataNode 失败 | +| 1100 | LOAD_FILE_ERROR | 加载文件错误 | +| 1101 | LOAD_PIECE_OF_TSFILE_ERROR | 加载 TsFile 片段异常 | +| 1102 | 
DESERIALIZE_PIECE_OF_TSFILE_ERROR | 反序列化 TsFile 片段异常 | +| 1103 | SYNC_CONNECTION_ERROR | 同步连接错误 | +| 1104 | SYNC_FILE_REDIRECTION_ERROR | 同步文件时重定向异常 | +| 1105 | SYNC_FILE_ERROR | 同步文件异常 | +| 1106 | CREATE_PIPE_SINK_ERROR | 创建 PIPE Sink 失败 | +| 1107 | PIPE_ERROR | PIPE 异常 | +| 1108 | PIPESERVER_ERROR | PIPE server 异常 | +| 1109 | VERIFY_METADATA_ERROR | 校验元数据失败 | +| 1200 | UDF_LOAD_CLASS_ERROR | UDF 加载类异常 | +| 1201 | UDF_DOWNLOAD_ERROR | 无法从 ConfigNode 下载 UDF | +| 1202 | CREATE_UDF_ON_DATANODE_ERROR | 在 DataNode 创建 UDF 失败 | +| 1203 | DROP_UDF_ON_DATANODE_ERROR | 在 DataNode 卸载 UDF 失败 | +| 1300 | CREATE_TRIGGER_ERROR | ConfigNode 创建 Trigger 失败 | +| 1301 | DROP_TRIGGER_ERROR | ConfigNode 删除 Trigger 失败 | +| 1302 | TRIGGER_FIRE_ERROR | 触发器执行错误 | +| 1303 | TRIGGER_LOAD_CLASS_ERROR | 触发器加载类异常 | +| 1304 | TRIGGER_DOWNLOAD_ERROR | 从 ConfigNode 下载触发器异常 | +| 1305 | CREATE_TRIGGER_INSTANCE_ERROR | 创建触发器实例异常 | +| 1306 | ACTIVE_TRIGGER_INSTANCE_ERROR | 激活触发器实例异常 | +| 1307 | DROP_TRIGGER_INSTANCE_ERROR | 删除触发器实例异常 | +| 1308 | UPDATE_TRIGGER_LOCATION_ERROR | 更新有状态的触发器所在 DataNode 异常 | +| 1400 | NO_SUCH_CQ | CQ 任务不存在 | +| 1401 | CQ_ALREADY_ACTIVE | CQ 任务已激活 | +| 1402 | CQ_AlREADY_EXIST | CQ 任务已存在 | +| 1403 | CQ_UPDATE_LAST_EXEC_TIME_ERROR | CQ 更新上一次执行时间失败 | + +> 在最新版本中,我们重构了 IoTDB 的异常类。通过将错误信息统一提取到异常类中,并为所有异常添加不同的错误代码,从而当捕获到异常并引发更高级别的异常时,错误代码将保留并传递,以便用户了解详细的错误原因。 +除此之外,我们添加了一个基础异常类“ProcessException”,由所有异常扩展。 diff --git a/src/zh/UserGuide/V2.0.1/Table/Reference/System-Config-Manual.md b/src/zh/UserGuide/V2.0.1-Table/Reference/System-Config-Manual.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Reference/System-Config-Manual.md rename to src/zh/UserGuide/V2.0.1-Table/Reference/System-Config-Manual.md diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Fill-Clause.md b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Fill-Clause.md similarity index 98% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Fill-Clause.md rename to 
src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Fill-Clause.md index e8797f836..b61f74040 100644 --- a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Fill-Clause.md +++ b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Fill-Clause.md @@ -89,7 +89,7 @@ IoTDB 支持以下三种空值填充方式: ## 3 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 ### 3.1 PREVIOUS 填充: diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/From-Join-Clause.md b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/From-Join-Clause.md similarity index 98% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/From-Join-Clause.md rename to src/zh/UserGuide/V2.0.1-Table/SQL-Manual/From-Join-Clause.md index 47367651f..3492401ac 100644 --- a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/From-Join-Clause.md +++ b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/From-Join-Clause.md @@ -117,7 +117,7 @@ SELECT selectExpr [, selectExpr] ... FROM [, ] ... 
[WHE ## 4 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 ### 4.1 From 示例 diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/GroupBy-Clause.md b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/GroupBy-Clause.md similarity index 96% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/GroupBy-Clause.md rename to src/zh/UserGuide/V2.0.1-Table/SQL-Manual/GroupBy-Clause.md index c36c93840..f253b3d87 100644 --- a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/GroupBy-Clause.md +++ b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/GroupBy-Clause.md @@ -195,7 +195,7 @@ It costs 0.047s ## 3 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 #### 示例 1:降采样时间序列数据 diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Having-Clause.md b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Having-Clause.md similarity index 90% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Having-Clause.md rename to src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Having-Clause.md index d02911d4a..98412d6b2 100644 --- a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Having-Clause.md +++ b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Having-Clause.md @@ -37,7 +37,7 @@ HAVING booleanExpression ## 2 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 #### 示例 1:过滤计数低于特定值的设备 diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Identifier.md 
b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Identifier.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Identifier.md rename to src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Identifier.md diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Keywords.md b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Keywords.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Keywords.md rename to src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Keywords.md diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Limit-Offset-Clause.md b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Limit-Offset-Clause.md similarity index 95% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Limit-Offset-Clause.md rename to src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Limit-Offset-Clause.md index 4a18e7632..30d950193 100644 --- a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Limit-Offset-Clause.md +++ b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Limit-Offset-Clause.md @@ -48,7 +48,7 @@ OFFSET 子句与 LIMIT 子句配合使用,用于指定查询结果跳过前 OF ## 2 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 #### 示例 1:查询设备的最新行 diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/OrderBy-Clause.md b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/OrderBy-Clause.md similarity index 96% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/OrderBy-Clause.md rename to src/zh/UserGuide/V2.0.1-Table/SQL-Manual/OrderBy-Clause.md index f5be7a4f8..aec9e0dfb 100644 --- a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/OrderBy-Clause.md +++ b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/OrderBy-Clause.md @@ -40,7 +40,7 @@ sortItem ## 2 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 
+在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 #### 示例 1: 按时间降序查询过去一小时的数据 diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Select-Clause.md b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Select-Clause.md similarity index 97% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Select-Clause.md rename to src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Select-Clause.md index ebf97c93a..78635e7da 100644 --- a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Select-Clause.md +++ b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Select-Clause.md @@ -45,7 +45,7 @@ selectItem ## 3 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 ### 3.1 选择列表 diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Where-Clause.md b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Where-Clause.md similarity index 95% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Where-Clause.md rename to src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Where-Clause.md index 82c4d2cc4..58c56b34b 100644 --- a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/Where-Clause.md +++ b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/Where-Clause.md @@ -31,7 +31,7 @@ __WHERE 子句__:用于在 SQL 查询中指定筛选条件,WHERE 子句在 FRO ## 2 示例数据 -在[示例数据页面](../Basic-Concept/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 +在[示例数据页面](../Reference/Sample-Data.md)中,包含了用于构建表结构和插入数据的SQL语句,下载并在IoTDB CLI中执行这些语句,即可将数据导入IoTDB,您可以使用这些数据来测试和执行示例中的SQL语句,并获得相应的结果。 #### 示例1:选择特定 ID 的行 diff --git a/src/zh/UserGuide/V2.0.1/Table/SQL-Manual/overview.md b/src/zh/UserGuide/V2.0.1-Table/SQL-Manual/overview.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/SQL-Manual/overview.md rename to 
src/zh/UserGuide/V2.0.1-Table/SQL-Manual/overview.md diff --git a/src/zh/UserGuide/V2.0.1/Table/Technical-Insider/Cluster-data-partitioning.md b/src/zh/UserGuide/V2.0.1-Table/Technical-Insider/Cluster-data-partitioning.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Technical-Insider/Cluster-data-partitioning.md rename to src/zh/UserGuide/V2.0.1-Table/Technical-Insider/Cluster-data-partitioning.md diff --git a/src/zh/UserGuide/V2.0.1/Table/Technical-Insider/Encoding-and-Compression.md b/src/zh/UserGuide/V2.0.1-Table/Technical-Insider/Encoding-and-Compression.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Technical-Insider/Encoding-and-Compression.md rename to src/zh/UserGuide/V2.0.1-Table/Technical-Insider/Encoding-and-Compression.md diff --git a/src/zh/UserGuide/V2.0.1/Table/Tools-System/CLI.md b/src/zh/UserGuide/V2.0.1-Table/Tools-System/CLI.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/Tools-System/CLI.md rename to src/zh/UserGuide/V2.0.1-Table/Tools-System/CLI.md diff --git a/src/zh/UserGuide/V2.0.1-Table/User-Manual/Data-Sync_apache.md b/src/zh/UserGuide/V2.0.1-Table/User-Manual/Data-Sync_apache.md new file mode 100644 index 000000000..cbad5d698 --- /dev/null +++ b/src/zh/UserGuide/V2.0.1-Table/User-Manual/Data-Sync_apache.md @@ -0,0 +1,512 @@ + + +# 数据同步 +数据同步是工业物联网的典型需求,通过数据同步机制,可实现 IoTDB 之间的数据共享,搭建完整的数据链路来满足内网外网数据互通、端边云同步、数据迁移、数据备份等需求。 + +## 1 功能概述 + +### 1.1 数据同步 + +一个数据同步任务包含 3 个阶段: + +![](https://alioss.timecho.com/docs/img/dataSync01.png) + +- 抽取(Source)阶段:该部分用于从源 IoTDB 抽取数据,在 SQL 语句中的 source 部分定义 +- 处理(Process)阶段:该部分用于处理从源 IoTDB 抽取出的数据,在 SQL 语句中的 processor 部分定义 +- 发送(Sink)阶段:该部分用于向目标 IoTDB 发送数据,在 SQL 语句中的 sink 部分定义 + +通过 SQL 语句声明式地配置 3 个部分的具体内容,可实现灵活的数据同步能力。 + +### 1.2 功能限制及说明 + +- 支持 1.x 系列版本 IoTDB 数据同步到 2.x 以及以上系列版本版本的 IoTDB。 +- 不支持 2.x 系列版本 IoTDB 数据同步到 1.x 系列版本版本的 IoTDB。 +- 在进行数据同步任务时,请避免执行任何删除操作,防止两端状态不一致。 + +## 2 使用说明 + +数据同步任务有三种状态:RUNNING、STOPPED 和 DROPPED。任务状态转换如下图所示: + 
+![](https://alioss.timecho.com/docs/img/Data-Sync01.png) + +创建后任务会直接启动,同时当任务发生异常停止后,系统会自动尝试重启任务。 + +提供以下 SQL 语句对同步任务进行状态管理。 + +### 2.1 创建任务 + +使用 `CREATE PIPE` 语句来创建一条数据同步任务,下列属性中`PipeId`和`sink`必填,`source`和`processor`为选填项,输入 SQL 时注意 `SOURCE`与 `SINK` 插件顺序不能替换。 + +SQL 示例如下: + +```SQL +CREATE PIPE [IF NOT EXISTS] -- PipeId 是能够唯一标定任务的名字 +-- 数据抽取插件,可选插件 +WITH SOURCE ( + [ = ,], +) +-- 数据处理插件,可选插件 +WITH PROCESSOR ( + [ = ,], +) +-- 数据连接插件,必填插件 +WITH SINK ( + [ = ,], +) +``` + +**IF NOT EXISTS 语义**:用于创建操作中,确保当指定 Pipe 不存在时,执行创建命令,防止因尝试创建已存在的 Pipe 而导致报错。 + +### 2.2 开始任务 + +创建之后,任务直接进入运行状态,不需要执行启动任务。当使用`STOP PIPE`语句停止任务时需手动使用`START PIPE`语句来启动任务,PIPE 发生异常情况停止后会自动重新启动任务,从而开始处理数据: + +```SQL +START PIPE +``` + +### 2.3 停止任务 + +停止处理数据: + +```SQL +STOP PIPE +``` + +### 2.4 删除任务 + +删除指定任务: + +```SQL +DROP PIPE [IF EXISTS] +``` + +**IF EXISTS 语义**:用于删除操作中,确保当指定 Pipe 存在时,执行删除命令,防止因尝试删除不存在的 Pipe 而导致报错。 + +删除任务不需要先停止同步任务。 + +### 2.5 查看任务 + +查看全部任务: + +```SQL +SHOW PIPES +``` + +查看指定任务: + +```SQL +SHOW PIPE +``` + + pipe 的 show pipes 结果示例: + +```SQL ++--------------------------------+-----------------------+-------+----------+-------------+-----------------------------------------------------------+----------------+-------------------+-------------------------+ +| ID| CreationTime| State|PipeSource|PipeProcessor| PipeSink|ExceptionMessage|RemainingEventCount|EstimatedRemainingSeconds| ++--------------------------------+-----------------------+-------+----------+-------------+-----------------------------------------------------------+----------------+-------------------+-------------------------+ +|59abf95db892428b9d01c5fa318014ea|2024-06-17T14:03:44.189|RUNNING| {}| {}|{sink=iotdb-thrift-sink, sink.ip=127.0.0.1, sink.port=6668}| | 128| 1.03| ++--------------------------------+-----------------------+-------+----------+-------------+-----------------------------------------------------------+----------------+-------------------+-------------------------+ +``` + +其中各列含义如下: + +- 
**ID**:同步任务的唯一标识符 +- **CreationTime**:同步任务的创建的时间 +- **State**:同步任务的状态 +- **PipeSource**:同步数据流的来源 +- **PipeProcessor**:同步数据流在传输过程中的处理逻辑 +- **PipeSink**:同步数据流的目的地 +- **ExceptionMessage**:显示同步任务的异常信息 +- **RemainingEventCount(统计存在延迟)**:剩余 event 数,当前数据同步任务中的所有 event 总数,包括数据同步的 event,以及系统和用户自定义的 event。 +- **EstimatedRemainingSeconds(统计存在延迟)**:剩余时间,基于当前 event 个数和 pipe 处速率,预估完成传输的剩余时间。 + +### 同步插件 + +为了使得整体架构更加灵活以匹配不同的同步场景需求,我们支持在同步任务框架中进行插件组装。系统为您预置了一些常用插件可直接使用,同时您也可以自定义 processor 插件 和 Sink 插件,并加载至 IoTDB 系统进行使用。查看系统中的插件(含自定义与内置插件)可以用以下语句: + +```SQL +SHOW PIPEPLUGINS +``` + +返回结果如下: + +```SQL +IoTDB> SHOW PIPEPLUGINS ++------------------------------+----------+--------------------------------------------------------------------------------------------------+----------------------------------------------------+ +| PluginName|PluginType| ClassName| PluginJar| ++------------------------------+----------+--------------------------------------------------------------------------------------------------+----------------------------------------------------+ +| DO-NOTHING-PROCESSOR| Builtin| org.apache.iotdb.commons.pipe.plugin.builtin.processor.donothing.DoNothingProcessor| | +| DO-NOTHING-SINK| Builtin| org.apache.iotdb.commons.pipe.plugin.builtin.connector.donothing.DoNothingConnector| | +| IOTDB-SOURCE| Builtin| org.apache.iotdb.commons.pipe.plugin.builtin.extractor.iotdb.IoTDBExtractor| | +| IOTDB-THRIFT-SINK| Builtin| org.apache.iotdb.commons.pipe.plugin.builtin.connector.iotdb.thrift.IoTDBThriftConnector| | +| IOTDB-THRIFT-SSL-SINK| Builtin| org.apache.iotdb.commons.pipe.plugin.builtin.connector.iotdb.thrift.IoTDBThriftSslConnector| | ++------------------------------+----------+--------------------------------------------------------------------------------------------------+----------------------------------------------------+ + +``` + +预置插件详细介绍如下(各插件的详细参数可参考本文[参数说明](#参考参数说明)): + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
类型自定义插件插件名称介绍
source 插件不支持iotdb-source默认的 extractor 插件,用于抽取 IoTDB 历史或实时数据
processor 插件支持do-nothing-processor默认的 processor 插件,不对传入的数据做任何的处理
sink 插件支持do-nothing-sink不对发送出的数据做任何的处理
iotdb-thrift-sink默认的 sink 插件,用于 IoTDB 到 IoTDB(V2.0.0 及以上)之间的数据传输。使用 Thrift RPC 框架传输数据,多线程 async non-blocking IO 模型,传输性能高,尤其适用于目标端为分布式时的场景
iotdb-thrift-ssl-sink用于 IoTDB 与 IoTDB(V2.0.0 及以上)之间的数据传输。使用 Thrift RPC 框架传输数据,多线程 sync blocking IO 模型,适用于安全需求较高的场景
+ + +## 3 使用示例 + +### 3.1 全量数据同步 + +本例子用来演示将一个 IoTDB 的所有数据同步至另一个 IoTDB,数据链路如下图所示: + +![](https://alioss.timecho.com/docs/img/%E6%95%B0%E6%8D%AE%E5%90%8C%E6%AD%A51.png) + +在这个例子中,我们可以创建一个名为 A2B 的同步任务,用来同步 A IoTDB 到 B IoTDB 间的全量数据,这里需要用到 sink 的 iotdb-thrift-sink 插件(内置插件),需通过 node-urls 配置目标端 IoTDB 中 DataNode 节点的数据服务端口的 url,如下面的示例语句: + +```SQL +create pipe A2B +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +### 3.2 部分数据同步 + +本例子用来演示同步某个历史时间范围( 2023 年 8 月 23 日 8 点到 2023 年 10 月 23 日 8 点)的数据至另一个 IoTDB,数据链路如下图所示: + +![](https://alioss.timecho.com/docs/img/%E6%95%B0%E6%8D%AE%E5%90%8C%E6%AD%A51.png) + +在这个例子中,我们可以创建一个名为 A2B 的同步任务。首先我们需要在 source 中定义传输数据的范围,由于传输的是历史数据(历史数据是指同步任务创建之前存在的数据),需要配置数据的起止时间 start-time 和 end-time 以及传输的模式 mode.streaming。通过 node-urls 配置目标端 IoTDB 中 DataNode 节点的数据服务端口的 url。 + +详细语句如下: + +```SQL +create pipe A2B +WITH SOURCE ( + 'source'= 'iotdb-source', + 'mode.streaming' = 'true', -- 新插入数据(pipe创建后)的抽取模式:是否按流式抽取(false 时为批式) + 'start-time' = '2023.08.23T08:00:00+00:00', -- 同步所有数据的开始 event time,包含 start-time + 'end-time' = '2023.10.23T08:00:00+00:00' -- 同步所有数据的结束 event time,包含 end-time +) +with SINK ( + 'sink'='iotdb-thrift-async-sink', + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +### 3.3 双向数据传输 + +本例子用来演示两个 IoTDB 之间互为双活的场景,数据链路如下图所示: + +![](https://alioss.timecho.com/docs/img/1706698592139.jpg) + +在这个例子中,为了避免数据无限循环,需要将 A 和 B 上的参数`source.mode.double-living` 均设置为 `true`,表示不转发从另一 pipe 传输而来的数据。 + +详细语句如下: + +在 A IoTDB 上执行下列语句: + +```SQL +create pipe AB +with source ( + 'source.mode.double-living' ='true' --不转发由其他 Pipe 写入的数据 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +在 B IoTDB 上执行下列语句: + +```SQL +create pipe BA +with source ( + 'source.mode.double-living' ='true' --不转发由其他 Pipe 写入的数据 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = 
'127.0.0.1:6667', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` +### 3.4 边云数据传输 + +本例子用来演示多个 IoTDB 之间边云传输数据的场景,数据由 B 、C、D 集群分别都同步至 A 集群,数据链路如下图所示: + +![](https://alioss.timecho.com/docs/img/dataSync03.png) + +在这个例子中,为了将 B 、C、D 集群的数据同步至 A,在 BA 、CA、DA 之间的 pipe 需要配置database-name 和 table-name 限制范围,详细语句如下: + +在 B IoTDB 上执行下列语句,将 B 中数据同步至 A: + +```SQL +create pipe BA +with source ( + 'database-name'='db_b.*', -- 限制范围 + 'table-name'='.*', -- 可选择匹配所有 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6667', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +在 C IoTDB 上执行下列语句,将 C 中数据同步至 A: + +```SQL +create pipe CA +with source ( + 'database-name'='db_c.*', -- 限制范围 + 'table-name'='.*', -- 可选择匹配所有 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +在 D IoTDB 上执行下列语句,将 D 中数据同步至 A: + +```SQL +create pipe DA +with source ( + 'database-name'='db_d.*', -- 限制范围 + 'table-name'='.*', -- 可选择匹配所有 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6669', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +### 3.5 级联数据传输 + +本例子用来演示多个 IoTDB 之间级联传输数据的场景,数据由 A 集群同步至 B 集群,再同步至 C 集群,数据链路如下图所示: + +![](https://alioss.timecho.com/docs/img/1706698610134.jpg) + +在这个例子中,为了将 A 集群的数据同步至 C,在 BC 之间的 pipe 需要将 `source.mode.double-living` 配置为`true`,详细语句如下: + +在 A IoTDB 上执行下列语句,将 A 中数据同步至 B: + +```SQL +create pipe AB +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +在 B IoTDB 上执行下列语句,将 B 中数据同步至 C: + +```SQL +create pipe BC +with source ( + 'source.mode.double-living' ='true' --不转发由其他 Pipe 写入的数据 +) +with sink ( + 'sink'='iotdb-thrift-sink', + 'node-urls' = '127.0.0.1:6669', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url +) +``` + +### 3.6 压缩同步 + +IoTDB 支持在同步过程中指定数据压缩方式。可通过配置 `compressor` 参数,实现数据的实时压缩和传输。`compressor`目前支持 snappy / gzip / lz4 / zstd / lzma2 5 
种可选算法,且可以选择多种压缩算法组合,按配置的顺序进行压缩。`rate-limit-bytes-per-second`(V1.3.3 及以后版本支持)每秒最大允许传输的byte数,计算压缩后的byte,若小于0则不限制。 + +如创建一个名为 A2B 的同步任务: + +```SQL +create pipe A2B +with sink ( + 'node-urls' = '127.0.0.1:6668', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url + 'compressor' = 'snappy,lz4', -- 压缩算法,按配置顺序依次采用 + 'rate-limit-bytes-per-second'='1048576' -- 每秒最大允许传输的byte数 +) +``` + + +### 3.7 加密同步 + +IoTDB 支持在同步过程中使用 SSL 加密,从而在不同的 IoTDB 实例之间安全地传输数据。通过配置 SSL 相关的参数,如证书地址和密码(`ssl.trust-store-path`)、(`ssl.trust-store-pwd`)可以确保数据在同步过程中被 SSL 加密所保护。 + +如创建名为 A2B 的同步任务: + +```SQL +create pipe A2B +with sink ( + 'sink'='iotdb-thrift-ssl-sink', + 'node-urls'='127.0.0.1:6667', -- 目标端 IoTDB 中 DataNode 节点的数据服务端口的 url + 'ssl.trust-store-path'='pki/trusted', -- 连接目标端 DataNode 所需的 trust store 证书路径 + 'ssl.trust-store-pwd'='root' -- 连接目标端 DataNode 所需的 trust store 证书密码 +) +``` + +## 参考:注意事项 + +可通过修改 IoTDB 配置文件(`iotdb-system.properties`)以调整数据同步的参数,如同步数据存储目录等。完整配置如下: + +```Properties +# pipe_receiver_file_dir +# If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/${cn_system_dir}/pipe/receiver). +# If it is absolute, system will save the data in the exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. +# Note: If pipe_receiver_file_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. +# effectiveMode: restart
# For windows platform
# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. +# pipe_receiver_file_dir=data\\confignode\\system\\pipe\\receiver +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. 
+pipe_receiver_file_dir=data/confignode/system/pipe/receiver + +#################### +### Pipe Configuration +#################### + +# Uncomment the following field to configure the pipe lib directory. +# effectiveMode: first_start +# For Windows platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is +# absolute. Otherwise, it is relative. +# pipe_lib_dir=ext\\pipe +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +pipe_lib_dir=ext/pipe + +# The maximum number of threads that can be used to execute the pipe subtasks in PipeSubtaskExecutor. +# The actual value will be min(pipe_subtask_executor_max_thread_num, max(1, CPU core number / 2)). +# effectiveMode: restart +# Datatype: int +pipe_subtask_executor_max_thread_num=5 + +# The connection timeout (in milliseconds) for the thrift client. +# effectiveMode: restart +# Datatype: int +pipe_sink_timeout_ms=900000 + +# The maximum number of selectors that can be used in the sink. +# Recommend to set this value to less than or equal to pipe_sink_max_client_number. +# effectiveMode: restart +# Datatype: int +pipe_sink_selector_number=4 + +# The maximum number of clients that can be used in the sink. +# effectiveMode: restart +# Datatype: int +pipe_sink_max_client_number=16 + +# The total bytes that all pipe sinks can transfer per second. +# When given a value less than or equal to 0, it means no limit. +# default value is -1, which means no limit. 
+# effectiveMode: hot_reload +# Datatype: double +pipe_all_sinks_rate_limit_bytes_per_second=-1 +``` + +## 参考:参数说明 + +### source 参数 + +| **参数** | **描述** | **value 取值范围** | **是否必填** | **默认取值** | +| ------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------ | ------------------------------- | +| source | iotdb-source | String: iotdb-source | 必填 | - | +| mode.streaming | 此参数指定时序数据写入的捕获来源。适用于 `mode.streaming`为 `false` 模式下的场景,决定`inclusion`中`data.insert`数据的捕获来源。提供两种捕获策略:true: 动态选择捕获的类型。系统将根据下游处理速度,自适应地选择是捕获每个写入请求还是仅捕获 TsFile 文件的封口请求。当下游处理速度快时,优先捕获写入请求以减少延迟;当处理速度慢时,仅捕获文件封口请求以避免处理堆积。这种模式适用于大多数场景,能够实现处理延迟和吞吐量的最优平衡。false:固定按批捕获方式。仅捕获 TsFile 文件的封口请求,适用于资源受限的应用场景,以降低系统负载。注意,pipe 启动时捕获的快照数据只会以文件的方式供下游处理。 | Boolean: true / false | 否 | true | +| mode.strict | 在使用 time / path / database-name / table-name 参数过滤数据时,是否需要严格按照条件筛选:`true`: 严格筛选。系统将完全按照给定条件过滤筛选被捕获的数据,确保只有符合条件的数据被选中。`false`:非严格筛选。系统在筛选被捕获的数据时可能会包含一些额外的数据,适用于性能敏感的场景,可降低 CPU 和 IO 消耗。 | Boolean: true / false | 否 | true | +| mode.snapshot | 此参数决定时序数据的捕获方式,影响`inclusion`中的`data`数据。提供两种模式:`true`:静态数据捕获。启动 pipe 时,会进行一次性的数据快照捕获。当快照数据被完全消费后,**pipe 将自动终止(DROP PIPE SQL 会自动执行)**。`false`:动态数据捕获。除了在 pipe 启动时捕获快照数据外,还会持续捕获后续的数据变更。pipe 将持续运行以处理动态数据流。 | Boolean: true / false | 否 | false | +| database-name | 当用户连接指定的 sql_dialect 为 table 时可以指定。此参数决定时序数据的捕获范围,影响`inclusion`中的`data`数据。表示要过滤的数据库的名称。它可以是具体的数据库名,也可以是 Java 风格正则表达式来匹配多个数据库。默认情况下,匹配所有的库。 | String:数据库名或数据库正则模式串,可以匹配未创建的、不存在的库 | 否 | ".*" | +| table-name | 当用户连接指定的 sql_dialect 为 table 时可以指定。此参数决定时序数据的捕获范围,影响`inclusion`中的`data`数据。表示要过滤的表的名称。它可以是具体的表名,也可以是 Java 风格正则表达式来匹配多个表。默认情况下,匹配所有的表。 | String:数据表名或数据表正则模式串,可以是未创建的、不存在的表 | 否 | ".*" | +| start-time | 此参数决定时序数据的捕获范围,影响`inclusion`中的`data`数据。当数据的 event time 大于等于该参数时,数据会被筛选出来进入流处理 pipe。 | Long: [Long.MIN_VALUE, Long.MAX_VALUE] (unix 裸时间戳)或 String:IoTDB 支持的 ISO 格式时间戳 | 否 | Long.MIN_VALUE(unix 裸时间戳) | +| end-time | 
此参数决定时序数据的捕获范围,影响`inclusion`中的`data`数据。当数据的 event time 小于等于该参数时,数据会被筛选出来进入流处理 pipe。 | Long: [Long.MIN_VALUE, Long.MAX_VALUE](unix 裸时间戳)或String:IoTDB 支持的 ISO 格式时间戳 | 否 | Long.MAX_VALUE(unix 裸时间戳) | +| forwarding-pipe-requests | 是否转发由 pipe 数据同步而来的集群外的数据。一般供搭建双活集群时使用,双活集群模式下该参数为 false,以此避免无限的环形同步。 | Boolean: true / false | 否 | true | + +> 💎 **说明:数据抽取模式 mode.streaming 取值 true 和 false 的差异** +> - **true(推荐)**:该取值下,任务将对数据进行实时处理、发送,其特点是高时效、低吞吐 +> - **false**:该取值下,任务将对数据进行批量(按底层数据文件)处理、发送,其特点是低时效、高吞吐 + +### sink 参数 + +#### iotdb-thrift-sink + +| **参数** | **描述** | **value 取值范围** | **是否必填** | **默认取值** | +| --------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -------- | ------------ | +| sink | iotdb-thrift-sink 或 iotdb-thrift-async-sink | String: iotdb-thrift-sink 或 iotdb-thrift-async-sink | 必填 | - | +| node-urls | 目标端 IoTDB 任意多个 DataNode 节点的数据服务端口的 url(请注意同步任务不支持向自身服务进行转发) | String. 例:'127.0.0.1:6667,127.0.0.1:6668,127.0.0.1:6669', '127.0.0.1:6667' | 必填 | - | +| user/usename | 连接接收端使用的用户名,同步要求该用户具备相应的操作权限 | String | 选填 | root | +| password | 连接接收端使用的用户名对应的密码,同步要求该用户具备相应的操作权限 | String | 选填 | root | +| batch.enable | 是否开启日志攒批发送模式,用于提高传输吞吐,降低 IOPS | Boolean: true, false | 选填 | true | +| batch.max-delay-seconds | 在开启日志攒批发送模式时生效,表示一批数据在发送前的最长等待时间(单位:s) | Integer | 选填 | 1 | +| batch.size-bytes | 在开启日志攒批发送模式时生效,表示一批数据最大的攒批大小(单位:byte) | Long | 选填 | 16*1024*1024 | +| compressor | 所选取的 rpc 压缩算法,可配置多个,对每个请求顺序采用 | String: snappy / gzip / lz4 / zstd / lzma2 | 选填 | "" | +| compressor.zstd.level | 所选取的 rpc 压缩算法为 zstd 时,可使用该参数额外配置 zstd 算法的压缩等级 | Int: [-131072, 22] | 选填 | 3 | +| rate-limit-bytes-per-second | 每秒最大允许传输的 byte 数,计算压缩后的 byte(如压缩),若小于 0 则不限制 | Double: [Double.MIN_VALUE, Double.MAX_VALUE] | 选填 | -1 | + + +#### iotdb-thrift-ssl-sink + +| **参数** | **描述** | **value 取值范围** | **是否必填** | **默认取值** | +| --------------------------- | 
------------------------------------------------------------ | ------------------------------------------------------------ | -------- | ------------ | +| sink | iotdb-thrift-ssl-sink | String: iotdb-thrift-ssl-sink | 必填 | - | +| node-urls | 目标端 IoTDB 任意多个 DataNode 节点的数据服务端口的 url(请注意同步任务不支持向自身服务进行转发) | String. 例:'127.0.0.1:6667,127.0.0.1:6668,127.0.0.1:6669', '127.0.0.1:6667' | 必填 | - | +| user/usename | 连接接收端使用的用户名,同步要求该用户具备相应的操作权限 | String | 选填 | root | +| password | 连接接收端使用的用户名对应的密码,同步要求该用户具备相应的操作权限 | String | 选填 | root | +| batch.enable | 是否开启日志攒批发送模式,用于提高传输吞吐,降低 IOPS | Boolean: true, false | 选填 | true | +| batch.max-delay-seconds | 在开启日志攒批发送模式时生效,表示一批数据在发送前的最长等待时间(单位:s) | Integer | 选填 | 1 | +| batch.size-bytes | 在开启日志攒批发送模式时生效,表示一批数据最大的攒批大小(单位:byte) | Long | 选填 | 16*1024*1024 | +| compressor | 所选取的 rpc 压缩算法,可配置多个,对每个请求顺序采用 | String: snappy / gzip / lz4 / zstd / lzma2 | 选填 | "" | +| compressor.zstd.level | 所选取的 rpc 压缩算法为 zstd 时,可使用该参数额外配置 zstd 算法的压缩等级 | Int: [-131072, 22] | 选填 | 3 | +| rate-limit-bytes-per-second | 每秒最大允许传输的 byte 数,计算压缩后的 byte(如压缩),若小于 0 则不限制 | Double: [Double.MIN_VALUE, Double.MAX_VALUE] | 选填 | -1 | +| ssl.trust-store-path | 连接目标端 DataNode 所需的 trust store 证书路径 | String.Example: '127.0.0.1:6667,127.0.0.1:6668,127.0.0.1:6669', '127.0.0.1:6667' | 必填 | - | +| ssl.trust-store-pwd | 连接目标端 DataNode 所需的 trust store 证书密码 | Integer | 必填 | - | \ No newline at end of file diff --git a/src/zh/UserGuide/V2.0.1/Table/User-Manual/Data-Sync_timecho.md b/src/zh/UserGuide/V2.0.1-Table/User-Manual/Data-Sync_timecho.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Table/User-Manual/Data-Sync_timecho.md rename to src/zh/UserGuide/V2.0.1-Table/User-Manual/Data-Sync_timecho.md diff --git a/src/zh/UserGuide/latest/API/Programming-Python-Native-API.md b/src/zh/UserGuide/latest/API/Programming-Python-Native-API.md index a894bc1ab..129b94e5b 100644 --- a/src/zh/UserGuide/latest/API/Programming-Python-Native-API.md +++ 
b/src/zh/UserGuide/latest/API/Programming-Python-Native-API.md @@ -29,9 +29,9 @@ 首先下载包:`pip3 install apache-iotdb` -您可以从这里得到一个使用该包进行数据读写的例子:[Session Example](https://github.com/apache/iotdb/blob/rc/1.3.3/iotdb-client/client-py/SessionExample.py) +您可以从这里得到一个使用该包进行数据读写的例子:[Session Example](https://github.com/apache/iotdb/blob/rc/2.0.1/iotdb-client/client-py/session_example.py) -关于对齐时间序列读写的例子:[Aligned Timeseries Session Example](https://github.com/apache/iotdb/blob/rc/1.3.3/iotdb-client/client-py/SessionAlignedTimeseriesExample.py) +关于对齐时间序列读写的例子:[Aligned Timeseries Session Example](https://github.com/apache/iotdb/blob/rc/2.0.1/iotdb-client/client-py/session_aligned_timeseries_example.py) (您需要在文件的头部添加`import iotdb`) diff --git a/src/zh/UserGuide/latest/API/RestServiceV1.md b/src/zh/UserGuide/latest/API/RestServiceV1.md index 98ef9eec4..c1d12587b 100644 --- a/src/zh/UserGuide/latest/API/RestServiceV1.md +++ b/src/zh/UserGuide/latest/API/RestServiceV1.md @@ -23,8 +23,8 @@ IoTDB 的 RESTful 服务可用于查询、写入和管理操作,它使用 OpenAPI 标准来定义接口并生成框架。 ## 开启RESTful 服务 -RESTful 服务默认情况是关闭的 - +RESTful 服务默认情况是关闭的 + 找到IoTDB安装目录下面的`conf/iotdb-system.properties`文件,将 `enable_rest_service` 设置为 `true` 以启用该模块。 ```properties diff --git a/src/zh/UserGuide/latest/API/RestServiceV2.md b/src/zh/UserGuide/latest/API/RestServiceV2.md index 51fae7854..b572379d3 100644 --- a/src/zh/UserGuide/latest/API/RestServiceV2.md +++ b/src/zh/UserGuide/latest/API/RestServiceV2.md @@ -24,7 +24,7 @@ IoTDB 的 RESTful 服务可用于查询、写入和管理操作,它使用 Open ## 开启RESTful 服务 RESTful 服务默认情况是关闭的 - + 找到IoTDB安装目录下面的`conf/iotdb-system.properties`文件,将 `enable_rest_service` 设置为 `true` 以启用该模块。 ```properties diff --git a/src/zh/UserGuide/latest/Basic-Concept/Data-Model-and-Terminology.md b/src/zh/UserGuide/latest/Basic-Concept/Data-Model-and-Terminology.md index 79c1c21b8..25e7083f8 100644 --- a/src/zh/UserGuide/latest/Basic-Concept/Data-Model-and-Terminology.md +++ b/src/zh/UserGuide/latest/Basic-Concept/Data-Model-and-Terminology.md @@ -130,7 +130,7 @@ 
wildcard ### 特殊字符(反引号) -如果需要在路径结点名中用特殊字符,可以用反引号引用路径结点名,具体使用方法可以参考[反引号](../Reference/Syntax-Rule.md#反引号)。 +如果需要在路径结点名中用特殊字符,可以用反引号引用路径结点名,具体使用方法可以参考[反引号](../SQL-Manual/Syntax-Rule.md#反引号)。 ## 路径模式(Path Pattern) diff --git a/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md b/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md index 3c1ed63c8..5ba32f82a 100644 --- a/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md +++ b/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md @@ -19,7 +19,8 @@ --> -# 测点管理 +# 测点管理 + ## 数据库管理 数据库(Database)可以被视为关系数据库中的Database。 diff --git a/src/zh/UserGuide/latest/Ecosystem-Integration/Telegraf.md b/src/zh/UserGuide/latest/Ecosystem-Integration/Telegraf.md index 080b70483..47d7bd078 100644 --- a/src/zh/UserGuide/latest/Ecosystem-Integration/Telegraf.md +++ b/src/zh/UserGuide/latest/Ecosystem-Integration/Telegraf.md @@ -62,7 +62,7 @@ Telegraf支持多种操作系统,包括Linux、Windows、macOS,Telegraf 的 | interval | 收集此指标的频率。普通插件使用单个全局间隔,但如果某个特定输入的运行频率应更低或更高,您可以在此处进行配置。`interval`可以增加间隔以减少数据输入速率限制。 | | | precision | 覆盖`precision`代理的设置。收集的指标四舍五入到指定的精度`interval`。当在服务输入上设置此值(例如`statsd`:)时,输出数据库可能会合并在同一时间戳发生的多个事件。 | | | collection_jitter | 覆盖`collection_jitter`代理的设置。Collection jitter 用于通过随机的`interval` | | -| name_override | 输出到 IoTDB 时使用的自定义时间序列路径名 | 输出的路径名称需满足“[语法要求](../Reference/Syntax-Rule.md)”要求 | +| name_override | 输出到 IoTDB 时使用的自定义时间序列路径名 | 输出的路径名称需满足“[语法要求](../SQL-Manual/Syntax-Rule.md)”要求 | | name_prefix | 指定附加到测量名称的前缀 | | | name_suffix | 指定附加到测量名称的后缀 | | diff --git a/src/zh/UserGuide/latest/Reference/UDF-Libraries_apache.md b/src/zh/UserGuide/latest/Reference/UDF-Libraries_apache.md index b35e35e1a..7112666cf 100644 --- a/src/zh/UserGuide/latest/Reference/UDF-Libraries_apache.md +++ b/src/zh/UserGuide/latest/Reference/UDF-Libraries_apache.md @@ -27,20 +27,20 @@ ## 安装步骤 1. 
请获取与 IoTDB 版本兼容的 UDF 函数库 JAR 包的压缩包。 - | UDF 安装包 | 支持的 IoTDB 版本 | 下载链接 | + | UDF 函数库版本 | 支持的 IoTDB 版本 | 下载链接 | | --------------- | ----------------- | ------------------------------------------------------------ | - | apache-UDF-1.3.3.zip | V1.3.3及以上 | 请联系天谋商务获取 | - | apache-UDF-1.3.2.zip | V1.0.0~V1.3.2 | 请联系天谋商务获取| + | UDF-1.3.3.zip | V1.3.3及以上 | 请联系天谋商务获取 | + | UDF-1.3.2.zip | V1.0.0~V1.3.2 | 请联系天谋商务获取 | 2. 将获取的压缩包中的 library-udf.jar 文件放置在 IoTDB 集群所有节点的 `/ext/udf` 的目录下 -3. 在 IoTDB 的 SQL 命令行终端(CLI)的 SQL 操作界面中,执行下述相应的函数注册语句。 +3. 在 IoTDB 的 SQL 命令行终端(CLI)或可视化控制台(Workbench)的 SQL 操作界面中,执行下述相应的函数注册语句。 4. 批量注册:两种注册方式:注册脚本 或 SQL汇总语句 - 注册脚本 - 将压缩包中的注册脚本(register-UDF.sh 或 register-UDF.bat)按需复制到 IoTDB 的 tools 目录下,修改脚本中的参数(默认为host=127.0.0.1,rpcPort=6667,user=root,pass=root); - 启动 IoTDB 服务,运行注册脚本批量注册 UDF - SQL汇总语句 - - 打开压缩包中的SQl文件,复制全部 SQL 语句,在 IoTDB 的 SQL 命令行终端(CLI)的 SQL 操作界面中,执行全部 SQl 语句批量注册 UDF + - 打开压缩包中的SQl文件,复制全部 SQL 语句,在 IoTDB 的 SQL 命令行终端(CLI)或可视化控制台(Workbench)的 SQL 操作界面中,执行全部 SQl 语句批量注册 UDF ## 数据质量 diff --git a/src/zh/UserGuide/latest/Reference/Keywords.md b/src/zh/UserGuide/latest/SQL-Manual/Keywords.md similarity index 100% rename from src/zh/UserGuide/latest/Reference/Keywords.md rename to src/zh/UserGuide/latest/SQL-Manual/Keywords.md diff --git a/src/zh/UserGuide/latest/SQL-Manual/Operator-and-Expression.md b/src/zh/UserGuide/latest/SQL-Manual/Operator-and-Expression.md index df99144bf..1d02a15bb 100644 --- a/src/zh/UserGuide/latest/SQL-Manual/Operator-and-Expression.md +++ b/src/zh/UserGuide/latest/SQL-Manual/Operator-and-Expression.md @@ -234,7 +234,7 @@ OR, |, || | EQUAL_SIZE_BUCKET_OUTLIER_SAMPLE | INT32 / INT64 / FLOAT / DOUBLE | `proportion`取值范围为`(0, 1]`,默认为`0.1`
`type`取值为`avg`或`stendis`或`cos`或`prenextdis`,默认为`avg`
`number`取值应大于0,默认`3`| INT32 / INT64 / FLOAT / DOUBLE | 返回符合采样比例和桶内采样个数的等分桶离群值采样 | | M4 | INT32 / INT64 / FLOAT / DOUBLE | 包含固定点数的窗口和滑动时间窗口使用不同的属性参数。包含固定点数的窗口使用属性`windowSize`和`slidingStep`。滑动时间窗口使用属性`timeInterval`、`slidingStep`、`displayWindowBegin`和`displayWindowEnd`。更多细节见下文。 | INT32 / INT64 / FLOAT / DOUBLE | 返回每个窗口内的第一个点(`first`)、最后一个点(`last`)、最小值点(`bottom`)、最大值点(`top`)。在一个窗口内的聚合点输出之前,M4会将它们按照时间戳递增排序并且去重。 | -详细说明及示例见文档 [采样函数](./Function-and-Expression.md#采样函数)。 +详细说明及示例见文档 [采样函数](../SQL-Manual/Function-and-Expression.md#采样函数)。 ### 时间序列处理函数 | 函数名 | 输入序列类型 | 参数 | 输出序列类型 | 功能描述 | diff --git a/src/zh/UserGuide/V2.0.1/Tree/Reference/Syntax-Rule.md b/src/zh/UserGuide/latest/SQL-Manual/Syntax-Rule.md similarity index 100% rename from src/zh/UserGuide/V2.0.1/Tree/Reference/Syntax-Rule.md rename to src/zh/UserGuide/latest/SQL-Manual/Syntax-Rule.md diff --git a/src/zh/UserGuide/latest/User-Manual/IoTDB-View_timecho.md b/src/zh/UserGuide/latest/User-Manual/IoTDB-View_timecho.md index 2181ae4d4..482317b37 100644 --- a/src/zh/UserGuide/latest/User-Manual/IoTDB-View_timecho.md +++ b/src/zh/UserGuide/latest/User-Manual/IoTDB-View_timecho.md @@ -361,6 +361,7 @@ WHERE temperature01 < temperature02 此外,对于别名序列,如果用户想要得到时间序列的tag、attributes等信息,则需要先查询视图列的映射,找到对应的时间序列,再向时间序列查询tag、attributes等信息。查询视图列的映射的方法将会在3.5部分说明。 + ### 视图修改 视图支持的修改操作包括:修改计算逻辑,修改标签/属性,以及删除。 diff --git a/src/zh/UserGuide/latest/User-Manual/Load-Balance.md b/src/zh/UserGuide/latest/User-Manual/Load-Balance.md index a9093e0be..99730de97 100644 --- a/src/zh/UserGuide/latest/User-Manual/Load-Balance.md +++ b/src/zh/UserGuide/latest/User-Manual/Load-Balance.md @@ -102,10 +102,9 @@ IoTDB 是一个分布式数据库,数据的均衡分布对集群的磁盘空 - **阻塞写入**: IoTConsensus 的 region 迁移不直接阻塞写入,但由于过程中需要阻塞 WAL 文件的清理,如果 WAL 文件堆积达到阈值`wal_throttle_threshold_in_byte`,那么当前 DataNode 会暂停写入,直到 WAL 文件恢复到阈值以下。 - + 如果迁移过程中由于 WAL 达到阈值造成写入报错(例如报错信息为 The write is rejected because the wal directory size has reached the threshold),可以将`wal_throttle_threshold_in_byte`调大到 500GB 
或更大以允许继续写入。使用 SQL 语句: ```plain IoTDB> set configuration "wal_throttle_threshold_in_byte"="536870912000" Msg: The statement is executed successfully. - ``` - + ``` \ No newline at end of file