diff --git a/src/.vuepress/sidebar/V1.3.3/en.ts b/src/.vuepress/sidebar/V1.3.3/en.ts
index 874d18640..08a7151e0 100644
--- a/src/.vuepress/sidebar/V1.3.3/en.ts
+++ b/src/.vuepress/sidebar/V1.3.3/en.ts
@@ -147,7 +147,12 @@ export const enSidebar = {
prefix: 'API/',
// children: 'structure',
children: [
- { text: 'Java Native API', link: 'Programming-Java-Native-API' },
+ { text: 'Java Native Interface', collapsible: true,
+ children: [
+ { text: 'Java Native API', link: 'Programming-Java-Native-API' },
+ { text: 'Data Subscription API', link: 'Programming-Data-Subscription' },
+ ],
+ },
{ text: 'Python Native API', link: 'Programming-Python-Native-API' },
{ text: 'C++ Native API', link: 'Programming-Cpp-Native-API' },
{ text: 'Go Native API', link: 'Programming-Go-Native-API' },
diff --git a/src/.vuepress/sidebar/V1.3.3/zh.ts b/src/.vuepress/sidebar/V1.3.3/zh.ts
index 97ee05677..16d5e6166 100644
--- a/src/.vuepress/sidebar/V1.3.3/zh.ts
+++ b/src/.vuepress/sidebar/V1.3.3/zh.ts
@@ -134,7 +134,12 @@ export const zhSidebar = {
prefix: 'API/',
// children: 'structure',
children: [
- { text: 'Java原生接口', link: 'Programming-Java-Native-API' },
+ { text: 'Java原生接口', collapsible: true,
+ children: [
+ { text: 'Java原生API', link: 'Programming-Java-Native-API' },
+ { text: '数据订阅API', link: 'Programming-Data-Subscription' },
+ ],
+ },
{ text: 'Python原生接口', link: 'Programming-Python-Native-API' },
{ text: 'C++原生接口', link: 'Programming-Cpp-Native-API' },
{ text: 'Go原生接口', link: 'Programming-Go-Native-API' },
diff --git a/src/.vuepress/sidebar_timecho/V1.3.3/en.ts b/src/.vuepress/sidebar_timecho/V1.3.3/en.ts
index a539be0dd..199c14a0b 100644
--- a/src/.vuepress/sidebar_timecho/V1.3.3/en.ts
+++ b/src/.vuepress/sidebar_timecho/V1.3.3/en.ts
@@ -163,7 +163,12 @@ export const enSidebar = {
prefix: 'API/',
// children: 'structure',
children: [
- { text: 'Java Native API', link: 'Programming-Java-Native-API' },
+ { text: 'Java Native Interface', collapsible: true,
+ children: [
+ { text: 'Java Native API', link: 'Programming-Java-Native-API' },
+ { text: 'Data Subscription API', link: 'Programming-Data-Subscription' },
+ ],
+ },
{ text: 'Python Native API', link: 'Programming-Python-Native-API' },
{ text: 'C++ Native API', link: 'Programming-Cpp-Native-API' },
{ text: 'Go Native API', link: 'Programming-Go-Native-API' },
diff --git a/src/.vuepress/sidebar_timecho/V1.3.3/zh.ts b/src/.vuepress/sidebar_timecho/V1.3.3/zh.ts
index acd81e02f..54dcb21e4 100644
--- a/src/.vuepress/sidebar_timecho/V1.3.3/zh.ts
+++ b/src/.vuepress/sidebar_timecho/V1.3.3/zh.ts
@@ -146,7 +146,12 @@ export const zhSidebar = {
prefix: 'API/',
// children: 'structure',
children: [
- { text: 'Java原生接口', link: 'Programming-Java-Native-API' },
+ { text: 'Java原生接口', collapsible: true,
+ children: [
+ { text: 'Java原生API', link: 'Programming-Java-Native-API' },
+ { text: '数据订阅API', link: 'Programming-Data-Subscription' },
+ ],
+ },
{ text: 'Python原生接口', link: 'Programming-Python-Native-API' },
{ text: 'C++原生接口', link: 'Programming-Cpp-Native-API' },
{ text: 'Go原生接口', link: 'Programming-Go-Native-API' },
diff --git a/src/.vuepress/sidebar_timecho/V2.0.1/zh-Tree.ts b/src/.vuepress/sidebar_timecho/V2.0.1/zh-Tree.ts
index 79e849f58..dc0900727 100644
--- a/src/.vuepress/sidebar_timecho/V2.0.1/zh-Tree.ts
+++ b/src/.vuepress/sidebar_timecho/V2.0.1/zh-Tree.ts
@@ -146,7 +146,12 @@ export const zhSidebar = {
prefix: 'API/',
// children: 'structure',
children: [
- { text: 'Java原生接口', link: 'Programming-Java-Native-API' },
+ { text: 'Java原生接口', collapsible: true,
+ children: [
+ { text: 'Java原生API', link: 'Programming-Java-Native-API' },
+ { text: '数据订阅API', link: 'Programming-Data-Subscription' },
+ ],
+ },
{ text: 'Python原生接口', link: 'Programming-Python-Native-API' },
{ text: 'C++原生接口', link: 'Programming-Cpp-Native-API' },
{ text: 'Go原生接口', link: 'Programming-Go-Native-API' },
diff --git a/src/UserGuide/Master/Tree/API/Programming-Data-Subscription.md b/src/UserGuide/Master/Tree/API/Programming-Data-Subscription.md
new file mode 100644
index 000000000..89dfbb33c
--- /dev/null
+++ b/src/UserGuide/Master/Tree/API/Programming-Data-Subscription.md
@@ -0,0 +1,244 @@
+
+
+# Data Subscription API
+IoTDB provides a powerful data subscription feature, allowing users to obtain newly added data from IoTDB in real time through the subscription SDK. For the detailed functional definition and introduction, see [Data Sync](../../User-Manual/Data-Sync_timecho.md).
+
+## 1 Core Steps
+
+1. Create Topic: create a topic that covers the measurement points you wish to subscribe to.
+2. Subscribe to Topic: the topic must already exist before a consumer subscribes to it; otherwise the subscription fails. Consumers in the same consumer group share the subscribed data evenly.
+3. Consume Data: a consumer only receives data from topics it has explicitly subscribed to.
+4. Unsubscribe: when a consumer is closed, it exits its consumer group and all of its existing subscriptions are cancelled. A condensed sketch of these four steps follows this list; Section 2 walks through each step in detail.
+
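+The following is a minimal end-to-end sketch of the four steps above, assuming an IoTDB instance reachable at `127.0.0.1:6667` with the default credentials; the detailed examples in Section 2 show each step in full.
+
+```java
+// 1. create a topic covering all data
+try (SubscriptionSession session = new SubscriptionSession("127.0.0.1", 6667)) {
+  session.open();
+  Properties topicConfig = new Properties();
+  topicConfig.put(TopicConstant.PATH_KEY, "root.**");
+  session.createTopic("allData", topicConfig);
+}
+
+// 2. subscribe to the topic with a pull consumer
+Properties consumerConfig = new Properties();
+consumerConfig.put(ConsumerConstant.CONSUMER_ID_KEY, "c1");
+consumerConfig.put(ConsumerConstant.CONSUMER_GROUP_ID_KEY, "cg1");
+try (SubscriptionPullConsumer consumer = new SubscriptionPullConsumer(consumerConfig)) {
+  consumer.open();
+  consumer.subscribe("allData");
+
+  // 3. consume data
+  List<SubscriptionMessage> messages = consumer.poll(10000);
+
+  // 4. unsubscribe (closing the consumer would also cancel all of its subscriptions)
+  consumer.unsubscribe("allData");
+}
+```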
+
+## 2 Detailed Steps
+This section illustrates the core development process and does not demonstrate every parameter and interface. For a comprehensive description of all features and parameters, refer to the [Java Native API](./Programming-Java-Native-API.md).
+
+
+### 2.1 Create a Maven project
+Create a Maven project and add the following dependency (JDK >= 1.8, Maven >= 3.6):
+
+```xml
+<dependencies>
+    <dependency>
+        <groupId>org.apache.iotdb</groupId>
+        <artifactId>iotdb-session</artifactId>
+        <!-- The version should be consistent with the version of the IoTDB server in use -->
+        <version>${project.version}</version>
+    </dependency>
+</dependencies>
+```
+
+### 2.2 Code Example
+#### 2.2.1 Topic Operations
+```java
+import java.util.Optional;
+import java.util.Properties;
+import java.util.Set;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.SubscriptionSession;
+import org.apache.iotdb.session.subscription.model.Topic;
+
+public class DataConsumerExample {
+
+ public static void main(String[] args) throws IoTDBConnectionException, StatementExecutionException {
+ try (SubscriptionSession session = new SubscriptionSession("127.0.0.1", 6667)) {
+ // 1. open session
+ session.open();
+
+ // 2. create a topic of all data
+ Properties topicConfig = new Properties();
+ topicConfig.put(TopicConstant.PATH_KEY, "root.**");
+
+ session.createTopic("allData", topicConfig);
+
+ // 3. show all topics
+ Set<Topic> topics = session.getTopics();
+ System.out.println(topics);
+
+ // 4. show a specific topic
+ Optional<Topic> allData = session.getTopic("allData");
+ allData.ifPresent(System.out::println);
+ }
+ }
+}
+```
+#### 2.2.2 Data Consumption
+
+##### Scenario-1: Subscribing to newly added real-time data in IoTDB (for scenarios such as dashboard or configuration display)
+
+```java
+import java.io.IOException;
+import java.util.List;
+import java.util.Properties;
+import org.apache.iotdb.rpc.subscription.config.ConsumerConstant;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessage;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessageType;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.tsfile.read.common.RowRecord;
+
+public class DataConsumerExample {
+
+ public static void main(String[] args) throws IOException {
+
+ // 1. create a pull consumer; the subscription is automatically cancelled when the try-with-resources block completes
+ Properties consumerConfig = new Properties();
+ consumerConfig.put(ConsumerConstant.CONSUMER_ID_KEY, "c1");
+ consumerConfig.put(ConsumerConstant.CONSUMER_GROUP_ID_KEY, "cg1");
+ consumerConfig.put(TopicConstant.FORMAT_KEY, TopicConstant.FORMAT_SESSION_DATA_SETS_HANDLER_VALUE);
+ try (SubscriptionPullConsumer pullConsumer = new SubscriptionPullConsumer(consumerConfig)) {
+ pullConsumer.open();
+ pullConsumer.subscribe("topic_all");
+ while (true) {
+ List<SubscriptionMessage> messages = pullConsumer.poll(10000);
+ for (final SubscriptionMessage message : messages) {
+ final short messageType = message.getMessageType();
+ if (SubscriptionMessageType.isValidatedMessageType(messageType)) {
+ for (final SubscriptionSessionDataSet dataSet : message.getSessionDataSetsHandler()) {
+ while (dataSet.hasNext()) {
+ final RowRecord record = dataSet.next();
+ System.out.println(record);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
+```
+##### Scenario-2: Subscribing to newly added TsFiles (for scenarios such as regular data backup)
+
+Prerequisite: the topic to be consumed must use the TsFileHandler format, for example: `create topic topic_all_tsfile with ('path'='root.**','format'='TsFileHandler')`
+
+```java
+import java.io.IOException;
+import java.util.List;
+import java.util.Properties;
+import org.apache.iotdb.rpc.subscription.config.ConsumerConstant;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessage;
+
+
+public class DataConsumerExample {
+
+ public static void main(String[] args) throws IOException {
+ // 1. create a pull consumer; the subscription is automatically cancelled when the try-with-resources block completes
+ Properties consumerConfig = new Properties();
+ consumerConfig.put(ConsumerConstant.CONSUMER_ID_KEY, "c1");
+ consumerConfig.put(ConsumerConstant.CONSUMER_GROUP_ID_KEY, "cg1");
+ // 2. Specify the consumption type as the tsfile type
+ consumerConfig.put(TopicConstant.FORMAT_KEY, TopicConstant.FORMAT_TS_FILE_HANDLER_VALUE);
+ consumerConfig.put(ConsumerConstant.FILE_SAVE_DIR_KEY, "/Users/iotdb/Downloads");
+ try (SubscriptionPullConsumer pullConsumer = new SubscriptionPullConsumer(consumerConfig)) {
+ pullConsumer.open();
+ pullConsumer.subscribe("topic_all_tsfile");
+ while (true) {
+ List<SubscriptionMessage> messages = pullConsumer.poll(10000);
+ for (final SubscriptionMessage message : messages) {
+ message.getTsFileHandler().copyFile("/Users/iotdb/Downloads/1.tsfile");
+ }
+ }
+ }
+ }
+}
+```
+
+
+
+
+## 3 Java Native API Description
+
+### 3.1 Parameter List
+The consumer-related parameters can be set through the Properties parameter object. The specific parameters are as follows:
+
+#### SubscriptionConsumer
+
+
+| **Parameter** | **required or optional with default** | **Parameter Meaning** |
+| :---------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- |
+| host | optional: 127.0.0.1 | `String`: The RPC host of a DataNode in IoTDB |
+| port | optional: 6667 | `Integer`: The RPC port of a DataNode in IoTDB |
+| node-urls | optional: 127.0.0.1:6667 | `List<String>`: The RPC addresses of all DataNodes in IoTDB, which can be multiple; either host:port or node-urls can be filled in. If both host:port and node-urls are filled in, the **union** of host:port and node-urls is used as the effective node-urls |
+| username | optional: root | `String`: The username of the DataNode in IoTDB |
+| password | optional: root | `String`: The password of the DataNode in IoTDB |
+| groupId | optional | `String`: the consumer group ID; if not specified, a new consumer group is randomly assigned, ensuring that different consumer groups have different group IDs |
+| consumerId | optional | `String`: the consumer client ID; if not specified, it is randomly assigned, ensuring that each consumer client ID within the same consumer group is unique |
+| heartbeatIntervalMs | optional: 30000 (min: 1000) | `Long`: The interval at which the consumer sends periodic heartbeat requests to the IoTDB DataNode |
+| endpointsSyncIntervalMs | optional: 120000 (min: 5000) | `Long`: The interval at which the consumer detects the expansion or contraction of IoTDB cluster nodes and adjusts the subscription connection |
+| fileSaveDir | optional: Paths.get(System.getProperty("user.dir"), "iotdb-subscription").toString() | `String`: The temporary directory path where the consumer stores the subscribed TsFile files |
+| fileSaveFsync | optional: false | `Boolean`: Whether the consumer actively calls fsync during the subscription of TsFiles |
+
+Special configurations in `SubscriptionPushConsumer` :
+
+| **Parameter** | **required or optional with default** | **Parameter Meaning** |
+| :----------------- | :------------------------------------ | :----------------------------------------------------------------------------- |
+| ackStrategy | optional: `AckStrategy.AFTER_CONSUME` | The acknowledgment mechanism for consumption progress, with the following options: `AckStrategy.BEFORE_CONSUME` (the consumer submits the consumption progress immediately upon receiving the data, before `onReceive`); `AckStrategy.AFTER_CONSUME` (the consumer submits the consumption progress after consuming the data, after `onReceive`) |
+| consumeListener | optional | The callback function for consuming data, which needs to implement the `ConsumeListener` interface, defining the processing logic for data in `SessionDataSetsHandler` and `TsFileHandler` format |
+| autoPollIntervalMs | optional: 5000 (min: 500) | `Long`: The interval at which the consumer automatically pulls data, in **ms** |
+| autoPollTimeoutMs | optional: 10000 (min: 1000) | `Long`: The timeout for each data pull by the consumer, in **ms** |
+
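+For reference, a `SubscriptionPushConsumer` is usually constructed in builder style, with the special parameters above supplied through the corresponding builder methods. The sketch below is illustrative only; the consumer ID, group ID, topic name, and handling logic are placeholders:
+
+```java
+SubscriptionPushConsumer pushConsumer =
+    new SubscriptionPushConsumer.Builder()
+        .consumerId("c1")
+        .consumerGroupId("cg1")
+        // commit the consumption progress after onReceive returns
+        .ackStrategy(AckStrategy.AFTER_CONSUME)
+        .consumeListener(
+            message -> {
+              // processing logic for the received message goes here
+              return ConsumeResult.SUCCESS;
+            })
+        .buildPushConsumer();
+pushConsumer.open();
+pushConsumer.subscribe("allData");
+```
+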
+Special configurations in `SubscriptionPullConsumer` :
+
+| **Parameter** | **required or optional with default** | **Parameter Meaning** |
+| :-------------------------------------------- | :--------------------------------- | :----------------------------------------------------------- |
+| autoCommit | optional: true | `Boolean`: Whether to automatically commit the consumption progress. If set to false, the `commit` methods must be called manually to submit the consumption progress |
+| autoCommitInterval | optional: 5000 (min: 500) | `Long`: The interval at which the consumption progress is automatically committed, in **ms**. This parameter only takes effect when `autoCommit` is set to true |
+
+
+### 3.2 Function List
+#### Data Subscription
+##### SubscriptionPullConsumer
+
+| **Function name** | **Description** | **Parameter** |
+|-------------------------------------|--------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `open()` | Opens the consumer connection and starts message consumption. If `autoCommit` is enabled, it will start the automatic commit worker. | None |
+| `close()` | Closes the consumer connection. If `autoCommit` is enabled, it will commit all uncommitted messages before closing. | None |
+| `poll(final Duration timeout)` | Pulls messages with a specified timeout. | `timeout` : The timeout duration. |
+| `poll(final long timeoutMs)` | Pulls messages with a specified timeout in milliseconds. | `timeoutMs` : The timeout duration in milliseconds. |
+| `poll(final Set<String> topicNames, final Duration timeout)` | Pulls messages from the specified topics with a specified timeout. | `topicNames` : The set of topics to pull messages from. `timeout` : The timeout duration. |
+| `poll(final Set<String> topicNames, final long timeoutMs)` | Pulls messages from the specified topics with a specified timeout in milliseconds. | `topicNames` : The set of topics to pull messages from. `timeoutMs` : The timeout duration in milliseconds. |
+| `commitSync(final SubscriptionMessage message)` | Synchronously commits a single message. | `message` : The message object to be committed. |
+| `commitSync(final Iterable<SubscriptionMessage> messages)` | Synchronously commits multiple messages. | `messages` : The collection of message objects to be committed. |
+| `commitAsync(final SubscriptionMessage message)` | Asynchronously commits a single message. | `message` : The message object to be committed. |
+| `commitAsync(final Iterable<SubscriptionMessage> messages)` | Asynchronously commits multiple messages. | `messages` : The collection of message objects to be committed. |
+| `commitAsync(final SubscriptionMessage message, final AsyncCommitCallback callback)` | Asynchronously commits a single message with a specified callback. | `message` : The message object to be committed. `callback` : The callback function to be executed after asynchronous commit. |
+| `commitAsync(final Iterable<SubscriptionMessage> messages, final AsyncCommitCallback callback)` | Asynchronously commits multiple messages with a specified callback. | `messages` : The collection of message objects to be committed. `callback` : The callback function to be executed after the asynchronous commit. |
+
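+When `autoCommit` is set to false, the consumption progress of each polled batch must be committed explicitly with the commit methods listed above. The sketch below is illustrative and assumes `pullConsumer` was created with automatic commit disabled:
+
+```java
+List<SubscriptionMessage> messages = pullConsumer.poll(10000);
+for (SubscriptionMessage message : messages) {
+  // process the message here
+}
+
+// commit the progress of the whole batch synchronously ...
+pullConsumer.commitSync(messages);
+
+// ... or asynchronously, with an optional callback
+pullConsumer.commitAsync(
+    messages,
+    new AsyncCommitCallback() {
+      @Override
+      public void onComplete() {
+        // the consumption progress was committed successfully
+      }
+
+      @Override
+      public void onFailure(Throwable e) {
+        // handle the commit failure
+      }
+    });
+```
+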
+##### SubscriptionPushConsumer
+
+| **Function name** | **Description** | **Parameter** |
+|-------------------------------------|----------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `open()` | Opens the consumer connection, starts message consumption, and submits the automatic polling worker. | None |
+| `close()` | Closes the consumer connection and stops message consumption. | None |
+| `toString()` | Returns the core configuration information of the consumer object. | None |
+| `coreReportMessage()` | Obtains the key-value representation of the consumer's core configuration. | None |
+| `allReportMessage()` | Obtains the key-value representation of all the consumer's configurations. | None |
+| `buildPushConsumer()` | Builds a `SubscriptionPushConsumer` instance through the `Builder` | None |
+| `ackStrategy(final AckStrategy ackStrategy)` | Configures the message acknowledgment strategy for the consumer. | `ackStrategy`: The specified message acknowledgment strategy. |
+| `consumeListener(final ConsumeListener consumeListener)` |Configures the message consumption logic for the consumer. | `consumeListener`: The processing logic when the consumer receives messages. |
+| `autoPollIntervalMs(final long autoPollIntervalMs)` | Configures the interval for automatic polling. | `autoPollIntervalMs` : The interval for automatic polling, in milliseconds. |
+| `autoPollTimeoutMs(final long autoPollTimeoutMs)` | Configures the timeout for automatic polling. | `autoPollTimeoutMs`: The timeout for automatic polling, in milliseconds. |
\ No newline at end of file
diff --git a/src/UserGuide/Master/Tree/API/Programming-Java-Native-API.md b/src/UserGuide/Master/Tree/API/Programming-Java-Native-API.md
index 984b06ab4..e4c04fa89 100644
--- a/src/UserGuide/Master/Tree/API/Programming-Java-Native-API.md
+++ b/src/UserGuide/Master/Tree/API/Programming-Java-Native-API.md
@@ -1,845 +1,468 @@
-# Java Native API
+# Session Native API
-## Installation
+In the IoTDB native API, `Session` is the core interface for interacting with the database. It provides a rich set of methods for data writing, querying, and metadata operations. Instantiating a `Session` establishes a connection to the IoTDB server, over which these database operations are performed. A `Session` is not thread-safe and must not be called concurrently from multiple threads.
-### Dependencies
+`SessionPool` is a connection pool for `Session`, and it is the recommended way to program against IoTDB. In multi-threaded, concurrent scenarios, `SessionPool` manages and allocates connection resources effectively, improving system performance and resource utilization.
-* JDK >= 1.8
-* Maven >= 3.6
+## 1 Overview of Steps
+1. Create a Connection Pool Instance: initialize a `SessionPool` object that manages multiple `Session` instances.
+2. Perform Operations: call operations directly on the `SessionPool`; it borrows a `Session` from the pool for each call and returns it afterwards, so there is no need to open and close connections manually each time.
+3. Close Connection Pool Resources: when database operations are no longer needed, close the `SessionPool` to release all related resources. A condensed sketch of these steps follows this list.
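+
+A minimal sketch of the three steps, assuming a single local DataNode at `127.0.0.1:6667` with the default `root`/`root` credentials; the following sections show each step in full:
+
+```java
+// 1. create the connection pool once and share it across the application
+SessionPool sessionPool =
+    new SessionPool.Builder()
+        .nodeUrls(Collections.singletonList("127.0.0.1:6667"))
+        .user("root")
+        .password("root")
+        .maxSize(3)
+        .build();
+
+// 2. perform operations directly on the pool; it borrows and returns sessions internally
+sessionPool.executeNonQueryStatement("insert into root.sg1.d1(timestamp, s1) values (1, 1)");
+
+// 3. close the pool to release all connections when they are no longer needed
+sessionPool.close();
+```
+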
-### Using IoTDB Java Native API with Maven
+## 2 Detailed Steps
+This section provides an overview of the core development process and does not demonstrate all parameters and interfaces. For a complete list of functionalities and parameters, please refer to the [Native Interface Description](./Programming-Java-Native-API.md#3-native-interface-description) or check the [Source Code](https://github.com/apache/iotdb/tree/master/example/session/src/main/java/org/apache/iotdb).
+
+### 2.1 Create a Maven Project
+Create a Maven project and add the following dependencies to the pom.xml file (JDK >= 1.8, Maven >= 3.6):
```xml
    <dependency>
        <groupId>org.apache.iotdb</groupId>
        <artifactId>iotdb-session</artifactId>
-       <version>1.0.0</version>
+       <!-- The version should be consistent with the version of the IoTDB server in use -->
+       <version>${project.version}</version>
    </dependency>
```
+### 2.2 Create a Connection Pool Instance
-## Syntax Convention
-
-- **IoTDB-SQL interface:** The input SQL parameter needs to conform to the [syntax conventions](../User-Manual/Syntax-Rule.md#Literal-Values) and be escaped for JAVA strings. For example, you need to add a backslash before the double-quotes. (That is: after JAVA escaping, it is consistent with the SQL statement executed on the command line.)
-- **Other interfaces:**
- - The node names in path or path prefix as parameter: The node names which should be escaped by backticks (`) in the SQL statement, escaping is required here.
- - Identifiers (such as template names) as parameters: The identifiers which should be escaped by backticks (`) in the SQL statement, and escaping is not required here.
-- **Code example for syntax convention could be found at:** `example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java`
-
-## Native APIs
-
-Here we show the commonly used interfaces and their parameters in the Native API:
-
-### Session Management
-
-* Initialize a Session
-
-``` java
-// use default configuration
-session = new Session.Builder.build();
-
-// initialize with a single node
-session =
- new Session.Builder()
- .host(String host)
- .port(int port)
- .build();
-
-// initialize with multiple nodes
-session =
- new Session.Builder()
- .nodeUrls(List nodeUrls)
- .build();
-
-// other configurations
-session =
- new Session.Builder()
- .fetchSize(int fetchSize)
- .username(String username)
- .password(String password)
- .thriftDefaultBufferSize(int thriftDefaultBufferSize)
- .thriftMaxFrameSize(int thriftMaxFrameSize)
- .enableRedirection(boolean enableRedirection)
- .version(Version version)
- .build();
-```
-
-Version represents the SQL semantic version used by the client, which is used to be compatible with the SQL semantics of 0.12 when upgrading 0.13. The possible values are: `V_0_12`, `V_0_13`, `V_1_0`, etc.
-
-
-* Open a Session
-
-``` java
-void open()
-```
-
-* Open a session, with a parameter to specify whether to enable RPC compression
-
-``` java
-void open(boolean enableRPCCompression)
-```
-
-Notice: this RPC compression status of client must comply with that of IoTDB server
-
-* Close a Session
-
-``` java
-void close()
-```
-
-* SessionPool
-
-We provide a connection pool (`SessionPool) for Native API.
-Using the interface, you need to define the pool size.
-
-If you can not get a session connection in 60 seconds, there is a warning log but the program will hang.
-
-If a session has finished an operation, it will be put back to the pool automatically.
-If a session connection is broken, the session will be removed automatically and the pool will try
-to create a new session and redo the operation.
-You can also specify an url list of multiple reachable nodes when creating a SessionPool, just as you would when creating a Session. To ensure high availability of clients in distributed cluster.
-
-For query operations:
-
-1. When using SessionPool to query data, the result set is `SessionDataSetWrapper`;
-2. Given a `SessionDataSetWrapper`, if you have not scanned all the data in it and stop to use it,
-you have to call `SessionPool.closeResultSet(wrapper)` manually;
-3. When you call `hasNext()` and `next()` of a `SessionDataSetWrapper` and there is an exception, then
-you have to call `SessionPool.closeResultSet(wrapper)` manually;
-4. You can call `getColumnNames()` of `SessionDataSetWrapper` to get the column names of query result;
-
-Examples: ```session/src/test/java/org/apache/iotdb/session/pool/SessionPoolTest.java```
-
-Or `example/session/src/main/java/org/apache/iotdb/SessionPoolExample.java`
-
-
-### Database & Timeseries Management API
-
-#### Database Management
-
-* CREATE DATABASE
-
-``` java
-void setStorageGroup(String storageGroupId)
-```
-
-* Delete one or several databases
-
-``` java
-void deleteStorageGroup(String storageGroup)
-void deleteStorageGroups(List storageGroups)
-```
-
-#### Timeseries Management
-
-* Create one or multiple timeseries
-
-``` java
-void createTimeseries(String path, TSDataType dataType,
- TSEncoding encoding, CompressionType compressor, Map props,
- Map tags, Map attributes, String measurementAlias)
-
-void createMultiTimeseries(List paths, List dataTypes,
- List encodings, List compressors,
- List> propsList, List> tagsList,
- List> attributesList, List measurementAliasList)
-```
-
-* Create aligned timeseries
-```
-void createAlignedTimeseries(String prefixPath, List measurements,
- List dataTypes, List encodings,
- List compressors, List measurementAliasList);
-```
-
-Attention: Alias of measurements are **not supported** currently.
-
-* Delete one or several timeseries
-
-``` java
-void deleteTimeseries(String path)
-void deleteTimeseries(List paths)
-```
-
-* Check whether the specific timeseries exists.
-
-``` java
-boolean checkTimeseriesExists(String path)
-```
-
-#### Schema Template
-
-
-Create a schema template for massive identical devices will help to improve memory performance. You can use Template, InternalNode and MeasurementNode to depict the structure of the template, and use belowed interface to create it inside session.
-
-``` java
-public void createSchemaTemplate(Template template);
-
-Class Template {
- private String name;
- private boolean directShareTime;
- Map children;
- public Template(String name, boolean isShareTime);
-
- public void addToTemplate(Node node);
- public void deleteFromTemplate(String name);
- public void setShareTime(boolean shareTime);
-}
-
-Abstract Class Node {
- private String name;
- public void addChild(Node node);
- public void deleteChild(Node node);
-}
-
-Class MeasurementNode extends Node {
- TSDataType dataType;
- TSEncoding encoding;
- CompressionType compressor;
- public MeasurementNode(String name,
- TSDataType dataType,
- TSEncoding encoding,
- CompressionType compressor);
-}
-```
-
-We strongly suggest you implement templates only with flat-measurement (like object 'flatTemplate' in belowed snippet), since tree-structured template may not be a long-term supported feature in further version of IoTDB.
-
-A snippet of using above Method and Class:
-
-``` java
-MeasurementNode nodeX = new MeasurementNode("x", TSDataType.FLOAT, TSEncoding.RLE, CompressionType.SNAPPY);
-MeasurementNode nodeY = new MeasurementNode("y", TSDataType.FLOAT, TSEncoding.RLE, CompressionType.SNAPPY);
-MeasurementNode nodeSpeed = new MeasurementNode("speed", TSDataType.DOUBLE, TSEncoding.GORILLA, CompressionType.SNAPPY);
-
-// This is the template we suggest to implement
-Template flatTemplate = new Template("flatTemplate");
-template.addToTemplate(nodeX);
-template.addToTemplate(nodeY);
-template.addToTemplate(nodeSpeed);
-
-createSchemaTemplate(flatTemplate);
-```
-
-You can query measurement inside templates with these APIS:
```java
-// Return the amount of measurements inside a template
-public int countMeasurementsInTemplate(String templateName);
-
-// Return true if path points to a measurement, otherwise returne false
-public boolean isMeasurementInTemplate(String templateName, String path);
-
-// Return true if path exists in template, otherwise return false
-public boolean isPathExistInTemplate(String templateName, String path);
-
-// Return all measurements paths inside template
-public List showMeasurementsInTemplate(String templateName);
-
-// Return all measurements paths under the designated patter inside template
-public List showMeasurementsInTemplate(String templateName, String pattern);
-```
-
-To implement schema template, you can set the measurement template named 'templateName' at path 'prefixPath'.
-
-**Please notice that, we strongly recommend not setting templates on the nodes above the database to accommodate future updates and collaboration between modules.**
-
-``` java
-void setSchemaTemplate(String templateName, String prefixPath)
-```
-
-Before setting template, you should firstly create the template using
-
-``` java
-void createSchemaTemplate(Template template)
-```
-
-After setting template to a certain path, you can use the template to create timeseries on given device paths through the following interface, or you can write data directly to trigger timeseries auto creation using schema template under target devices.
-
-``` java
-void createTimeseriesUsingSchemaTemplate(List devicePathList)
-```
-
-After setting template to a certain path, you can query for info about template using belowed interface in session:
-
-``` java
-/** @return All template names. */
-public List showAllTemplates();
-
-/** @return All paths have been set to designated template. */
-public List showPathsTemplateSetOn(String templateName);
-
-/** @return All paths are using designated template. */
-public List showPathsTemplateUsingOn(String templateName)
-```
-
-If you are ready to get rid of schema template, you can drop it with belowed interface. Make sure the template to drop has been unset from MTree.
-
-``` java
-void unsetSchemaTemplate(String prefixPath, String templateName);
-public void dropSchemaTemplate(String templateName);
-```
-
-Unset the measurement template named 'templateName' from path 'prefixPath'. When you issue this interface, you should assure that there is a template named 'templateName' set at the path 'prefixPath'.
-
-Attention: Unsetting the template named 'templateName' from node at path 'prefixPath' or descendant nodes which have already inserted records using template is **not supported**.
-
-
-### Data Manipulation Interface (DML Interface)
-
-### Data Insert API
-
-It is recommended to use insertTablet to help improve write efficiency.
-
-* Insert a Tablet,which is multiple rows of a device, each row has the same measurements
- * **Better Write Performance**
- * **Support batch write**
- * **Support null values**: fill the null value with any value, and then mark the null value via BitMap
-
-``` java
-void insertTablet(Tablet tablet)
-
-public class Tablet {
- /** DeviceId if using tree-view interfaces or TableName when using table-view interfaces. */
- private String insertTargetName;
- /** the list of measurement schemas for creating the tablet */
- private List schemas;
- /**
- * Marking the type of each column, namely ID or MEASUREMENT. Notice: the ID columns must be the
- * FIRST ones.
- */
- private List columnCategories;
- /** timestamps in this tablet */
- private long[] timestamps;
- /** each object is a primitive type array, which represents values of one measurement */
- private Object[] values;
- /** each bitmap represents the existence of each value in the current column. */
- private BitMap[] bitMaps;
- /** the number of rows to include in this tablet */
- private int rowSize;
- /** the maximum number of rows for this tablet */
- private int maxRowNumber;
-}
-```
-
-* Insert multiple Tablets
-
-``` java
-void insertTablets(Map tablet)
-```
-
-* Insert a Record, which contains multiple measurement value of a device at a timestamp. This method is equivalent to providing a common interface for multiple data types of values. Later, the value can be cast to the original type through TSDataType.
-
- The correspondence between the Object type and the TSDataType type is shown in the following table.
-
- | TSDataType | Object |
- |------------|--------------|
- | BOOLEAN | Boolean |
- | INT32 | Integer |
- | DATE | LocalDate |
- | INT64 | Long |
- | TIMESTAMP | Long |
- | FLOAT | Float |
- | DOUBLE | Double |
- | TEXT | String, Binary |
- | STRING | String, Binary |
- | BLOB | Binary |
-``` java
-void insertRecord(String deviceId, long time, List measurements,
- List types, List values)
-```
-
-* Insert multiple Records
-
-``` java
-void insertRecords(List deviceIds, List times,
- List> measurementsList, List> typesList,
- List> valuesList)
-```
-* Insert multiple Records that belong to the same device.
- With type info the server has no need to do type inference, which leads a better performance
-
-``` java
-void insertRecordsOfOneDevice(String deviceId, List times,
- List> measurementsList, List> typesList,
- List> valuesList)
-```
-
-#### Insert with type inference
-
-When the data is of String type, we can use the following interface to perform type inference based on the value of the value itself. For example, if value is "true" , it can be automatically inferred to be a boolean type. If value is "3.2" , it can be automatically inferred as a flout type. Without type information, server has to do type inference, which may cost some time.
-
-* Insert a Record, which contains multiple measurement value of a device at a timestamp
-
-``` java
-void insertRecord(String prefixPath, long time, List measurements, List values)
-```
-
-* Insert multiple Records
-
-``` java
-void insertRecords(List deviceIds, List times,
- List> measurementsList, List> valuesList)
-```
-
-* Insert multiple Records that belong to the same device.
-
-``` java
-void insertStringRecordsOfOneDevice(String deviceId, List times,
- List> measurementsList, List> valuesList)
-```
-
-#### Insert of Aligned Timeseries
-
-The Insert of aligned timeseries uses interfaces like insertAlignedXXX, and others are similar to the above interfaces:
-
-* insertAlignedRecord
-* insertAlignedRecords
-* insertAlignedRecordsOfOneDevice
-* insertAlignedStringRecordsOfOneDevice
-* insertAlignedTablet
-* insertAlignedTablets
-
-### Data Delete API
-
-* Delete data before or equal to a timestamp of one or several timeseries
-
-``` java
-void deleteData(String path, long time)
-void deleteData(List paths, long time)
-```
-
-### Data Query API
-
-* Time-series raw data query with time range:
- - The specified query time range is a left-closed right-open interval, including the start time but excluding the end time.
-
-``` java
-SessionDataSet executeRawDataQuery(List paths, long startTime, long endTime);
-```
-
-* Last query:
- - Query the last data, whose timestamp is greater than or equal LastTime.
- ``` java
- SessionDataSet executeLastDataQuery(List paths, long LastTime);
- ```
- - Query the latest point of the specified series of single device quickly, and support redirection;
- If you are sure that the query path is valid, set 'isLegalPathNodes' to true to avoid performance penalties from path verification.
- ``` java
- SessionDataSet executeLastDataQueryForOneDevice(
- String db, String device, List sensors, boolean isLegalPathNodes);
- ```
-
-* Aggregation query:
- - Support specified query time range: The specified query time range is a left-closed right-open interval, including the start time but not the end time.
- - Support GROUP BY TIME.
-
-``` java
-SessionDataSet executeAggregationQuery(List paths, List aggregations);
-
-SessionDataSet executeAggregationQuery(
- List paths, List aggregations, long startTime, long endTime);
-
-SessionDataSet executeAggregationQuery(
- List paths,
- List aggregations,
- long startTime,
- long endTime,
- long interval);
-
-SessionDataSet executeAggregationQuery(
- List paths,
- List aggregations,
- long startTime,
- long endTime,
- long interval,
- long slidingStep);
-```
-
-* Execute query statement
-
-``` java
-SessionDataSet executeQueryStatement(String sql)
-```
-
-### Data Subscription
-
-#### 1 Topic Management
-
-The `SubscriptionSession` class in the IoTDB subscription client provides interfaces for topic management. The status changes of topics are illustrated in the diagram below:
-
-
-
-
-
-##### 1.1 Create Topic
-
-```Java
- void createTopicIfNotExists(String topicName, Properties properties) throws Exception;
-```
-
-Example:
-
-```Java
-try (final SubscriptionSession session = new SubscriptionSession(host, port)) {
- session.open();
- final Properties config = new Properties();
- config.put(TopicConstant.PATH_KEY, "root.db.**");
- session.createTopic(topicName, config);
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.session.pool.SessionPool;
+
+public class IoTDBSessionPoolExample {
+ private static SessionPool sessionPool;
+
+ public static void main(String[] args) {
+ // Using nodeUrls ensures that when one node goes down, other nodes are automatically connected to retry
+ List<String> nodeUrls = new ArrayList<>();
+ nodeUrls.add("127.0.0.1:6667");
+ nodeUrls.add("127.0.0.1:6668");
+ sessionPool =
+ new SessionPool.Builder()
+ .nodeUrls(nodeUrls)
+ .user("root")
+ .password("root")
+ .maxSize(3)
+ .build();
+ }
}
```
-##### 1.2 Delete Topic
+### 2.3 Perform Database Operations
-```Java
-void dropTopicIfExists(String topicName) throws Exception;
-```
+#### 2.3.1 Data Insertion
-##### 1.3 View Topic
+In industrial scenarios, data insertion typically falls into two categories: inserting multiple rows of data (possibly across several devices), and inserting multiple rows of data for a single device. The insertion interfaces for these scenarios are introduced below.
-```Java
-// Get all topics
-Set getTopics() throws Exception;
-
-// Get a specific topic
-Optional getTopic(String topicName) throws Exception;
-```
-
-#### 2 Check Subscription Status
-The `SubscriptionSession` class in the IoTDB subscription client provides interfaces to check the subscription status:
-
-```Java
-Set getSubscriptions() throws Exception;
-Set getSubscriptions(final String topicName) throws Exception;
-```
-
-#### 3 Create Consumer
-
-When creating a consumer using the JAVA native interface, you need to specify the parameters applied to the consumer.
-
-For both `SubscriptionPullConsumer` and `SubscriptionPushConsumer`, the following common configurations are available:
+##### Multi-Row Data Insertion Interface
+Interface Description: Supports inserting multiple rows of data at once, where each row corresponds to multiple measurement values for a device at a specific timestamp.
-| key | **required or optional with default** | description |
-| :---------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- |
-| host | optional: 127.0.0.1 | `String`: The RPC host of a certain DataNode in IoTDB |
-| port | optional: 6667 | Integer: The RPC port of a certain DataNode in IoTDB |
-| node-urls | optional: 127.0.0.1:6667 | `List`: The RPC addresses of all DataNodes in IoTDB, can be multiple; either host:port or node-urls can be filled in. If both host:port and node-urls are filled in, the union of host:port and node-urls will be used to form a new node-urls application |
-| username | optional: root | `String`: The username of a DataNode in IoTDB |
-| password | optional: root | `String`: The password of a DataNode in IoTDB |
-| groupId | optional | `String`: consumer group id, if not specified, a new consumer group will be randomly assigned, ensuring that different consumer groups have different consumer group ids |
-| consumerId | optional | `String`: consumer client id, if not specified, it will be randomly assigned, ensuring that each consumer client id in the same consumer group is unique |
-| heartbeatIntervalMs | optional: 30000 (min: 1000) | `Long`: The interval at which the consumer sends heartbeat requests to the IoTDB DataNode |
-| endpointsSyncIntervalMs | optional: 120000 (min: 5000) | `Long`: The interval at which the consumer detects the expansion and contraction of IoTDB cluster nodes and adjusts the subscription connection |
-| fileSaveDir | optional: Paths.get(System.getProperty("user.dir"), "iotdb-subscription").toString() | `String`: The temporary directory path where the TsFile files subscribed by the consumer are stored |
-| fileSaveFsync | optional: false | `Boolean`: Whether the consumer actively calls fsync during the subscription of TsFile |
+Interface List:
+| **Interface Name** | **Function Description** |
+|------------------------------------------------------------------------------------------------------------------------|-----------------------|
+| `insertRecords(List<String> deviceIds, List<Long> times, List<List<String>> measurementsList, List<List<TSDataType>> typesList, List<List<Object>> valuesList)` | Inserts multiple rows of data, suitable for scenarios where measurements are independently collected. |
-##### 3.1 SubscriptionPushConsumer
+Code Example:
-The following are special configurations for `SubscriptionPushConsumer`:
-
-
-| key | **required or optional with default** | description |
-| :----------------- | :------------------------------------ | :----------------------------------------------------------- |
-| ackStrategy | optional: `ACKStrategy.AFTER_CONSUME` | Consumption progress confirmation mechanism includes the following options: `ACKStrategy.BEFORE_CONSUME` (submit consumption progress immediately when the consumer receives data, before `onReceive`) `ACKStrategy.AFTER_CONSUME` (submit consumption progress after the consumer has consumed the data, after `onReceive`) |
-| consumeListener | optional | Consumption data callback function, need to implement the `ConsumeListener` interface, define the consumption logic of `SessionDataSetsHandler` and `TsFileHandler` form data|
-| autoPollIntervalMs | optional: 5000 (min: 500) | Long: The interval at which the consumer automatically pulls data, in ms |
-| autoPollTimeoutMs | optional: 10000 (min: 1000) | Long: The timeout time for the consumer to pull data each time, in ms |
-
-Among them, the ConsumerListener interface is defined as follows:
-
-
-```Java
-@FunctionInterface
-interface ConsumeListener {
- default ConsumeResult onReceive(Message message) {
- return ConsumeResult.SUCCESS;
+```java
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.pool.SessionPool;
+import org.apache.tsfile.enums.TSDataType;
+
+public class SessionPoolExample {
+ private static SessionPool sessionPool;
+ public static void main(String[] args) throws IoTDBConnectionException, StatementExecutionException {
+ // 1. init SessionPool
+ constructSessionPool();
+ // 2. execute insert data
+ insertRecordsExample();
+ // 3. close SessionPool
+ closeSessionPool();
}
-}
-
-enum ConsumeResult {
- SUCCESS,
- FAILURE,
-}
-```
-
-##### 3.2 SubscriptionPullConsumer
-
-The following are special configurations for `SubscriptionPullConsumer` :
-
-| key | **required or optional with default** | description |
-| :----------------- | :------------------------------------ | :----------------------------------------------------------- |
-| autoCommit | optional: true | Boolean: Whether to automatically commit consumption progress. If this parameter is set to false, the commit method must be called to manually `commit` consumption progress. |
-| autoCommitInterval | optional: 5000 (min: 500) | Long: The interval at which consumption progress is automatically committed, in milliseconds. This only takes effect when the autoCommit parameter is true.
- |
-
-After creating a consumer, you need to manually call the consumer's open method:
-
-
-```Java
-void open() throws Exception;
-```
-
-At this point, the IoTDB subscription client will verify the correctness of the consumer's configuration. After a successful verification, the consumer will join the corresponding consumer group. That is, only after opening the consumer can you use the returned consumer object to subscribe to topics, consume data, and perform other operations.
-
-#### 4 Subscribe to Topics
-
-Both `SubscriptionPushConsumer` and `SubscriptionPullConsumer` provide the following JAVA native interfaces for subscribing to topics:
-
-```Java
-// Subscribe to topics
-void subscribe(String topic) throws Exception;
-void subscribe(List topics) throws Exception;
-```
-
-- Before a consumer subscribes to a topic, the topic must have been created, otherwise, the subscription will fail.
-
-- If a consumer subscribes to a topic that it has already subscribed to, no error will occur.
-
-- If there are other consumers in the same consumer group that have subscribed to the same topics, the consumer will reuse the corresponding consumption progress.
-
-
-#### 5 Consume Data
-
-For both push and pull mode consumers:
-
-
-- Only after explicitly subscribing to a topic will the consumer receive data for that topic.
-
-- If no topics are subscribed to after creation, the consumer will not be able to consume any data, even if other consumers in the same consumer group have subscribed to some topics.
-
-##### 5.1 SubscriptionPushConsumer
-
-After `SubscriptionPushConsumer` subscribes to topics, there is no need to manually pull data.
-
-The data consumption logic is within the `consumeListener` configuration specified when creating `SubscriptionPushConsumer`.
-
-##### 5.2 SubscriptionPullConsumer
-
-After SubscriptionPullConsumer subscribes to topics, it needs to actively call the poll method to pull data:
-
-```Java
-List poll(final Duration timeout) throws Exception;
-List poll(final long timeoutMs) throws Exception;
-List poll(final Set topicNames, final Duration timeout) throws Exception;
-List poll(final Set topicNames, final long timeoutMs) throws Exception;
-```
-In the poll method, you can specify the topic names to be pulled (if not specified, it defaults to pulling all topics that the consumer has subscribed to) and the timeout period.
-
-
-When the SubscriptionPullConsumer is configured with the autoCommit parameter set to false, it is necessary to manually call the commitSync and commitAsync methods to synchronously or asynchronously commit the consumption progress of a batch of data:
-
-
-```Java
-void commitSync(final SubscriptionMessage message) throws Exception;
-void commitSync(final Iterable messages) throws Exception;
-
-CompletableFuture commitAsync(final SubscriptionMessage message);
-CompletableFuture commitAsync(final Iterable messages);
-void commitAsync(final SubscriptionMessage message, final AsyncCommitCallback callback);
-void commitAsync(final Iterable messages, final AsyncCommitCallback callback);
-```
-
-The AsyncCommitCallback class is defined as follows:
-
-```Java
-public interface AsyncCommitCallback {
- default void onComplete() {
- // Do nothing
- }
+ private static void constructSessionPool() {
+ // Using nodeUrls ensures that when one node goes down, other nodes are automatically connected to retry
+ List<String> nodeUrls = new ArrayList<>();
+ nodeUrls.add("127.0.0.1:6667");
+ nodeUrls.add("127.0.0.1:6668");
+ sessionPool =
+ new SessionPool.Builder()
+ .nodeUrls(nodeUrls)
+ .user("root")
+ .password("root")
+ .maxSize(3)
+ .build();
+ }
- default void onFailure(final Throwable e) {
- // Do nothing
- }
+ public static void insertRecordsExample() throws IoTDBConnectionException, StatementExecutionException {
+ String deviceId = "root.sg1.d1";
+ List<String> measurements = new ArrayList<>();
+ measurements.add("s1");
+ measurements.add("s2");
+ measurements.add("s3");
+ List<String> deviceIds = new ArrayList<>();
+ List<List<String>> measurementsList = new ArrayList<>();
+ List<List<Object>> valuesList = new ArrayList<>();
+ List<Long> timestamps = new ArrayList<>();
+ List<List<TSDataType>> typesList = new ArrayList<>();
+
+ for (long time = 0; time < 500; time++) {
+ List<Object> values = new ArrayList<>();
+ List<TSDataType> types = new ArrayList<>();
+ values.add(1L);
+ values.add(2L);
+ values.add(3L);
+ types.add(TSDataType.INT64);
+ types.add(TSDataType.INT64);
+ types.add(TSDataType.INT64);
+
+ deviceIds.add(deviceId);
+ measurementsList.add(measurements);
+ valuesList.add(values);
+ typesList.add(types);
+ timestamps.add(time);
+ if (time != 0 && time % 100 == 0) {
+ try {
+ sessionPool.insertRecords(deviceIds, timestamps, measurementsList, typesList, valuesList);
+ } catch (IoTDBConnectionException | StatementExecutionException e) {
+ // handle the exception
+ }
+ deviceIds.clear();
+ measurementsList.clear();
+ valuesList.clear();
+ typesList.clear();
+ timestamps.clear();
+ }
+ }
+ try {
+ sessionPool.insertRecords(deviceIds, timestamps, measurementsList, typesList, valuesList);
+ } catch (IoTDBConnectionException | StatementExecutionException e) {
+ // handle the exception
+ }
+ }
+
+ public static void closeSessionPool(){
+ sessionPool.close();
+ }
}
```
+##### Single-Device Multi-Row Data Insertion Interface
+Interface Description: Supports inserting multiple rows of data for a single device at once, where each row corresponds to multiple measurement values for a specific timestamp.
-#### 6 Unsubscribe
+Interface List:
-The `SubscriptionPushConsumer` and `SubscriptionPullConsumer` provide the following JAVA native interfaces for unsubscribing and closing the consumer:
+| **Interface Name** | **Function Description** |
+|-----------------------------------------------------------------------------------------|----------------------------|
+| `insertTablet(Tablet tablet)` | Inserts multiple rows of data for a single device, suitable for scenarios where all measurements of the device are collected at the same timestamps. |
-```Java
-// Unsubscribe from topics
-void unsubscribe(String topic) throws Exception;
-void unsubscribe(List topics) throws Exception;
+Code Example:
-// Close consumer
-void close();
-```
-
-- If a consumer unsubscribes from a topic that it has not subscribed to, no error will occur.
-- When a consumer is closed, it will exit the corresponding consumer group and automatically unsubscribe from all topics it is currently subscribed to.
-- Once a consumer is closed, its lifecycle ends, and it cannot be reopened to subscribe to and consume data again.
-
-
-#### 7 Code Examples
-
-##### 7.1 Single Pull Consumer Consuming SessionDataSetsHandler Format Data
-
-```Java
-// Create topics
-try (final SubscriptionSession session = new SubscriptionSession(HOST, PORT)) {
- session.open();
- final Properties config = new Properties();
- config.put(TopicConstant.PATH_KEY, "root.db.**");
- session.createTopic(TOPIC_1, config);
-}
-
-// Subscription: property-style ctor
-final Properties config = new Properties();
-config.put(ConsumerConstant.CONSUMER_ID_KEY, "c1");
-config.put(ConsumerConstant.CONSUMER_GROUP_ID_KEY, "cg1");
-
-final SubscriptionPullConsumer consumer1 = new SubscriptionPullConsumer(config);
-consumer1.open();
-consumer1.subscribe(TOPIC_1);
-while (true) {
- LockSupport.parkNanos(SLEEP_NS); // wait some time
- final List messages = consumer1.poll(POLL_TIMEOUT_MS);
- for (final SubscriptionMessage message : messages) {
- for (final SubscriptionSessionDataSet dataSet : message.getSessionDataSetsHandler()) {
- System.out.println(dataSet.getColumnNames());
- System.out.println(dataSet.getColumnTypes());
- while (dataSet.hasNext()) {
- System.out.println(dataSet.next());
- }
+```java
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.pool.SessionPool;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+
+public class SessionPoolExample {
+ private static SessionPool sessionPool;
+ public static void main(String[] args) throws IoTDBConnectionException, StatementExecutionException {
+ // 1. init SessionPool
+ constructSessionPool();
+ // 2. execute insert data
+ insertTabletExample();
+ // 3. close SessionPool
+ closeSessionPool();
+ }
+
+ private static void constructSessionPool() {
+ // Using nodeUrls ensures that when one node goes down, other nodes are automatically connected to retry
+ List<String> nodeUrls = new ArrayList<>();
+ nodeUrls.add("127.0.0.1:6667");
+ nodeUrls.add("127.0.0.1:6668");
+ sessionPool =
+ new SessionPool.Builder()
+ .nodeUrls(nodeUrls)
+ .user("root")
+ .password("root")
+ .maxSize(3)
+ .build();
}
- }
- // Auto commit
-}
-
-// Show topics and subscriptions
-try (final SubscriptionSession session = new SubscriptionSession(HOST, PORT)) {
- session.open();
- session.getTopics().forEach((System.out::println));
- session.getSubscriptions().forEach((System.out::println));
-}
-
-consumer1.unsubscribe(TOPIC_1);
-consumer1.close();
-```
-
-##### 7.2 Multiple Push Consumers Consuming TsFileHandler Format Data
-```Java
-// Create topics
-try (final SubscriptionSession subscriptionSession = new SubscriptionSession(HOST, PORT)) {
- subscriptionSession.open();
- final Properties config = new Properties();
- config.put(TopicConstant.FORMAT_KEY, TopicConstant.FORMAT_TS_FILE_HANDLER_VALUE);
- subscriptionSession.createTopic(TOPIC_2, config);
-}
-
-final List threads = new ArrayList<>();
-for (int i = 0; i < 8; ++i) {
- final int idx = i;
- final Thread thread =
- new Thread(
- () -> {
- // Subscription: builder-style ctor
- try (final SubscriptionPushConsumer consumer2 =
- new SubscriptionPushConsumer.Builder()
- .consumerId("c" + idx)
- .consumerGroupId("cg2")
- .fileSaveDir(System.getProperty("java.io.tmpdir"))
- .ackStrategy(AckStrategy.AFTER_CONSUME)
- .consumeListener(
- message -> {
- doSomething(message.getTsFileHandler());
- return ConsumeResult.SUCCESS;
- })
- .buildPushConsumer()) {
- consumer2.open();
- consumer2.subscribe(TOPIC_2);
- // block the consumer main thread
- Thread.sleep(Long.MAX_VALUE);
- } catch (final IOException | InterruptedException e) {
- throw new RuntimeException(e);
+ private static void insertTabletExample() throws IoTDBConnectionException, StatementExecutionException {
+ /*
+ * A Tablet example:
+ * device1
+ * time s1, s2, s3
+ * 1, 1, 1, 1
+ * 2, 2, 2, 2
+ * 3, 3, 3, 3
+ */
+ // The schema of measurements of one device
+ // only the measurementId and data type in MeasurementSchema take effect in a Tablet
+ List<MeasurementSchema> schemaList = new ArrayList<>();
+ schemaList.add(new MeasurementSchema("s1", TSDataType.INT64));
+ schemaList.add(new MeasurementSchema("s2", TSDataType.INT64));
+ schemaList.add(new MeasurementSchema("s3", TSDataType.INT64));
+
+ Tablet tablet = new Tablet("root.sg.d1", schemaList, 100);
+
+ // Method 1 to add tablet data
+ long timestamp = System.currentTimeMillis();
+
+ Random random = new Random();
+ for (long row = 0; row < 100; row++) {
+ int rowIndex = tablet.rowSize++;
+ tablet.addTimestamp(rowIndex, timestamp);
+ for (int s = 0; s < 3; s++) {
+ long value = random.nextLong();
+ tablet.addValue(schemaList.get(s).getMeasurementId(), rowIndex, value);
}
- });
- thread.start();
- threads.add(thread);
-}
+ if (tablet.rowSize == tablet.getMaxRowNumber()) {
+ sessionPool.insertTablet(tablet);
+ tablet.reset();
+ }
+ timestamp++;
+ }
+ if (tablet.rowSize != 0) {
+ sessionPool.insertTablet(tablet);
+ tablet.reset();
+ }
+ }
-for (final Thread thread : threads) {
- thread.join();
+ public static void closeSessionPool(){
+ sessionPool.close();
+ }
}
```
-### Other Modules (Execute SQL Directly)
-
-* Execute non query statement
-
-``` java
-void executeNonQueryStatement(String sql)
-```
-
-
-### Write Test Interface (to profile network cost)
-
-These methods **don't** insert data into database and server just return after accept the request.
-
-* Test the network and client cost of insertRecord
-
-``` java
-void testInsertRecord(String deviceId, long time, List measurements, List values)
-
-void testInsertRecord(String deviceId, long time, List measurements,
- List types, List values)
-```
+#### 2.3.2 SQL Operations
-* Test the network and client cost of insertRecords
+SQL operations are divided into two categories: queries and non-queries. The corresponding interfaces are executeQuery and executeNonQuery. The difference between them is that the former executes specific query statements and returns a result set, while the latter performs insert, delete, and update operations and does not return a result set.
-``` java
-void testInsertRecords(List deviceIds, List times,
- List> measurementsList, List> valuesList)
-
-void testInsertRecords(List deviceIds, List times,
- List> measurementsList, List> typesList
- List> valuesList)
-```
-
-* Test the network and client cost of insertTablet
-
-``` java
-void testInsertTablet(Tablet tablet)
-```
+```java
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.iotdb.isession.pool.SessionDataSetWrapper;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.pool.SessionPool;
+
+public class SessionPoolExample {
+ private static SessionPool sessionPool;
+ public static void main(String[] args) throws IoTDBConnectionException, StatementExecutionException {
+ // 1. init SessionPool
+ constructSessionPool();
+ // 2. execute a query SQL statement and print the result set
+ executeQueryExample();
+ // 3. execute a non-query SQL statement, such as a DDL or DML command
+ executeNonQueryExample();
+ // 4. close SessionPool
+ closeSessionPool();
+ }
-* Test the network and client cost of insertTablets
+ private static void executeNonQueryExample() throws IoTDBConnectionException, StatementExecutionException {
+ // 1. create a nonAligned time series
+ sessionPool.executeNonQueryStatement("create timeseries root.test.d1.s1 with dataType = int32");
+ // 2. set ttl
+ sessionPool.executeNonQueryStatement("set TTL to root.test.** 10000");
+ // 3. delete time series
+ sessionPool.executeNonQueryStatement("delete timeseries root.test.d1.s1");
+ private static void executeQueryExample() throws IoTDBConnectionException, StatementExecutionException {
+ // 1. execute normal query
+ try(SessionDataSetWrapper wrapper = sessionPool.executeQueryStatement("select s1 from root.sg1.d1 limit 10")) {
+ while (wrapper.hasNext()) {
+ System.out.println(wrapper.next());
+ }
+ }
+ // 2. execute aggregate query
+ try(SessionDataSetWrapper wrapper = sessionPool.executeQueryStatement("select count(s1) from root.sg1.d1 group by ([0, 40), 5ms) ")) {
+ while (wrapper.hasNext()) {
+ System.out.println(wrapper.next());
+ }
+ }
+ }
+
+ private static void constructSessionPool() {
+ // Using nodeUrls ensures that when one node goes down, other nodes are automatically connected to retry
+ List<String> nodeUrls = new ArrayList<>();
+ nodeUrls.add("127.0.0.1:6667");
+ nodeUrls.add("127.0.0.1:6668");
+ sessionPool =
+ new SessionPool.Builder()
+ .nodeUrls(nodeUrls)
+ .user("root")
+ .password("root")
+ .maxSize(3)
+ .build();
+ }
-``` java
-void testInsertTablets(Map tablets)
+ public static void closeSessionPool(){
+ sessionPool.close();
+ }
+}
```
-
-### Coding Examples
-
-To get more information of the following interfaces, please view session/src/main/java/org/apache/iotdb/session/Session.java
-
-The sample code of using these interfaces is in example/session/src/main/java/org/apache/iotdb/SessionExample.java,which provides an example of how to open an IoTDB session, execute a batch insertion.
-
-For examples of aligned timeseries and measurement template, you can refer to `example/session/src/main/java/org/apache/iotdb/AlignedTimeseriesSessionExample.java`
\ No newline at end of file
+### 3 Native Interface Description
+
+#### 3.1 Parameter List
+The Session class has the following fields, which can be set through the constructor or the Session.Builder method:
+
+| **Field Name** | **Type** | **Description** |
+|--------------------------------|-------------------------------|----------------------------------------------------------------------|
+| `nodeUrls` | `List<String>` | List of URLs for database nodes, supporting multiple node connections |
+| `username` | `String` | Username |
+| `password` | `String` | Password |
+| `fetchSize` | `int` | Default batch size for query results |
+| `useSSL` | `boolean` | Whether to enable SSL |
+| `trustStore` | `String` | Path to the trust store |
+| `trustStorePwd` | `String` | Password for the trust store |
+| `queryTimeoutInMs` | `long` | Query timeout in milliseconds |
+| `enableRPCCompression` | `boolean` | Whether to enable RPC compression |
+| `connectionTimeoutInMs` | `int` | Connection timeout in milliseconds |
+| `zoneId` | `ZoneId` | Time zone setting for the session |
+| `thriftDefaultBufferSize` | `int` | Default buffer size for Thrift |
+| `thriftMaxFrameSize` | `int` | Maximum frame size for Thrift |
+| `defaultEndPoint` | `TEndPoint` | Default database endpoint information |
+| `defaultSessionConnection` | `SessionConnection` | Default session connection object |
+| `isClosed` | `boolean` | Whether the current session is closed |
+| `enableRedirection` | `boolean` | Whether to enable redirection |
+| `enableRecordsAutoConvertTablet` | `boolean` | Whether to enable automatic conversion of inserted records into Tablets |
+| `deviceIdToEndpoint` | `Map<String, TEndPoint>` | Mapping of device IDs to database endpoints |
+| `endPointToSessionConnection` | `Map<TEndPoint, SessionConnection>` | Mapping of database endpoints to session connections |
+| `executorService` | `ScheduledExecutorService` | Thread pool for periodically updating the node list |
+| `availableNodes` | `INodeSupplier` | Supplier of available nodes |
+| `enableQueryRedirection` | `boolean` | Whether to enable query redirection |
+| `version` | `Version` | Client version number, used to check compatibility with the server |
+| `enableAutoFetch` | `boolean` | Whether to enable automatic fetching |
+| `maxRetryCount` | `int` | Maximum number of retries |
+| `retryIntervalInMs` | `long` | Retry interval in milliseconds |
+
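+As a reference, the sketch below shows how a `Session` can be configured through `Session.Builder` using some of the fields above; it is a minimal sketch, and the node addresses and credentials are placeholders.
+
+```java
+import java.util.Arrays;
+
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.session.Session;
+
+public class SessionBuilderExample {
+  public static void main(String[] args) throws IoTDBConnectionException {
+    // build a session against multiple nodes so that another node can be tried if one goes down
+    Session session =
+        new Session.Builder()
+            .nodeUrls(Arrays.asList("127.0.0.1:6667", "127.0.0.1:6668"))
+            .username("root")
+            .password("root")
+            .fetchSize(10000)
+            .enableRedirection(true)
+            .build();
+    session.open();
+    // ... perform reads and writes with the session here ...
+    session.close();
+  }
+}
+```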
+
+
+#### 3.2 Interface list
+
+##### 3.2.1 Metadata Management
+
+| **Method Name** | **Function Description** | **Parameter Explanation** |
+|-----------------------------------------------------------------------------------------|--------------------------------------------|------------------------------------------------------------------------------------------------------------|
+| `createDatabase(String database)` | Create a database | `database`: The name of the database to be created |
+| `deleteDatabase(String database)` | Delete a specified database | `database`: The name of the database to be deleted |
+| `deleteDatabases(List databases)` | Batch delete databases | `databases`: A list of database names to be deleted |
+| `createTimeseries(String path, TSDataType dataType, TSEncoding encoding, CompressionType compressor)` | Create a single time series | `path`: The path of the time series,`dataType`: The data type,`encoding`: The encoding type,`compressor`: The compression type |
+| `createAlignedTimeseries(...)` | Create aligned time series | Device ID, list of measurement points, list of data types, list of encodings, list of compression types |
+| `createMultiTimeseries(...)` | Batch create time series | Multiple paths, data types, encodings, compression types, properties, tags, aliases, etc. |
+| `deleteTimeseries(String path)` | Delete a time series | `path`: The path of the time series to be deleted |
+| `deleteTimeseries(List paths)` | Batch delete time series | `paths`: A list of time series paths to be deleted |
+| `setSchemaTemplate(String templateName, String prefixPath)` | Set a schema template | `templateName`: The name of the template,`prefixPath`: The path where the template is applied |
+| `createSchemaTemplate(Template template)` | Create a schema template | `template`: The template object |
+| `dropSchemaTemplate(String templateName)` | Delete a schema template | `templateName`: The name of the template to be deleted |
+| `addAlignedMeasurementsInTemplate(...)` | Add aligned measurements to a template | Template name, list of measurement paths, data type, encoding type, compression type |
+| `addUnalignedMeasurementsInTemplate(...)` | Add unaligned measurements to a template | Same as above |
+| `deleteNodeInTemplate(String templateName, String path)` | Delete a node in a template | `templateName`: The name of the template,`path`: The path to be deleted |
+| `countMeasurementsInTemplate(String name)` | Count the number of measurements in a template | `name`: The name of the template |
+| `isMeasurementInTemplate(String templateName, String path)` | Check if a measurement exists in a template | `templateName`: The name of the template,`path`: The path of the measurement |
+| `isPathExistInTemplate(String templateName, String path)` | Check if a path exists in a template | Same as above |
+| `showMeasurementsInTemplate(String templateName)` | Show measurements in a template | `templateName`: The name of the template |
+| `showMeasurementsInTemplate(String templateName, String pattern)` | Show measurements in a template by pattern | `templateName`: The name of the template,`pattern`: The matching pattern |
+| `showAllTemplates()` | Show all templates | No parameters |
+| `showPathsTemplateSetOn(String templateName)` | Show paths where a template is set | `templateName`: The name of the template |
+| `showPathsTemplateUsingOn(String templateName)` | Show actual paths using a template | Same as above |
+| `unsetSchemaTemplate(String prefixPath, String templateName)` | Unset the template setting for a path | `prefixPath`: The path,`templateName`: The name of the template |
+
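+A minimal sketch of the metadata interfaces above, creating and then removing a database and a time series; the paths are placeholders, and the enum import paths are assumed to follow the `org.apache.tsfile` packages used elsewhere in this document.
+
+```java
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.Session;
+// the following enum packages are assumed; adjust them to the tsfile version shipped with your client
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+
+public class MetadataExample {
+  public static void main(String[] args) throws IoTDBConnectionException, StatementExecutionException {
+    Session session = new Session.Builder().host("127.0.0.1").port(6667).build();
+    session.open();
+    // create a database, then a single INT32 time series under it
+    session.createDatabase("root.sg1");
+    session.createTimeseries("root.sg1.d1.s1", TSDataType.INT32, TSEncoding.PLAIN, CompressionType.SNAPPY);
+    // clean up: drop the time series and the database again
+    session.deleteTimeseries("root.sg1.d1.s1");
+    session.deleteDatabase("root.sg1");
+    session.close();
+  }
+}
+```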
+
+##### 3.2.2 Data Insertion
+
+| **Method Name** | **Function Description** | **Parameter Explanation** |
+|-----------------------------------------------------------------------------------------|--------------------------------------------|------------------------------------------------------------------------------------------------------------|
+| `insertRecord(String deviceId, long time, List measurements, List types, Object... values)` | Insert a single record | `deviceId`: Device ID,`time`: Timestamp,`measurements`: List of measurement points,`types`: List of data types,`values`: List of values |
+| `insertRecord(String deviceId, long time, List measurements, List values)` | Insert a single record | `deviceId`: Device ID,`time`: Timestamp,`measurements`: List of measurement points,`values`: List of values |
+| `insertRecords(List deviceIds, List times, List> measurementsList, List> valuesList)` | Insert multiple records | `deviceIds`: List of device IDs,`times`: List of timestamps,`measurementsList`: List of lists of measurement points,`valuesList`: List of lists of values |
+| `insertRecords(List deviceIds, List times, List> measurementsList, List> typesList, List> valuesList)` | Insert multiple records | Same as above, plus `typesList`: List of lists of data types |
+| `insertRecordsOfOneDevice(String deviceId, List times, List> measurementsList, List> typesList, List> valuesList)` | Insert multiple records for a single device | `deviceId`: Device ID,`times`: List of timestamps,`measurementsList`: List of lists of measurement points,`typesList`: List of lists of types,`valuesList`: List of lists of values |
+| `insertRecordsOfOneDevice(String deviceId, List times, List> measurementsList, List> typesList, List> valuesList, boolean haveSorted)` | Insert sorted multiple records for a single device | Same as above, plus `haveSorted`: Whether the data is already sorted |
+| `insertStringRecordsOfOneDevice(String deviceId, List times, List> measurementsList, List> valuesList)` | Insert string-formatted records for a single device | `deviceId`: Device ID,`times`: List of timestamps,`measurementsList`: List of lists of measurement points,`valuesList`: List of lists of values |
+| `insertStringRecordsOfOneDevice(String deviceId, List times, List> measurementsList, List> valuesList, boolean haveSorted)` | Insert sorted string-formatted records for a single device | Same as above, plus `haveSorted`: Whether the data is already sorted |
+| `insertAlignedRecord(String deviceId, long time, List measurements, List types, List values)` | Insert a single aligned record | `deviceId`: Device ID,`time`: Timestamp,`measurements`: List of measurement points,`types`: List of types,`values`: List of values |
+| `insertAlignedRecord(String deviceId, long time, List measurements, List values)` | Insert a single string-formatted aligned record | `deviceId`: Device ID,`time`: Timestamp,`measurements`: List of measurement points,`values`: List of values |
+| `insertAlignedRecords(List deviceIds, List times, List> measurementsList, List> valuesList)` | Insert multiple aligned records | `deviceIds`: List of device IDs,`times`: List of timestamps,`measurementsList`: List of lists of measurement points,`valuesList`: List of lists of values |
+| `insertAlignedRecords(List deviceIds, List times, List> measurementsList, List> typesList, List> valuesList)` | Insert multiple aligned records | Same as above, plus `typesList`: List of lists of data types |
+| `insertAlignedRecordsOfOneDevice(String deviceId, List times, List> measurementsList, List> typesList, List> valuesList)` | Insert multiple aligned records for a single device | Same as above |
+| `insertAlignedRecordsOfOneDevice(String deviceId, List times, List> measurementsList, List> typesList, List> valuesList, boolean haveSorted)` | Insert sorted multiple aligned records for a single device | Same as above, plus `haveSorted`: Whether the data is already sorted |
+| `insertAlignedStringRecordsOfOneDevice(String deviceId, List times, List> measurementsList, List> valuesList)` | Insert string-formatted aligned records for a single device | `deviceId`: Device ID,`times`: List of timestamps,`measurementsList`: List of lists of measurement points,`valuesList`: List of lists of values |
+| `insertAlignedStringRecordsOfOneDevice(String deviceId, List times, List> measurementsList, List> valuesList, boolean haveSorted)` | Insert sorted string-formatted aligned records for a single device | Same as above, plus `haveSorted`: whether the data is already sorted |
+| `insertTablet(Tablet tablet)` | Insert a single Tablet | `tablet`: The Tablet data to be inserted |
+| `insertTablet(Tablet tablet, boolean sorted)` | Insert a sorted Tablet | Same as above, plus `sorted`: whether the data is already sorted |
+| `insertAlignedTablet(Tablet tablet)` | Insert a single aligned Tablet | `tablet`: The Tablet data to be inserted |
+| `insertAlignedTablet(Tablet tablet, boolean sorted)` | Insert a sorted aligned Tablet | Same as above, plus `sorted`: whether the data is already sorted |
+| `insertTablets(Map tablets)` | Batch insert multiple Tablets | `tablets`: Mapping from device IDs to Tablet data |
+| `insertTablets(Map tablets, boolean sorted)` | Batch insert multiple sorted Tablets | Same as above, plus `sorted`: whether the data is already sorted |
+| `insertAlignedTablets(Map tablets)` | Batch insert multiple aligned Tablets | `tablets`: Mapping from device IDs to Tablet data |
+| `insertAlignedTablets(Map tablets, boolean sorted)` | Batch insert multiple sorted aligned Tablets | Same as above, plus `sorted`: whether the data is already sorted |
+
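+For example, the string-valued `insertRecord` overload can be used as follows; this is only a sketch, and the device path, measurement names, and values are placeholders. Without explicit types, the server infers the data types from the string values.
+
+```java
+import java.util.Arrays;
+import java.util.List;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.Session;
+
+public class InsertRecordExample {
+  public static void main(String[] args) throws IoTDBConnectionException, StatementExecutionException {
+    Session session = new Session.Builder().host("127.0.0.1").port(6667).build();
+    session.open();
+    List<String> measurements = Arrays.asList("s1", "s2", "s3");
+    // one row for device root.sg1.d1; the values are passed as strings and the server infers their types
+    List<String> values = Arrays.asList("1", "2", "3");
+    session.insertRecord("root.sg1.d1", System.currentTimeMillis(), measurements, values);
+    session.close();
+  }
+}
+```
+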
+##### 3.2.3 Data Deletion
+
+| **Method Name** | **Function Description** | **Parameter Explanation** |
+|-----------------------------------------------------------------------------------------|--------------------------------------------|------------------------------------------------------------------------------------------------------------|
+| `deleteTimeseries(String path)` | Delete a single time series | `path`: The path of the time series |
+| `deleteTimeseries(List paths)` | Batch delete time series | `paths`: A list of time series paths |
+| `deleteData(String path, long endTime)` | Delete historical data for a specified path | `path`: The path,`endTime`: The end timestamp |
+| `deleteData(List paths, long endTime)` | Batch delete historical data for specified paths | `paths`: A list of paths,`endTime`: The end timestamp |
+| `deleteData(List paths, long startTime, long endTime)` | Delete historical data within a time range for specified paths | Same as above, plus `startTime`: The start timestamp |
+
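+A short sketch of the deletion interfaces above; the path and timestamps are placeholders.
+
+```java
+import java.util.Arrays;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.Session;
+
+public class DeleteDataExample {
+  public static void main(String[] args) throws IoTDBConnectionException, StatementExecutionException {
+    Session session = new Session.Builder().host("127.0.0.1").port(6667).build();
+    session.open();
+    // delete the historical data of root.sg1.d1.s1 within the given time range
+    session.deleteData(Arrays.asList("root.sg1.d1.s1"), 0L, 99L);
+    // then drop the time series itself
+    session.deleteTimeseries("root.sg1.d1.s1");
+    session.close();
+  }
+}
+```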
+
+##### 3.2.4 Data Query
+
+| **Method Name** | **Function Description** | **Parameter Explanation** |
+|-----------------------------------------------------------------------------------------|--------------------------------------------|------------------------------------------------------------------------------------------------------------|
+| `executeQueryStatement(String sql)` | Execute a query statement | `sql`: The query SQL statement |
+| `executeQueryStatement(String sql, long timeoutInMs)` | Execute a query statement with timeout | `sql`: The query SQL statement, `timeoutInMs`: The query timeout (in milliseconds) |
+| `executeRawDataQuery(List paths, long startTime, long endTime)` | Query raw data for specified paths | `paths`: A list of query paths, `startTime`: The start timestamp, `endTime`: The end timestamp |
+| `executeRawDataQuery(List paths, long startTime, long endTime, long timeOut)` | Query raw data for specified paths (with timeout) | Same as above, plus `timeOut`: The timeout duration |
+| `executeLastDataQuery(List paths)` | Query the latest data | `paths`: A list of query paths |
+| `executeLastDataQuery(List paths, long lastTime)` | Query the latest data at a specified time | `paths`: A list of query paths, `lastTime`: The specified timestamp |
+| `executeLastDataQuery(List paths, long lastTime, long timeOut)` | Query the latest data at a specified time (with timeout) | Same as above, plus `timeOut`: The timeout duration |
+| `executeLastDataQueryForOneDevice(String db, String device, List sensors, boolean isLegalPathNodes)` | Query the latest data for a single device | `db`: The database name, `device`: The device name, `sensors`: A list of sensors, `isLegalPathNodes`: Whether the path nodes are legal |
+| `executeAggregationQuery(List paths, List aggregations)` | Execute an aggregation query | `paths`: A list of query paths, `aggregations`: A list of aggregation types |
+| `executeAggregationQuery(List paths, List aggregations, long startTime, long endTime)` | Execute an aggregation query with a time range | Same as above, plus `startTime`: The start timestamp, `endTime`: The end timestamp |
+| `executeAggregationQuery(List paths, List aggregations, long startTime, long endTime, long interval)` | Execute an aggregation query with a time interval | Same as above, plus `interval`: The time interval |
+| `executeAggregationQuery(List paths, List aggregations, long startTime, long endTime, long interval, long slidingStep)` | Execute a sliding window aggregation query | Same as above, plus `slidingStep`: The sliding step |
+| `fetchAllConnections()` | Get information of all active connections | No parameters |
+
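+The query interfaces above return a result set that can be iterated row by row. Below is a minimal sketch using `executeQueryStatement` with a timeout; the SQL is a placeholder, and the `SessionDataSet` import path is assumed to be the `org.apache.iotdb.isession` package of the client.
+
+```java
+// SessionDataSet is assumed to live in the isession package of the client
+import org.apache.iotdb.isession.SessionDataSet;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.Session;
+
+public class QueryExample {
+  public static void main(String[] args) throws IoTDBConnectionException, StatementExecutionException {
+    Session session = new Session.Builder().host("127.0.0.1").port(6667).build();
+    session.open();
+    // run a query with a 60-second timeout and print every returned row
+    SessionDataSet dataSet = session.executeQueryStatement("select s1 from root.sg1.d1 limit 10", 60000L);
+    System.out.println(dataSet.getColumnNames());
+    while (dataSet.hasNext()) {
+      System.out.println(dataSet.next());
+    }
+    dataSet.closeOperationHandle();
+    session.close();
+  }
+}
+```
+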
+##### 3.2.5 System Status and Backup
+| **Method Name** | **Function Description** | **Parameter Explanation** |
+|-----------------------------------------------------------------------------------------|--------------------------------------------|------------------------------------------------------------------------------------------------------------|
+| `getBackupConfiguration()` | Get backup configuration information | No parameters |
+| `fetchAllConnections()` | Get information of all active connections | No parameters |
+| `getSystemStatus()` | Get the system status | Deprecated, returns `SystemStatus.NORMAL` |
\ No newline at end of file
diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-Data-Subscription.md b/src/UserGuide/V2.0.1/Tree/API/Programming-Data-Subscription.md
new file mode 100644
index 000000000..89dfbb33c
--- /dev/null
+++ b/src/UserGuide/V2.0.1/Tree/API/Programming-Data-Subscription.md
@@ -0,0 +1,244 @@
+
+
+# Data Sync API
+IoTDB provides a powerful data subscription feature that allows users to obtain newly added data from IoTDB in real time through the subscription SDK. For detailed functional definitions and an introduction, see [Data Sync](../../User-Manual/Data-Sync_timecho.md#Data Sync).
+
+## 1 Core Steps
+
+1. Create Topic: Create a Topic that includes the measurement points you wish to subscribe to.
+2. Subscribe to Topic: A consumer can only subscribe to a topic that has already been created; otherwise the subscription fails. Consumers in the same consumer group evenly distribute the data among themselves.
+3. Consume Data: Only by explicitly subscribing to a specific topic will you receive data from that topic.
+4. Unsubscribe: When a consumer is closed, it will exit the corresponding consumer group and cancel all existing subscriptions.
+
+
+## 2 Detailed Steps
+This section illustrates the core development process; it does not demonstrate every parameter and interface. For a comprehensive overview of all features and parameters, please refer to: [Java Native API](./Programming-Java-Native-API.md#Java Native API)
+
+
+### 2.1 Create a Maven project
+Create a Maven project and import the following dependency (JDK >= 1.8, Maven >= 3.6):
+
+```xml
+<dependencies>
+    <dependency>
+        <groupId>org.apache.iotdb</groupId>
+        <artifactId>iotdb-session</artifactId>
+        <version>${project.version}</version>
+    </dependency>
+</dependencies>
+```
+
+### 2.2 Code Example
+#### 2.2.1 Topic operations
+```java
+import java.util.Optional;
+import java.util.Properties;
+import java.util.Set;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.SubscriptionSession;
+import org.apache.iotdb.session.subscription.model.Topic;
+
+public class DataConsumerExample {
+
+ public static void main(String[] args) throws IoTDBConnectionException, StatementExecutionException {
+ try (SubscriptionSession session = new SubscriptionSession("127.0.0.1", 6667)) {
+ // 1. open session
+ session.open();
+
+ // 2. create a topic of all data
+ Properties sessionConfig = new Properties();
+ sessionConfig.put(TopicConstant.PATH_KEY, "root.**");
+
+ session.createTopic("allData", sessionConfig);
+
+ // 3. show all topics
+ Set<Topic> topics = session.getTopics();
+ System.out.println(topics);
+
+ // 4. show a specific topic
+ Optional<Topic> allData = session.getTopic("allData");
+ System.out.println(allData.get());
+ }
+ }
+}
+```
+#### 2.2.2 Data Consume
+
+##### Scenario-1: Subscribing to newly added real-time data in IoTDB (for scenarios such as dashboard or configuration display)
+
+```java
+import java.io.IOException;
+import java.util.List;
+import java.util.Properties;
+import org.apache.iotdb.rpc.subscription.config.ConsumerConstant;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessage;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessageType;
+import org.apache.iotdb.session.subscription.payload.SubscriptionSessionDataSet;
+import org.apache.tsfile.read.common.RowRecord;
+
+public class DataConsumerExample {
+
+ public static void main(String[] args) throws IOException {
+
+ // 5. create a pull consumer; the subscription is automatically cancelled when the try-with-resources block completes
+ Properties consumerConfig = new Properties();
+ consumerConfig.put(ConsumerConstant.CONSUMER_ID_KEY, "c1");
+ consumerConfig.put(ConsumerConstant.CONSUMER_GROUP_ID_KEY, "cg1");
+ consumerConfig.put(TopicConstant.FORMAT_KEY, TopicConstant.FORMAT_SESSION_DATA_SETS_HANDLER_VALUE);
+ try (SubscriptionPullConsumer pullConsumer = new SubscriptionPullConsumer(consumerConfig)) {
+ pullConsumer.open();
+ pullConsumer.subscribe("topic_all");
+ while (true) {
+ List<SubscriptionMessage> messages = pullConsumer.poll(10000);
+ for (final SubscriptionMessage message : messages) {
+ final short messageType = message.getMessageType();
+ if (SubscriptionMessageType.isValidatedMessageType(messageType)) {
+ for (final SubscriptionSessionDataSet dataSet : message.getSessionDataSetsHandler()) {
+ while (dataSet.hasNext()) {
+ final RowRecord record = dataSet.next();
+ System.out.println(record);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
+```
+##### Scenario-2: Subscribing to newly added TsFiles (for scenarios such as regular data backup)
+
+Prerequisite: The topic to be consumed must use the `TsFileHandler` format, for example: `create topic topic_all_tsfile with ('path'='root.**','format'='TsFileHandler')`
+
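+The same topic can also be created programmatically instead of via SQL; the sketch below reuses the `SubscriptionSession` and `TopicConstant` classes from section 2.2.1.
+
+```java
+import java.util.Properties;
+import org.apache.iotdb.rpc.IoTDBConnectionException;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.SubscriptionSession;
+
+public class TsFileTopicExample {
+  public static void main(String[] args) throws IoTDBConnectionException, StatementExecutionException {
+    try (SubscriptionSession session = new SubscriptionSession("127.0.0.1", 6667)) {
+      session.open();
+      // create a topic whose payload format is TsFileHandler, matching the SQL statement above
+      Properties topicConfig = new Properties();
+      topicConfig.put(TopicConstant.PATH_KEY, "root.**");
+      topicConfig.put(TopicConstant.FORMAT_KEY, TopicConstant.FORMAT_TS_FILE_HANDLER_VALUE);
+      session.createTopic("topic_all_tsfile", topicConfig);
+    }
+  }
+}
+```
+
+With the topic in place, the consumer below subscribes to it and copies the received TsFiles:
+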
+```java
+import java.io.IOException;
+import java.util.List;
+import java.util.Properties;
+import org.apache.iotdb.rpc.subscription.config.ConsumerConstant;
+import org.apache.iotdb.rpc.subscription.config.TopicConstant;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessage;
+
+
+public class DataConsumerExample {
+
+ public static void main(String[] args) throws IOException {
+ // 1. create a pull consumer; the subscription is automatically cancelled when the try-with-resources block completes
+ Properties consumerConfig = new Properties();
+ consumerConfig.put(ConsumerConstant.CONSUMER_ID_KEY, "c1");
+ consumerConfig.put(ConsumerConstant.CONSUMER_GROUP_ID_KEY, "cg1");
+ // 2. Specify the consumption type as the tsfile type
+ consumerConfig.put(TopicConstant.FORMAT_KEY, TopicConstant.FORMAT_TS_FILE_HANDLER_VALUE);
+ consumerConfig.put(ConsumerConstant.FILE_SAVE_DIR_KEY, "/Users/iotdb/Downloads");
+ try (SubscriptionPullConsumer pullConsumer = new SubscriptionPullConsumer(consumerConfig)) {
+ pullConsumer.open();
+ pullConsumer.subscribe("topic_all_tsfile");
+ while (true) {
+ List<SubscriptionMessage> messages = pullConsumer.poll(10000);
+ for (final SubscriptionMessage message : messages) {
+ message.getTsFileHandler().copyFile("/Users/iotdb/Downloads/1.tsfile");
+ }
+ }
+ }
+ }
+}
+```
+
+
+
+
+## 3 Java Native API Description
+
+### 3.1 Parameter List
+The consumer-related parameters can be set through the Properties parameter object. The specific parameters are as follows:
+
+#### SubscriptionConsumer
+
+
+| **Parameter** | **required or optional with default** | **Parameter Meaning** |
+| :---------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- |
+| host | optional: 127.0.0.1 | `String`: The RPC host of a DataNode in IoTDB |
+| port | optional: 6667 | `Integer`: The RPC port of a DataNode in IoTDB |
+| node-urls | optional: 127.0.0.1:6667 | `List<String>`: The RPC addresses of all DataNodes in IoTDB, which can be multiple; either host:port or node-urls can be filled. If both host:port and node-urls are filled, the **union** of host:port and node-urls will be taken to form a new node-urls for application |
+| username | optional: root | `String`: The username of the DataNode in IoTDB |
+| password | optional: root | `String`: The password of the DataNode in IoTDB |
+| groupId | optional | `String`: The consumer group ID; if not specified, a random one is assigned (creating a new consumer group), ensuring that different consumer groups have different IDs |
+| consumerId | optional | `String`: The consumer client ID; if not specified, a random one is assigned, ensuring that consumer client IDs within the same consumer group are all different |
+| heartbeatIntervalMs | optional: 30000 (min: 1000) | `Long`: The interval at which the consumer sends periodic heartbeat requests to the IoTDB DataNode |
+| endpointsSyncIntervalMs | optional: 120000 (min: 5000) | `Long`: The interval at which the consumer detects the expansion or contraction of IoTDB cluster nodes and adjusts the subscription connection |
+| fileSaveDir | optional: Paths.get(System.getProperty("user.dir"), "iotdb-subscription").toString() | `String`: The temporary directory path where the consumer stores the subscribed TsFile files |
+| fileSaveFsync | optional: false | `Boolean`: Whether the consumer actively calls fsync during the subscription of TsFiles |
+
+Special configurations in `SubscriptionPushConsumer`:
+
+| **Parameter** | **required or optional with default** | **Parameter Meaning** |
+| :----------------- | :------------------------------------ | :----------------------------------------------------------------------------- |
+| ackStrategy | optional: `AckStrategy.AFTER_CONSUME` | The acknowledgment mechanism for consumption progress, with the following options: `AckStrategy.BEFORE_CONSUME` (the consumer submits the consumption progress immediately upon receiving the data, before `onReceive`); `AckStrategy.AFTER_CONSUME` (the consumer submits the consumption progress after consuming the data, after `onReceive`) |
+| consumeListener | optional | The callback function for consuming data, which needs to implement the `ConsumeListener` interface, defining the processing logic for consuming `SessionDataSetsHandler` and `TsFileHandler` formatted data |
+| autoPollIntervalMs | optional: 5000 (min: 500) | Long: The time interval at which the consumer automatically pulls data, in **ms** |
+| autoPollTimeoutMs | optional: 10000 (min: 1000) | Long: The timeout duration for the consumer to pull data each time, in **ms** |
+
+Special configurations in `SubscriptionPullConsumer`:
+
+| **Parameter** | **required or optional with default** | **Parameter Meaning** |
+| :-------------------------------------------- | :--------------------------------- | :----------------------------------------------------------- |
+| autoCommit | optional: true | Boolean: Whether to automatically commit the consumption progress. If this parameter is set to false, the `commit` method needs to be called manually to submit the consumption progress |
+| autoCommitInterval | optional: 5000 (min: 500) | Long: The time interval for automatically committing the consumption progress, in **ms**. This parameter only takes effect when the `autoCommit` parameter is set to true |
+
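+As a reference for how these parameters are typically combined, the sketch below builds a `SubscriptionPushConsumer` with the builder-style API; the consumer IDs and topic name are placeholders, and the import paths for `SubscriptionPushConsumer`, `AckStrategy`, and `ConsumeResult` are assumed to mirror the `org.apache.iotdb.session.subscription.consumer` package used above.
+
+```java
+// import paths below are assumed to mirror the SubscriptionPullConsumer package used in this document
+import org.apache.iotdb.session.subscription.consumer.AckStrategy;
+import org.apache.iotdb.session.subscription.consumer.ConsumeResult;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPushConsumer;
+
+public class PushConsumerExample {
+  public static void main(String[] args) throws Exception {
+    try (SubscriptionPushConsumer pushConsumer =
+        new SubscriptionPushConsumer.Builder()
+            .consumerId("c2")
+            .consumerGroupId("cg2")
+            .ackStrategy(AckStrategy.AFTER_CONSUME)
+            .autoPollIntervalMs(5000)
+            .consumeListener(
+                message -> {
+                  // process the pushed message here, e.g. iterate message.getSessionDataSetsHandler()
+                  return ConsumeResult.SUCCESS;
+                })
+            .buildPushConsumer()) {
+      pushConsumer.open();
+      pushConsumer.subscribe("allData");
+      // keep the main thread alive while the push consumer receives data in the background
+      Thread.sleep(60000);
+    }
+  }
+}
+```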
+
+### 3.2 Function List
+#### Data Sync
+##### SubscriptionPullConsumer
+
+| **Function name** | **Description** | **Parameter** |
+|-------------------------------------|--------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `open()` | Opens the consumer connection and starts message consumption. If `autoCommit` is enabled, it will start the automatic commit worker. | None |
+| `close()` | Closes the consumer connection. If `autoCommit` is enabled, it will commit all uncommitted messages before closing. | None |
+| `poll(final Duration timeout)` | Pulls messages with a specified timeout. | `timeout` : The timeout duration. |
+| `poll(final long timeoutMs)` | Pulls messages with a specified timeout in milliseconds. | `timeoutMs` : The timeout duration in milliseconds. |
+| `poll(final Set topicNames, final Duration timeout)` | Pulls messages from specified topics with a specified timeout. | `topicNames` : The set of topics to pull messages from. `timeout`: The timeout duration. |
+| `poll(final Set topicNames, final long timeoutMs)` | Pulls messages from specified topics with a specified timeout in milliseconds. | `topicNames` : The set of topics to pull messages from. `timeoutMs`: The timeout duration in milliseconds. |
+| `commitSync(final SubscriptionMessage message)` | Synchronously commits a single message. | `message` : The message object to be committed. |
+| `commitSync(final Iterable messages)` | Synchronously commits multiple messages. | `messages` : The collection of message objects to be committed. |
+| `commitAsync(final SubscriptionMessage message)` | Asynchronously commits a single message. | `message` : The message object to be committed. |
+| `commitAsync(final Iterable messages)` | Asynchronously commits multiple messages. | `messages` : The collection of message objects to be committed. |
+| `commitAsync(final SubscriptionMessage message, final AsyncCommitCallback callback)` | Asynchronously commits a single message with a specified callback. | `message` : The message object to be committed. `callback` : The callback function to be executed after asynchronous commit. |
+| `commitAsync(final Iterable messages, final AsyncCommitCallback callback)` | Asynchronously commits multiple messages with a specified callback. | `messages` : The collection of message objects to be committed. `callback` : The callback function to be executed after asynchronous commit. |
+
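+As a usage sketch for the interfaces above, the loop below polls with a `Duration` timeout and acknowledges the processed messages explicitly with `commitSync` (useful when `autoCommit` is disabled); the topic name and consumer IDs are placeholders.
+
+```java
+import java.time.Duration;
+import java.util.List;
+import java.util.Properties;
+import org.apache.iotdb.rpc.subscription.config.ConsumerConstant;
+import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer;
+import org.apache.iotdb.session.subscription.payload.SubscriptionMessage;
+
+public class PullAndCommitExample {
+  public static void main(String[] args) throws Exception {
+    Properties consumerConfig = new Properties();
+    consumerConfig.put(ConsumerConstant.CONSUMER_ID_KEY, "c3");
+    consumerConfig.put(ConsumerConstant.CONSUMER_GROUP_ID_KEY, "cg3");
+    try (SubscriptionPullConsumer pullConsumer = new SubscriptionPullConsumer(consumerConfig)) {
+      pullConsumer.open();
+      pullConsumer.subscribe("allData");
+      for (int i = 0; i < 10; i++) {
+        // pull for up to 10 seconds, then acknowledge everything that has been processed
+        List<SubscriptionMessage> messages = pullConsumer.poll(Duration.ofSeconds(10));
+        // ... process the messages here ...
+        pullConsumer.commitSync(messages);
+      }
+    }
+  }
+}
+```
+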
+##### SubscriptionPushConsumer
+
+| **Function name** | **Description** | **Parameter** |
+|-------------------------------------|----------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `open()` | Opens the consumer connection, starts message consumption, and submits the automatic polling worker. | None |
+| `close()` | Closes the consumer connection and stops message consumption. | None |
+| `toString()` | Returns the core configuration information of the consumer object. | None |
+| `coreReportMessage()` | Obtains the key-value representation of the consumer's core configuration. | None |
+| `allReportMessage()` | Obtains the key-value representation of all the consumer's configurations. | None |
+| `buildPushConsumer()` | Builds a `SubscriptionPushConsumer` instance through the `Builder`. | None |
+| `ackStrategy(final AckStrategy ackStrategy)` | Configures the message acknowledgment strategy for the consumer. | `ackStrategy`: The specified message acknowledgment strategy. |
+| `consumeListener(final ConsumeListener consumeListener)` |Configures the message consumption logic for the consumer. | `consumeListener`: The processing logic when the consumer receives messages. |
+| `autoPollIntervalMs(final long autoPollIntervalMs)` | Configures the interval for automatic polling. | `autoPollIntervalMs` : The interval for automatic polling, in milliseconds. |
+| `autoPollTimeoutMs(final long autoPollTimeoutMs)` | Configures the timeout for automatic polling. | `autoPollTimeoutMs`: The timeout for automatic polling, in milliseconds. |
\ No newline at end of file
diff --git a/src/UserGuide/V2.0.1/Tree/API/Programming-Java-Native-API.md b/src/UserGuide/V2.0.1/Tree/API/Programming-Java-Native-API.md
index 08c0cb1f6..e4c04fa89 100644
--- a/src/UserGuide/V2.0.1/Tree/API/Programming-Java-Native-API.md
+++ b/src/UserGuide/V2.0.1/Tree/API/Programming-Java-Native-API.md
@@ -1,842 +1,468 @@
-# Java Native API
+# Session Native API
+
+In the native API of IoTDB, `Session` is the core interface for interacting with the database. It provides a rich set of methods for data writing, querying, and metadata operations. By instantiating a `Session`, you establish a connection to the IoTDB server and perform database operations over that connection. A `Session` is not thread-safe and must not be called concurrently from multiple threads.
-## Installation
+`SessionPool` is a connection pool for `Session`, and programming against `SessionPool` is recommended. In multi-threaded, concurrent scenarios, `SessionPool` manages and allocates connection resources effectively, improving system performance and resource utilization.
-### Dependencies
+## 1 Overview of Steps
+1. Create a Connection Pool Instance: Initialize a SessionPool object to manage multiple Session instances.
+2. Perform Operations: Directly obtain a Session instance from the SessionPool and execute database operations, without the need to open and close connections each time.
+3. Close Connection Pool Resources: When database operations are no longer needed, close the SessionPool to release all related resources.
-* JDK >= 1.8
-* Maven >= 3.6
+## 2 Detailed Steps
+This section provides an overview of the core development process and does not demonstrate all parameters and interfaces. For a complete list of functionalities and parameters, please refer to: [Java Native API](./Programming-Java-Native-API.md#3-native-interface-description) or the [Source Code](https://github.com/apache/iotdb/tree/master/example/session/src/main/java/org/apache/iotdb).
-### Using IoTDB Java Native API with Maven
+### 2.1 Create a Maven Project
+Create a Maven project and add the following dependencies to the pom.xml file (JDK >= 1.8, Maven >= 3.6):
```xml
 <groupId>org.apache.iotdb</groupId>
 <artifactId>iotdb-session</artifactId>
- <version>1.0.0</version>
+
+ <version>${project.version}</version>
```
+### 2.2 Create a Connection Pool Instance
-## Syntax Convention
-
-- **IoTDB-SQL interface:** The input SQL parameter needs to conform to the [syntax conventions](../Reference/Syntax-Rule.md#Literal-Values) and be escaped for JAVA strings. For example, you need to add a backslash before the double-quotes. (That is: after JAVA escaping, it is consistent with the SQL statement executed on the command line.)
-- **Other interfaces:**
- - The node names in path or path prefix as parameter: The node names which should be escaped by backticks (`) in the SQL statement, escaping is required here.
- - Identifiers (such as template names) as parameters: The identifiers which should be escaped by backticks (`) in the SQL statement, and escaping is not required here.
-- **Code example for syntax convention could be found at:** `example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java`
-
-## Native APIs
-
-Here we show the commonly used interfaces and their parameters in the Native API:
-
-### Session Management
-
-* Initialize a Session
-
-``` java
-// use default configuration
-session = new Session.Builder.build();
-
-// initialize with a single node
-session =
- new Session.Builder()
- .host(String host)
- .port(int port)
- .build();
-
-// initialize with multiple nodes
-session =
- new Session.Builder()
- .nodeUrls(List nodeUrls)
- .build();
-
-// other configurations
-session =
- new Session.Builder()
- .fetchSize(int fetchSize)
- .username(String username)
- .password(String password)
- .thriftDefaultBufferSize(int thriftDefaultBufferSize)
- .thriftMaxFrameSize(int thriftMaxFrameSize)
- .enableRedirection(boolean enableRedirection)
- .version(Version version)
- .build();
-```
-
-Version represents the SQL semantic version used by the client, which is used to be compatible with the SQL semantics of 0.12 when upgrading 0.13. The possible values are: `V_0_12`, `V_0_13`, `V_1_0`, etc.
-
-
-* Open a Session
-
-``` java
-void open()
-```
-
-* Open a session, with a parameter to specify whether to enable RPC compression
-
-``` java
-void open(boolean enableRPCCompression)
-```
-
-Notice: this RPC compression status of client must comply with that of IoTDB server
-
-* Close a Session
-
-``` java
-void close()
-```
-
-* SessionPool
-
-We provide a connection pool (`SessionPool) for Native API.
-Using the interface, you need to define the pool size.
-
-If you can not get a session connection in 60 seconds, there is a warning log but the program will hang.
-
-If a session has finished an operation, it will be put back to the pool automatically.
-If a session connection is broken, the session will be removed automatically and the pool will try
-to create a new session and redo the operation.
-You can also specify an url list of multiple reachable nodes when creating a SessionPool, just as you would when creating a Session. To ensure high availability of clients in distributed cluster.
-
-For query operations:
-
-1. When using SessionPool to query data, the result set is `SessionDataSetWrapper`;
-2. Given a `SessionDataSetWrapper`, if you have not scanned all the data in it and stop to use it,
-you have to call `SessionPool.closeResultSet(wrapper)` manually;
-3. When you call `hasNext()` and `next()` of a `SessionDataSetWrapper` and there is an exception, then
-you have to call `SessionPool.closeResultSet(wrapper)` manually;
-4. You can call `getColumnNames()` of `SessionDataSetWrapper` to get the column names of query result;
-
-Examples: ```session/src/test/java/org/apache/iotdb/session/pool/SessionPoolTest.java```
-
-Or `example/session/src/main/java/org/apache/iotdb/SessionPoolExample.java`
-
-
-### Database & Timeseries Management API
-
-#### Database Management
-
-* CREATE DATABASE
-
-``` java
-void setStorageGroup(String storageGroupId)
-```
-
-* Delete one or several databases
-
-``` java
-void deleteStorageGroup(String storageGroup)
-void deleteStorageGroups(List storageGroups)
-```
-
-#### Timeseries Management
-
-* Create one or multiple timeseries
-
-``` java
-void createTimeseries(String path, TSDataType dataType,
- TSEncoding encoding, CompressionType compressor, Map props,
- Map tags, Map attributes, String measurementAlias)
-
-void createMultiTimeseries(List paths, List dataTypes,
- List encodings, List compressors,
- List> propsList, List> tagsList,
- List> attributesList, List measurementAliasList)
-```
-
-* Create aligned timeseries
-```
-void createAlignedTimeseries(String prefixPath, List measurements,
- List dataTypes, List encodings,
- List compressors, List measurementAliasList);
-```
-
-Attention: Alias of measurements are **not supported** currently.
-
-* Delete one or several timeseries
-
-``` java
-void deleteTimeseries(String path)
-void deleteTimeseries(List paths)
-```
-
-* Check whether the specific timeseries exists.
-
-``` java
-boolean checkTimeseriesExists(String path)
-```
-
-#### Schema Template
-
-
-Create a schema template for massive identical devices will help to improve memory performance. You can use Template, InternalNode and MeasurementNode to depict the structure of the template, and use belowed interface to create it inside session.
-
-``` java
-public void createSchemaTemplate(Template template);
-
-Class Template {
- private String name;
- private boolean directShareTime;
- Map children;
- public Template(String name, boolean isShareTime);
-
- public void addToTemplate(Node node);
- public void deleteFromTemplate(String name);
- public void setShareTime(boolean shareTime);
-}
-
-Abstract Class Node {
- private String name;
- public void addChild(Node node);
- public void deleteChild(Node node);
-}
-
-Class MeasurementNode extends Node {
- TSDataType dataType;
- TSEncoding encoding;
- CompressionType compressor;
- public MeasurementNode(String name,
- TSDataType dataType,
- TSEncoding encoding,
- CompressionType compressor);
-}
-```
-
-We strongly suggest you implement templates only with flat-measurement (like object 'flatTemplate' in belowed snippet), since tree-structured template may not be a long-term supported feature in further version of IoTDB.
-
-A snippet of using above Method and Class:
-
-``` java
-MeasurementNode nodeX = new MeasurementNode("x", TSDataType.FLOAT, TSEncoding.RLE, CompressionType.SNAPPY);
-MeasurementNode nodeY = new MeasurementNode("y", TSDataType.FLOAT, TSEncoding.RLE, CompressionType.SNAPPY);
-MeasurementNode nodeSpeed = new MeasurementNode("speed", TSDataType.DOUBLE, TSEncoding.GORILLA, CompressionType.SNAPPY);
-
-// This is the template we suggest to implement
-Template flatTemplate = new Template("flatTemplate");
-template.addToTemplate(nodeX);
-template.addToTemplate(nodeY);
-template.addToTemplate(nodeSpeed);
-
-createSchemaTemplate(flatTemplate);
-```
-
-You can query measurement inside templates with these APIS:
```java
-// Return the amount of measurements inside a template
-public int countMeasurementsInTemplate(String templateName);
-
-// Return true if path points to a measurement, otherwise returne false
-public boolean isMeasurementInTemplate(String templateName, String path);
-
-// Return true if path exists in template, otherwise return false
-public boolean isPathExistInTemplate(String templateName, String path);
-
-// Return all measurements paths inside template
-public List showMeasurementsInTemplate(String templateName);
-
-// Return all measurements paths under the designated patter inside template
-public List showMeasurementsInTemplate(String templateName, String pattern);
-```
-
-To implement schema template, you can set the measurement template named 'templateName' at path 'prefixPath'.
-
-**Please notice that, we strongly recommend not setting templates on the nodes above the database to accommodate future updates and collaboration between modules.**
-
-``` java
-void setSchemaTemplate(String templateName, String prefixPath)
-```
-
-Before setting template, you should firstly create the template using
-
-``` java
-void createSchemaTemplate(Template template)
-```
-
-After setting template to a certain path, you can use the template to create timeseries on given device paths through the following interface, or you can write data directly to trigger timeseries auto creation using schema template under target devices.
-
-``` java
-void createTimeseriesUsingSchemaTemplate(List devicePathList)
-```
-
-After setting template to a certain path, you can query for info about template using belowed interface in session:
-
-``` java
-/** @return All template names. */
-public List showAllTemplates();
-
-/** @return All paths have been set to designated template. */
-public List showPathsTemplateSetOn(String templateName);
-
-/** @return All paths are using designated template. */
-public List showPathsTemplateUsingOn(String templateName)
-```
-
-If you are ready to get rid of schema template, you can drop it with belowed interface. Make sure the template to drop has been unset from MTree.
-
-``` java
-void unsetSchemaTemplate(String prefixPath, String templateName);
-public void dropSchemaTemplate(String templateName);
-```
-
-Unset the measurement template named 'templateName' from path 'prefixPath'. When you issue this interface, you should assure that there is a template named 'templateName' set at the path 'prefixPath'.
-
-Attention: Unsetting the template named 'templateName' from node at path 'prefixPath' or descendant nodes which have already inserted records using template is **not supported**.
-
-
-### Data Manipulation Interface (DML Interface)
-
-### Data Insert API
-
-It is recommended to use insertTablet to help improve write efficiency.
-
-* Insert a Tablet,which is multiple rows of a device, each row has the same measurements
- * **Better Write Performance**
- * **Support batch write**
- * **Support null values**: fill the null value with any value, and then mark the null value via BitMap
-
-``` java
-void insertTablet(Tablet tablet)
-
-public class Tablet {
- /** deviceId of this tablet */
- public String prefixPath;
- /** the list of measurement schemas for creating the tablet */
- private List schemas;
- /** timestamps in this tablet */
- public long[] timestamps;
- /** each object is a primitive type array, which represents values of one measurement */
- public Object[] values;
- /** each bitmap represents the existence of each value in the current column. */
- public BitMap[] bitMaps;
- /** the number of rows to include in this tablet */
- public int rowSize;
- /** the maximum number of rows for this tablet */
- private int maxRowNumber;
- /** whether this tablet store data of aligned timeseries or not */
- private boolean isAligned;
-}
-```
-
-* Insert multiple Tablets
-
-``` java
-void insertTablets(Map tablet)
-```
-
-* Insert a Record, which contains multiple measurement value of a device at a timestamp. This method is equivalent to providing a common interface for multiple data types of values. Later, the value can be cast to the original type through TSDataType.
-
- The correspondence between the Object type and the TSDataType type is shown in the following table.
-
- | TSDataType | Object |
- |------------|--------------|
- | BOOLEAN | Boolean |
- | INT32 | Integer |
- | DATE | LocalDate |
- | INT64 | Long |
- | TIMESTAMP | Long |
- | FLOAT | Float |
- | DOUBLE | Double |
- | TEXT | String, Binary |
- | STRING | String, Binary |
- | BLOB | Binary |
-``` java
-void insertRecord(String deviceId, long time, List measurements,
- List types, List values)
-```
-
-* Insert multiple Records
-
-``` java
-void insertRecords(List deviceIds, List times,
- List> measurementsList, List> typesList,
- List> valuesList)
-```
-* Insert multiple Records that belong to the same device.
- With type info the server has no need to do type inference, which leads a better performance
-
-``` java
-void insertRecordsOfOneDevice(String deviceId, List times,
- List> measurementsList, List