diff --git a/src/.vuepress/sidebar/V1.3.x/en.ts b/src/.vuepress/sidebar/V1.3.x/en.ts
index 99e5c9ac5..2bdfc6031 100644
--- a/src/.vuepress/sidebar/V1.3.x/en.ts
+++ b/src/.vuepress/sidebar/V1.3.x/en.ts
@@ -83,6 +83,7 @@ export const enSidebar = {
{ text: 'Stream Processing', link: 'Streaming_apache' },
{ text: 'Data Sync', link: 'Data-Sync_apache' },
{ text: 'Database Programming', link: 'Database-Programming' },
+ { text: 'UDF', link: 'User-defined-function' },
{ text: 'Database Administration', link: 'Authority-Management' },
{ text: 'Maintennance', link: 'Maintennance' },
],
@@ -164,6 +165,7 @@ export const enSidebar = {
// children: 'structure',
children: [
{ text: 'UDF Libraries', link: 'UDF-Libraries' },
+ { text: 'UDF development', link: 'UDF-development' },
{ text: 'Function and Expression', link: 'Function-and-Expression' },
{ text: 'Common Config Manual', link: 'Common-Config-Manual' },
{ text: 'Status Codes', link: 'Status-Codes' },
diff --git a/src/.vuepress/sidebar/V1.3.x/zh.ts b/src/.vuepress/sidebar/V1.3.x/zh.ts
index 4f94062a4..1dbcd3d7d 100644
--- a/src/.vuepress/sidebar/V1.3.x/zh.ts
+++ b/src/.vuepress/sidebar/V1.3.x/zh.ts
@@ -83,6 +83,7 @@ export const zhSidebar = {
{ text: '流处理框架', link: 'Streaming_apache' },
{ text: '数据同步', link: 'Data-Sync_apache' },
{ text: '数据库编程', link: 'Database-Programming' },
+ { text: '用户自定义函数', link: 'User-defined-function' },
{ text: '权限管理', link: 'Authority-Management' },
{ text: '运维语句', link: 'Maintennance' },
],
@@ -164,6 +165,7 @@ export const zhSidebar = {
// children: 'structure',
children: [
{ text: 'UDF函数库', link: 'UDF-Libraries' },
+ { text: 'UDF开发', link: 'UDF-development' },
{ text: '内置函数与表达式', link: 'Function-and-Expression' },
{ text: '配置参数', link: 'Common-Config-Manual' },
{ text: 'ConfigNode配置参数', link: 'ConfigNode-Config-Manual' },
diff --git a/src/.vuepress/sidebar_timecho/V1.3.x/en.ts b/src/.vuepress/sidebar_timecho/V1.3.x/en.ts
index e1c20151f..07140747b 100644
--- a/src/.vuepress/sidebar_timecho/V1.3.x/en.ts
+++ b/src/.vuepress/sidebar_timecho/V1.3.x/en.ts
@@ -90,6 +90,7 @@ export const enSidebar = {
{ text: 'View', link: 'IoTDB-View_timecho' },
{ text: 'AI Capability', link: 'AINode_timecho' },
{ text: 'Database Programming', link: 'Database-Programming' },
+ { text: 'UDF', link: 'User-defined-function' },
{ text: 'Security Management', link: 'Security-Management_timecho' },
{ text: 'Database Administration', link: 'Authority-Management' },
{ text: 'Maintennance', link: 'Maintennance' },
@@ -175,6 +176,7 @@ export const enSidebar = {
// children: 'structure',
children: [
{ text: 'UDF Libraries', link: 'UDF-Libraries' },
+ { text: 'UDF development', link: 'UDF-development' },
{ text: 'Function and Expression', link: 'Function-and-Expression' },
{ text: 'Common Config Manual', link: 'Common-Config-Manual' },
{ text: 'ConfigNode Config Manual', link: 'ConfigNode-Config-Manual' },
diff --git a/src/.vuepress/sidebar_timecho/V1.3.x/zh.ts b/src/.vuepress/sidebar_timecho/V1.3.x/zh.ts
index 3785d1f03..d9e7e3520 100644
--- a/src/.vuepress/sidebar_timecho/V1.3.x/zh.ts
+++ b/src/.vuepress/sidebar_timecho/V1.3.x/zh.ts
@@ -90,6 +90,7 @@ export const zhSidebar = {
{ text: '视图', link: 'IoTDB-View_timecho' },
{ text: 'AI能力', link: 'AINode_timecho' },
{ text: '数据库编程', link: 'Database-Programming' },
+ { text: '用户自定义函数', link: 'User-defined-function' },
{ text: '安全控制', link: 'Security-Management_timecho' },
{ text: '权限管理', link: 'Authority-Management' },
{ text: '运维语句', link: 'Maintennance' },
@@ -175,6 +176,7 @@ export const zhSidebar = {
// children: 'structure',
children: [
{ text: 'UDF函数库', link: 'UDF-Libraries' },
+ { text: 'UDF开发', link: 'UDF-development' },
{ text: '内置函数与表达式', link: 'Function-and-Expression' },
{ text: '配置参数', link: 'Common-Config-Manual' },
{ text: 'ConfigNode配置参数', link: 'ConfigNode-Config-Manual' },
diff --git a/src/UserGuide/Master/Reference/UDF-Libraries.md b/src/UserGuide/Master/Reference/UDF-Libraries.md
index e3364d513..9361ac142 100644
--- a/src/UserGuide/Master/Reference/UDF-Libraries.md
+++ b/src/UserGuide/Master/Reference/UDF-Libraries.md
@@ -21,10 +21,39 @@
# UDF Libraries
+
+
+Based on the ability of user-defined functions, IoTDB provides a series of functions for temporal data processing, including data quality, data profiling, anomaly detection, frequency domain analysis, data matching, data repairing, sequence discovery, machine learning, etc., which can meet the needs of industrial fields for temporal data processing.
+
+## Installation steps
+
+1. Please obtain the compressed file of the UDF library JAR package that is compatible with the IoTDB version.
+
+ | UDF libraries version | Supported IoTDB versions | Download link |
+ | --------------- | ----------------- | ------------------------------------------------------------ |
+ | UDF-1.3.3.zip | V1.3.3 and above | [UDF.zip](https://alioss.timecho.com/upload/UDF-1.3.3.zip) |
+ | UDF-1.3.2.zip | V1.0.0~V1.3.2 | [UDF.zip](https://alioss.timecho.com/upload/UDF-1.3.2.zip) |
+
+2. Place the library-udf.jar file from the compressed package into the `/iotdb-enterprise-x.x.x.x-bin/ext/udf` directory of IoTDB.
+3. In the SQL command line terminal (CLI) or visualization console (Workbench) SQL operation interface of IoTDB, execute the corresponding function registration statement as follows.
+4. Batch registration: two registration methods are available — using the registration script, or executing the full set of SQL statements
+- Register Script
+ - Copy the registration script (register-UDF.sh or register-UDF.bat) from the compressed package to the `tools` directory of IoTDB as needed, and modify the parameters in the script (default is host=127.0.0.1, rpcPort=6667, user=root, pass=root);
+ - Start IoTDB service, run registration script to batch register UDF
+
+- All SQL statements
+  - Open the SQL file in the compressed package, copy all SQL statements, and execute all SQL statements in the SQL command line terminal (CLI) of IoTDB or the SQL operation interface of the visualization console (Workbench) to batch register UDF
+
## Data Quality
### Completeness
+#### Registration statement
+
+```sql
+create function completeness as 'org.apache.iotdb.library.dquality.UDTFCompleteness'
+```
+
#### Usage
This function is used to calculate the completeness of time series. The input series are divided into several continuous and non overlapping windows. The timestamp of the first data point and the completeness of each window will be output.
@@ -150,6 +179,12 @@ Output series:
### Consistency
+#### Registration statement
+
+```sql
+create function consistency as 'org.apache.iotdb.library.dquality.UDTFConsistency'
+```
+
#### Usage
This function is used to calculate the consistency of time series. The input series are divided into several continuous and non overlapping windows. The timestamp of the first data point and the consistency of each window will be output.
@@ -274,6 +309,12 @@ Output series:
### Timeliness
+#### Registration statement
+
+```sql
+create function timeliness as 'org.apache.iotdb.library.dquality.UDTFTimeliness'
+```
+
#### Usage
This function is used to calculate the timeliness of time series. The input series are divided into several continuous and non overlapping windows. The timestamp of the first data point and the timeliness of each window will be output.
@@ -398,6 +439,12 @@ Output series:
### Validity
+#### Registration statement
+
+```sql
+create function validity as 'org.apache.iotdb.library.dquality.UDTFValidity'
+```
+
#### Usage
This function is used to calculate the Validity of time series. The input series are divided into several continuous and non overlapping windows. The timestamp of the first data point and the Validity of each window will be output.
@@ -547,11 +594,17 @@ Output series:
### ACF
+#### Registration statement
+
+```sql
+create function acf as 'org.apache.iotdb.library.dprofile.UDTFACF'
+```
+
#### Usage
This function is used to calculate the auto-correlation factor of the input time series,
which equals to cross correlation between the same series.
-For more information, please refer to [XCorr](./UDF-Libraries.md#XCorr) function.
+For more information, please refer to [XCorr](./UDF-Libraries.md#xcorr) function.
**Name:** ACF
@@ -606,6 +659,12 @@ Output series:
### Distinct
+#### Registration statement
+
+```sql
+create function distinct as 'org.apache.iotdb.library.dprofile.UDTFDistinct'
+```
+
#### Usage
This function returns all unique values in time series.
@@ -659,6 +718,12 @@ Output series:
### Histogram
+#### Registration statement
+
+```sql
+create function histogram as 'org.apache.iotdb.library.dprofile.UDTFHistogram'
+```
+
#### Usage
This function is used to calculate the distribution histogram of a single column of numerical data.
@@ -738,6 +803,12 @@ Output series:
### Integral
+#### Registration statement
+
+```sql
+create function integral as 'org.apache.iotdb.library.dprofile.UDAFIntegral'
+```
+
#### Usage
This function is used to calculate the integration of time series,
@@ -829,6 +900,12 @@ $$\frac{1}{2\times 60}[(1+2) \times 1 + (2+5) \times 1 + (5+6) \times 1 + (6+7)
### IntegralAvg
+#### Registration statement
+
+```sql
+create function integralavg as 'org.apache.iotdb.library.dprofile.UDAFIntegralAvg'
+```
+
#### Usage
This function is used to calculate the function average of time series.
@@ -890,6 +967,12 @@ $$\frac{1}{2}[(1+2) \times 1 + (2+5) \times 1 + (5+6) \times 1 + (6+7) \times 1
### Mad
+#### Registration statement
+
+```sql
+create function mad as 'org.apache.iotdb.library.dprofile.UDAFMad'
+```
+
#### Usage
The function is used to compute the exact or approximate median absolute deviation (MAD) of a numeric time series. MAD is the median of the deviation of each element from the elements' median.
@@ -988,6 +1071,12 @@ Output series:
### Median
+#### Registration statement
+
+```sql
+create function median as 'org.apache.iotdb.library.dprofile.UDAFMedian'
+```
+
#### Usage
The function is used to compute the exact or approximate median of a numeric time series. Median is the value separating the higher half from the lower half of a data sample.
@@ -1058,6 +1147,12 @@ Output series:
### MinMax
+#### Registration statement
+
+```sql
+create function minmax as 'org.apache.iotdb.library.dprofile.UDTFMinMax'
+```
+
#### Usage
This function is used to standardize the input series with min-max. Minimum value is transformed to 0; maximum value is transformed to 1.
@@ -1197,6 +1292,12 @@ Output series:
### MvAvg
+#### Registration statement
+
+```sql
+create function mvavg as 'org.apache.iotdb.library.dprofile.UDTFMvAvg'
+```
+
#### Usage
This function is used to calculate moving average of input series.
@@ -1277,6 +1378,12 @@ Output series:
### PACF
+#### Registration statement
+
+```sql
+create function pacf as 'org.apache.iotdb.library.dprofile.UDTFPACF'
+```
+
#### Usage
This function is used to calculate partial autocorrelation of input series by solving Yule-Walker equation. For some cases, the equation may not be solved, and NaN will be output.
@@ -1346,6 +1453,12 @@ Output series:
### Percentile
+#### Registration statement
+
+```sql
+create function percentile as 'org.apache.iotdb.library.dprofile.UDAFPercentile'
+```
+
#### Usage
The function is used to compute the exact or approximate percentile of a numeric time series. A percentile is value of element in the certain rank of the sorted series.
@@ -1419,6 +1532,12 @@ Output series:
### Quantile
+#### Registration statement
+
+```sql
+create function quantile as 'org.apache.iotdb.library.dprofile.UDAFQuantile'
+```
+
#### Usage
The function is used to compute the approximate quantile of a numeric time series. A quantile is value of element in the certain rank of the sorted series.
@@ -1492,6 +1611,12 @@ Output series:
### Period
+#### Registration statement
+
+```sql
+create function period as 'org.apache.iotdb.library.dprofile.UDAFPeriod'
+```
+
#### Usage
The function is used to compute the period of a numeric time series.
@@ -1541,6 +1666,12 @@ Output series:
### QLB
+#### Registration statement
+
+```sql
+create function qlb as 'org.apache.iotdb.library.dprofile.UDTFQLB'
+```
+
#### Usage
This function is used to calculate Ljung-Box statistics $Q_{LB}$ for time series, and convert it to p value.
@@ -1625,6 +1756,12 @@ Output series:
### Resample
+#### Registration statement
+
+```sql
+create function re_sample as 'org.apache.iotdb.library.dprofile.UDTFResample'
+```
+
#### Usage
This function is used to resample the input series according to a given frequency,
@@ -1754,6 +1891,12 @@ Output series:
### Sample
+#### Registration statement
+
+```sql
+create function sample as 'org.apache.iotdb.library.dprofile.UDTFSample'
+```
+
#### Usage
This function is used to sample the input series,
@@ -1852,6 +1995,12 @@ Output series:
### Segment
+#### Registration statement
+
+```sql
+create function segment as 'org.apache.iotdb.library.dprofile.UDTFSegment'
+```
+
#### Usage
This function is used to segment a time series into subsequences according to linear trend, and returns linear fitted values of first values in each subsequence or every data point.
@@ -1944,6 +2093,12 @@ Output series:
### Skew
+#### Registration statement
+
+```sql
+create function skew as 'org.apache.iotdb.library.dprofile.UDAFSkew'
+```
+
#### Usage
This function is used to calculate the population skewness.
@@ -2005,6 +2160,12 @@ Output series:
### Spline
+#### Registration statement
+
+```sql
+create function spline as 'org.apache.iotdb.library.dprofile.UDTFSpline'
+```
+
#### Usage
This function is used to calculate cubic spline interpolation of input series.
@@ -2210,6 +2371,12 @@ Output series:
### Spread
+#### Registration statement
+
+```sql
+create function spread as 'org.apache.iotdb.library.dprofile.UDAFSpread'
+```
+
#### Usage
This function is used to calculate the spread of time series, that is, the maximum value minus the minimum value.
@@ -2327,6 +2494,12 @@ Output series:
### ZScore
+#### Registration statement
+
+```sql
+create function zscore as 'org.apache.iotdb.library.dprofile.UDTFZScore'
+```
+
#### Usage
This function is used to standardize the input series with z-score.
@@ -2433,6 +2606,12 @@ Output series:
### IQR
+#### Registration statement
+
+```sql
+create function iqr as 'org.apache.iotdb.library.anomaly.UDTFIQR'
+```
+
#### Usage
This function is used to detect anomalies based on IQR. Points distributing beyond 1.5 times IQR are selected.
@@ -2500,6 +2679,12 @@ Output series:
### KSigma
+#### Registration statement
+
+```sql
+create function ksigma as 'org.apache.iotdb.library.anomaly.UDTFKSigma'
+```
+
#### Usage
This function is used to detect anomalies based on the Dynamic K-Sigma Algorithm.
@@ -2565,6 +2750,12 @@ Output series:
### LOF
+#### Registration statement
+
+```sql
+create function LOF as 'org.apache.iotdb.library.anomaly.UDTFLOF'
+```
+
#### Usage
This function is used to detect density anomaly of time series. According to k-th distance calculation parameter and local outlier factor (lof) threshold, the function judges if a set of input values is an density anomaly, and a bool mark of anomaly values will be output.
@@ -2691,6 +2882,12 @@ Output series:
### MissDetect
+#### Registration statement
+
+```sql
+create function missdetect as 'org.apache.iotdb.library.anomaly.UDTFMissDetect'
+```
+
#### Usage
This function is used to detect missing anomalies.
@@ -2779,6 +2976,12 @@ Output series:
### Range
+#### Registration statement
+
+```sql
+create function range as 'org.apache.iotdb.library.anomaly.UDTFRange'
+```
+
#### Usage
This function is used to detect range anomaly of time series. According to upper bound and lower bound parameters, the function judges if a input value is beyond range, aka range anomaly, and a new time series of anomaly will be output.
@@ -2844,6 +3047,12 @@ Output series:
### TwoSidedFilter
+#### Registration statement
+
+```sql
+create function twosidedfilter as 'org.apache.iotdb.library.anomaly.UDTFTwoSidedFilter'
+```
+
#### Usage
The function is used to filter anomalies of a numeric time series based on two-sided window detection.
@@ -2937,6 +3146,12 @@ Output series:
### Outlier
+#### Registration statement
+
+```sql
+create function outlier as 'org.apache.iotdb.library.anomaly.UDTFOutlier'
+```
+
#### Usage
This function is used to detect distance-based outliers. For each point in the current window, if the number of its neighbors within the distance of neighbor distance threshold is less than the neighbor count threshold, the point in detected as an outlier.
@@ -3260,6 +3475,12 @@ Output series:
### Conv
+#### Registration statement
+
+```sql
+create function conv as 'org.apache.iotdb.library.frequency.UDTFConv'
+```
+
#### Usage
This function is used to calculate the convolution, i.e. polynomial multiplication.
@@ -3307,6 +3528,12 @@ Output series:
### Deconv
+#### Registration statement
+
+```sql
+create function deconv as 'org.apache.iotdb.library.frequency.UDTFDeconv'
+```
+
#### Usage
This function is used to calculate the deconvolution, i.e. polynomial division.
@@ -3387,6 +3614,12 @@ Output series:
### DWT
+#### Registration statement
+
+```sql
+create function dwt as 'org.apache.iotdb.library.frequency.UDTFDWT'
+```
+
#### Usage
This function is used to calculate 1d discrete wavelet transform of a numerical series.
@@ -3468,6 +3701,12 @@ Output series:
### FFT
+#### Registration statement
+
+```sql
+create function fft as 'org.apache.iotdb.library.frequency.UDTFFFT'
+```
+
#### Usage
This function is used to calculate the fast Fourier transform (FFT) of a numerical series.
@@ -3592,6 +3831,12 @@ The last data point is reserved to indicate the length of the series.
### HighPass
+#### Registration statement
+
+```sql
+create function highpass as 'org.apache.iotdb.library.frequency.UDTFHighPass'
+```
+
#### Usage
This function performs low-pass filtering on the input series and extracts components above the cutoff frequency.
@@ -3679,6 +3924,12 @@ Note: The input is $y=sin(2\pi t/4)+2sin(2\pi t/5)$ with a length of 20. Thus, t
### IFFT
+#### Registration statement
+
+```sql
+create function ifft as 'org.apache.iotdb.library.frequency.UDTFIFFT'
+```
+
#### Usage
This function treats the two input series as the real and imaginary part of a complex series, performs an inverse fast Fourier transform (IFFT), and outputs the real part of the result.
@@ -3756,6 +4007,12 @@ Output series:
### LowPass
+#### Registration statement
+
+```sql
+create function lowpass as 'org.apache.iotdb.library.frequency.UDTFLowPass'
+```
+
#### Usage
This function performs low-pass filtering on the input series and extracts components below the cutoff frequency.
@@ -3866,6 +4123,12 @@ Note: The input is $y=sin(2\pi t/4)+2sin(2\pi t/5)$ with a length of 20. Thus, t
### Cov
+#### Registration statement
+
+```sql
+create function cov as 'org.apache.iotdb.library.dmatch.UDAFCov'
+```
+
#### Usage
This function is used to calculate the population covariance.
@@ -3927,6 +4190,12 @@ Output series:
### DTW
+#### Registration statement
+
+```sql
+create function dtw as 'org.apache.iotdb.library.dmatch.UDAFDtw'
+```
+
#### Usage
This function is used to calculate the DTW distance between two input series.
@@ -3992,6 +4261,12 @@ Output series:
### Pearson
+#### Registration statement
+
+```sql
+create function pearson as 'org.apache.iotdb.library.dmatch.UDAFPearson'
+```
+
#### Usage
This function is used to calculate the Pearson Correlation Coefficient.
@@ -4053,6 +4328,12 @@ Output series:
### PtnSym
+#### Registration statement
+
+```sql
+create function ptnsym as 'org.apache.iotdb.library.dmatch.UDTFPtnSym'
+```
+
#### Usage
This function is used to find all symmetric subseries in the input whose degree of symmetry is less than the threshold.
@@ -4113,6 +4394,12 @@ Output series:
### XCorr
+#### Registration statement
+
+```sql
+create function xcorr as 'org.apache.iotdb.library.dmatch.UDTFXCorr'
+```
+
#### Usage
This function is used to calculate the cross correlation function of given two time series.
@@ -4202,6 +4489,14 @@ Output series:
### TimestampRepair
+#### Registration statement
+
+```sql
+create function timestamprepair as 'org.apache.iotdb.library.drepair.UDTFTimestampRepair'
+```
+
+#### Usage
+
This function is used for timestamp repair.
According to the given standard time interval,
the method of minimizing the repair cost is adopted.
@@ -4303,6 +4598,12 @@ Output series:
### ValueFill
+#### Registration statement
+
+```sql
+create function valuefill as 'org.apache.iotdb.library.drepair.UDTFValueFill'
+```
+
#### Usage
This function is used to impute time series. Several methods are supported.
@@ -4415,6 +4716,12 @@ Output series:
### ValueRepair
+#### Registration statement
+
+```sql
+create function valuerepair as 'org.apache.iotdb.library.drepair.UDTFValueRepair'
+```
+
#### Usage
This function is used to repair the value of the time series.
@@ -4723,6 +5030,12 @@ Output series:
### ConsecutiveSequences
+#### Registration statement
+
+```sql
+create function consecutivesequences as 'org.apache.iotdb.library.series.UDTFConsecutiveSequences'
+```
+
#### Usage
This function is used to find locally longest consecutive subsequences in strictly equispaced multidimensional data.
@@ -4811,6 +5124,12 @@ Output series:
### ConsecutiveWindows
+#### Registration statement
+
+```sql
+create function consecutivewindows as 'org.apache.iotdb.library.series.UDTFConsecutiveWindows'
+```
+
#### Usage
This function is used to find consecutive windows of specified length in strictly equispaced multidimensional data.
@@ -4897,6 +5216,12 @@ Output series:
### AR
+#### Registration statement
+
+```sql
+create function ar as 'org.apache.iotdb.library.dlearn.UDTFAR'
+```
+
#### Usage
This function is used to learn the coefficients of the autoregressive models for a time series.
diff --git a/src/UserGuide/Master/Reference/UDF-development.md b/src/UserGuide/Master/Reference/UDF-development.md
new file mode 100644
index 000000000..8e14f31db
--- /dev/null
+++ b/src/UserGuide/Master/Reference/UDF-development.md
@@ -0,0 +1,646 @@
+# UDF development
+
+## UDF development
+
+### UDF Development Dependencies
+
+If you use [Maven](http://search.maven.org/), you can search for the development dependencies listed below from the [Maven repository](http://search.maven.org/) . Please note that you must select the same dependency version as the target IoTDB server version for development.
+
+``` xml
+
The framework will call the `transform` method once for each row of raw data input, with k columns of time series and 1 row of data input, and 1 column of time series and 1 row of data output. It can be used in any clause and expression where scalar functions appear, such as select clauses, where clauses, etc. | void transform(Column[] columns, ColumnBuilder builder) throws Exception<br>Object transform(Row row) throws Exception |
+| RowByRowAccessStrategy | Customize time series generation function to process raw data line by line.
The framework will call the `transform` method once for each row of raw data input, inputting k columns of time series and 1 row of data, and outputting 1 column of time series and n rows of data.
When a sequence is input, the row serves as a data point for the input sequence.
When multiple sequences are input, after aligning the input sequences in time, each row serves as a data point for the input sequence.
(In a row of data, there may be a column with a `null` value, but not all columns are `null`) | void transform(Row row, PointCollector collector) throws Exception |
+| SlidingTimeWindowAccessStrategy | Customize time series generation functions to process raw data in a sliding time window manner.
The framework will call the `transform` method once for each raw data input window, input k columns of time series m rows of data, and output 1 column of time series n rows of data.
A window may contain multiple rows of data, and after aligning the input sequence in time, each window serves as a data point for the input sequence.
(Each window may have i rows, and each row of data may have a column with a `null` value, but not all of them are `null`) | void transform(RowWindow rowWindow, PointCollector collector) throws Exception |
+| SlidingSizeWindowAccessStrategy | Customize the time series generation function to process raw data in a fixed number of rows, meaning that each data processing window will contain a fixed number of rows of data (except for the last window).
The framework will call the `transform` method once for each raw data input window, input k columns of time series m rows of data, and output 1 column of time series n rows of data.
A window may contain multiple rows of data, and after aligning the input sequence in time, each window serves as a data point for the input sequence.
(Each window may have i rows, and each row of data may have a column with a `null` value, but not all of them are `null`) | void transform(RowWindow rowWindow, PointCollector collector) throws Exception |
+| SessionTimeWindowAccessStrategy | Customize time series generation functions to process raw data in a session window format.
The framework will call the `transform` method once for each raw data input window, input k columns of time series m rows of data, and output 1 column of time series n rows of data.
A window may contain multiple rows of data, and after aligning the input sequence in time, each window serves as a data point for the input sequence.
(Each window may have i rows, and each row of data may have a column with a `null` value, but not all of them are `null`) | void transform(RowWindow rowWindow, PointCollector collector) throws Exception |
+| StateWindowAccessStrategy | Customize time series generation functions to process raw data in a state window format.
The framework will call the `transform` method once for each raw data input window, inputting 1 column of time series m rows of data and outputting 1 column of time series n rows of data.
A window may contain multiple rows of data, and currently only supports opening windows for one physical quantity, which is one column of data. | void transform(RowWindow rowWindow, PointCollector collector) throws Exception |
+
+
+#### Interface Description:
+
+- `RowByRowAccessStrategy`: The construction of `RowByRowAccessStrategy` does not require any parameters.
+
+- `SlidingTimeWindowAccessStrategy`
+
+Window opening diagram:
+
+
+
+`SlidingTimeWindowAccessStrategy`: `SlidingTimeWindowAccessStrategy` has many constructors, you can pass 3 types of parameters to them:
+
+- Parameter 1: The display window on the time axis
+
+The first type of parameters are optional. If the parameters are not provided, the beginning time of the display window will be set to the same as the minimum timestamp of the query result set, and the ending time of the display window will be set to the same as the maximum timestamp of the query result set.
+
+- Parameter 2: Time interval for dividing the time axis (should be positive)
+- Parameter 3: Time sliding step (not required to be greater than or equal to the time interval, but must be a positive number)
+
+The sliding step parameter is also optional. If the parameter is not provided, the sliding step will be set to the same as the time interval for dividing the time axis.
+
+The relationship between the three types of parameters can be seen in the figure below. Please see the Javadoc for more details.
+
+

+
+`SlidingSizeWindowAccessStrategy`: `SlidingSizeWindowAccessStrategy` has many constructors, you can pass 2 types of parameters to them:
+
+* Parameter 1: Window size. This parameter specifies the number of data rows contained in a data processing window. Note that the number of data rows in some of the last time windows may be less than the specified number of data rows.
+* Parameter 2: Sliding step. This parameter means the number of rows between the first point of the next window and the first point of the current window. (This parameter is not required to be greater than or equal to the window size, but must be a positive number)
+
+The sliding step parameter is optional. If the parameter is not provided, the sliding step will be set to the same as the window size.
+
+- `SessionTimeWindowAccessStrategy`
+
+Window opening diagram: **Time intervals less than or equal to the given minimum time interval `sessionGap` are assigned in one group.**
+
+
+
+`SessionTimeWindowAccessStrategy`: `SessionTimeWindowAccessStrategy` has many constructors, you can pass 2 types of parameters to them:
+
+- Parameter 1: The display window on the time axis.
+- Parameter 2: The minimum time interval `sessionGap` of two adjacent windows.
+
+- `StateWindowAccessStrategy`
+
+Window opening diagram: **For numerical data, if the state difference is less than or equal to the given threshold `delta`, it will be assigned in one group.**
+
+
+
+`StateWindowAccessStrategy` has four constructors.
+
+- Constructor 1: For numerical data, there are 3 parameters: the time axis can display the start and end time of the time window and the threshold `delta` for the allowable change within a single window.
+- Constructor 2: For text data and boolean data, there are 3 parameters: the time axis can be provided to display the start and end time of the time window. For both data types, the data within a single window is the same, and there is no need to provide an allowable change threshold.
+- Constructor 3: For numerical data, there is 1 parameter: you can only provide the threshold delta that is allowed to change within a single window. The start time of the time axis display time window will be defined as the smallest timestamp in the entire query result set, and the time axis display time window end time will be defined as the largest timestamp in the entire query result set.
+- Constructor 4: For text data and boolean data, you can provide no parameter. The start and end timestamps are explained in Constructor 3.
+
+StateWindowAccessStrategy can only take one column as input for now.
+
+Please see the Javadoc for more details.
+
+ 2.2.2 **setOutputDataType**
+
+Note that the type of output sequence you set here determines the type of data that the `PointCollector` can actually receive in the `transform` method. The relationship between the output data type set in `setOutputDataType` and the actual data output type that `PointCollector` can receive is as follows:
+
+| Output Data Type Set in `setOutputDataType` | Data Type that `PointCollector` Can Receive |
+| :------------------------------------------ | :----------------------------------------------------------- |
+| INT32 | int |
+| INT64 | long |
+| FLOAT | float |
+| DOUBLE | double |
+| BOOLEAN | boolean |
+| TEXT | java.lang.String and org.apache.iotdb.udf.api.type.Binary |
+
+The type of output time series of a UDTF is determined at runtime, which means that a UDTF can dynamically determine the type of output time series according to the type of input time series.
+Here is a simple example:
+
+```java
+void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) throws Exception {
+ // do something
+ // ...
+
+ configurations
+ .setAccessStrategy(new RowByRowAccessStrategy())
+ .setOutputDataType(parameters.getDataType(0));
+}
+```
+
+3. **void transform(Row row, PointCollector collector) throws Exception**
+
+You need to implement this method when you specify the strategy of UDF to read the original data as `RowByRowAccessStrategy`.
+
+This method processes the raw data one row at a time. The raw data is input from `Row` and output by `PointCollector`. You can output any number of data points in one `transform` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
+
+The following is a complete UDF example that implements the `void transform(Row row, PointCollector collector) throws Exception` method. It is an adder that receives two columns of time series as input. When two data points in a row are not `null`, this UDF will output the algebraic sum of these two data points.
+
+``` java
+import org.apache.iotdb.udf.api.UDTF;
+import org.apache.iotdb.udf.api.access.Row;
+import org.apache.iotdb.udf.api.collector.PointCollector;
+import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
+import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
+import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
+import org.apache.iotdb.udf.api.type.Type;
+
+public class Adder implements UDTF {
+
+ @Override
+ public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
+ configurations
+ .setOutputDataType(Type.INT64)
+ .setAccessStrategy(new RowByRowAccessStrategy());
+ }
+
+ @Override
+ public void transform(Row row, PointCollector collector) throws Exception {
+ if (row.isNull(0) || row.isNull(1)) {
+ return;
+ }
+ collector.putLong(row.getTime(), row.getLong(0) + row.getLong(1));
+ }
+}
+```
+
+4. **void transform(RowWindow rowWindow, PointCollector collector) throws Exception**
+
+You need to implement this method when you specify the strategy of UDF to read the original data as `SlidingTimeWindowAccessStrategy` or `SlidingSizeWindowAccessStrategy`.
+
+This method processes a batch of data in a fixed number of rows or a fixed time interval each time, and we call the container containing this batch of data a window. The raw data is input from `RowWindow` and output by `PointCollector`. `RowWindow` can help you access a batch of `Row`, it provides a set of interfaces for random access and iterative access to this batch of `Row`. You can output any number of data points in one `transform` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
+
+Below is a complete UDF example that implements the `void transform(RowWindow rowWindow, PointCollector collector) throws Exception` method. It is a counter that receives any number of time series as input, and its function is to count and output the number of data rows in each time window within a specified time range.
+
+```java
+import java.io.IOException;
+import org.apache.iotdb.udf.api.UDTF;
+import org.apache.iotdb.udf.api.access.Row;
+import org.apache.iotdb.udf.api.access.RowWindow;
+import org.apache.iotdb.udf.api.collector.PointCollector;
+import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
+import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
+import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy;
+import org.apache.iotdb.udf.api.type.Type;
+
+public class Counter implements UDTF {
+
+ @Override
+ public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
+ configurations
+ .setOutputDataType(Type.INT32)
+ .setAccessStrategy(new SlidingTimeWindowAccessStrategy(
+ parameters.getLong("time_interval"),
+ parameters.getLong("sliding_step"),
+ parameters.getLong("display_window_begin"),
+ parameters.getLong("display_window_end")));
+ }
+
+ @Override
+ public void transform(RowWindow rowWindow, PointCollector collector) {
+ if (rowWindow.windowSize() != 0) {
+ collector.putInt(rowWindow.windowStartTime(), rowWindow.windowSize());
+ }
+ }
+}
+```
+
+5. **void terminate(PointCollector collector) throws Exception**
+
+In some scenarios, a UDF needs to traverse all the original data to calculate the final output data points. The `terminate` interface provides support for those scenarios.
+
+This method is called after all `transform` calls are executed and before the `beforeDestory` method is executed. You can implement the `transform` method to perform pure data processing (without outputting any data points), and implement the `terminate` method to output the processing results.
+
+The processing results need to be output by the `PointCollector`. You can output any number of data points in one `terminate` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
+
+Below is a complete UDF example that implements the `void terminate(PointCollector collector) throws Exception` method. It takes one time series whose data type is `INT32` as input, and outputs the maximum value point of the series.
+
+```java
+import java.io.IOException;
+import org.apache.iotdb.udf.api.UDTF;
+import org.apache.iotdb.udf.api.access.Row;
+import org.apache.iotdb.udf.api.collector.PointCollector;
+import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
+import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
+import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
+import org.apache.iotdb.udf.api.type.Type;
+
+public class Max implements UDTF {
+
+ private Long time;
+ private int value;
+
+ @Override
+ public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
+ configurations
+ .setOutputDataType(Type.INT32)
+ .setAccessStrategy(new RowByRowAccessStrategy());
+ }
+
+ @Override
+ public void transform(Row row, PointCollector collector) {
+ if (row.isNull(0)) {
+ return;
+ }
+ int candidateValue = row.getInt(0);
+ if (time == null || value < candidateValue) {
+ time = row.getTime();
+ value = candidateValue;
+ }
+ }
+
+ @Override
+ public void terminate(PointCollector collector) throws IOException {
+ if (time != null) {
+ collector.putInt(time, value);
+ }
+ }
+}
+```
+
+6. **void beforeDestroy()**
+
+The method for terminating a UDF.
+
+This method is called by the framework. For a UDF instance, `beforeDestroy` will be called after the last record is processed. In the entire life cycle of the instance, `beforeDestroy` will only be called once.
+
+
+
+### UDAF (User Defined Aggregation Function)
+
+A complete definition of UDAF involves two classes, `State` and `UDAF`.
+
+#### State Class
+
+To write your own `State`, you need to implement the `org.apache.iotdb.udf.api.State` interface.
+
+#### Interface Description:
+
+| Interface Definition | Description | Required to Implement |
+| -------------------------------- | ------------------------------------------------------------ | --------------------- |
+| void reset() | To reset the `State` object to its initial state, you need to fill in the initial values of the fields in the `State` class within this method as if you were writing a constructor. | Required |
+| byte[] serialize() | Serializes `State` to binary data. This method is used for IoTDB internal `State` passing. Note that the order of serialization must be consistent with the following deserialization methods. | Required |
+| void deserialize(byte[] bytes) | Deserializes binary data to `State`. This method is used for IoTDB internal `State` passing. Note that the order of deserialization must be consistent with the serialization method above. | Required |
+
+#### Detailed interface introduction:
+
+1. **void reset()**
+
+This method resets the `State` to its initial state, you need to fill in the initial values of the fields in the `State` object in this method. For optimization reasons, IoTDB reuses `State` as much as possible internally, rather than creating a new `State` for each group, which would introduce unnecessary overhead. When `State` has finished updating the data in a group, this method is called to reset to the initial state as a way to process the next group.
+
+In the case of `State` for averaging (aka `avg`), for example, you would need the sum of the data, `sum`, and the number of entries in the data, `count`, and initialize both to 0 in the `reset()` method.
+
+```java
+class AvgState implements State {
+ double sum;
+
+ long count;
+
+ @Override
+ public void reset() {
+ sum = 0;
+ count = 0;
+ }
+
+ // other methods
+}
+```
+
+2. **byte[] serialize()/void deserialize(byte[] bytes)**
+
+These methods serialize the `State` into binary data, and deserialize the `State` from the binary data. IoTDB, as a distributed database, involves passing data among different nodes, so you need to write these two methods to enable the passing of the State among different nodes. Note that the order of serialization and deserialization must be consistent.
+
+In the case of `State` for averaging (aka `avg`), for example, you can convert the content of State to `byte[]` array and read out the content of State from `byte[]` array in any way you want, the following shows the code for serialization/deserialization using `ByteBuffer` introduced by Java8:
+
+```java
+@Override
+public byte[] serialize() {
+ ByteBuffer buffer = ByteBuffer.allocate(Double.BYTES + Long.BYTES);
+ buffer.putDouble(sum);
+ buffer.putLong(count);
+
+ return buffer.array();
+}
+
+@Override
+public void deserialize(byte[] bytes) {
+ ByteBuffer buffer = ByteBuffer.wrap(bytes);
+ sum = buffer.getDouble();
+ count = buffer.getLong();
+}
+```
+
+
+
+#### UDAF Classes
+
+To write a UDAF, you need to implement the `org.apache.iotdb.udf.api.UDAF` interface.
+
+#### Interface Description:
+
+| Interface definition | Description | Required to Implement |
+| ------------------------------------------------------------ | ------------------------------------------------------------ | --------------------- |
+| void validate(UDFParameterValidator validator) throws Exception | This method is mainly used to validate `UDFParameters` and it is executed before `beforeStart(UDFParameters, UDTFConfigurations)` is called. | Optional |
+| void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception | Initialization method that invokes user-defined initialization behavior before UDAF processes the input data. Unlike UDTF, configuration is of type `UDAFConfiguration`. | Required |
+| State createState() | To create a `State` object, usually just call the default constructor and modify the default initial value as needed. | Required |
+| void addInput(State state, Column[] columns, BitMap bitMap) | Update `State` object according to the incoming data `Column[]` in batch, note that last column `columns[columns.length - 1]` always represents the time column. In addition, `BitMap` represents the data that has been filtered out before, you need to manually determine whether the corresponding data has been filtered out when writing this method. | Required |
+| void combineState(State state, State rhs) | Merge `rhs` state into `state` state. In a distributed scenario, the same set of data may be distributed on different nodes, IoTDB generates a `State` object for the partial data on each node, and then calls this method to merge it into the complete `State`. | Required |
+| void outputFinal(State state, ResultValue resultValue) | Computes the final aggregated result based on the data in `State`. Note that according to the semantics of the aggregation, only one value can be output per group. | Required |
+| void beforeDestroy() | This method is called by the framework after the last input data is processed, and will only be called once in the life cycle of each UDF instance. | Optional |
+
+In the life cycle of a UDAF instance, the calling sequence of each method is as follows:
+
+1. State createState()
+2. void validate(UDFParameterValidator validator) throws Exception
+3. void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception
+4. void addInput(State state, Column[] columns, BitMap bitMap)
+5. void combineState(State state, State rhs)
+6. void outputFinal(State state, ResultValue resultValue)
+7. void beforeDestroy()
+
+Similar to UDTF, every time the framework executes a UDAF query, a new UDF instance will be constructed. When the query ends, the corresponding instance will be destroyed. Therefore, the internal data of the instances in different UDAF queries (even in the same SQL statement) are isolated. You can maintain some state data in the UDAF without considering the influence of concurrency and other factors.
+
+#### Detailed interface introduction:
+
+
+1. **void validate(UDFParameterValidator validator) throws Exception**
+
+Same as UDTF, the `validate` method is used to validate the parameters entered by the user.
+
+In this method, you can limit the number and types of input time series, check the attributes of user input, or perform any custom verification.
+
+2. **void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception**
+
+ The `beforeStart` method does the same things as the UDTF's `beforeStart` method:
+
+1. Use UDFParameters to get the time series paths and parse key-value pair attributes entered by the user.
+2. Set the strategy to access the raw data and set the output data type in UDAFConfigurations.
+3. Create resources, such as establishing external connections, opening files, etc.
+
+The role of the `UDFParameters` type can be seen above.
+
+2.2 **UDAFConfigurations**
+
+The difference from UDTF is that UDAF uses `UDAFConfigurations` as the type of `configuration` object.
+
+Currently, this class only supports setting the type of output data.
+
+```java
+void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
+ // parameters
+ // ...
+
+ // configurations
+ configurations
+ .setOutputDataType(Type.INT32);
+}
+```
+
+The relationship between the output type set in `setOutputDataType` and the type of data output that `ResultValue` can actually receive is as follows:
+
+| The output type set in `setOutputDataType` | The output type that `ResultValue` can actually receive |
+| ------------------------------------------ | ------------------------------------------------------- |
+| INT32 | int |
+| INT64 | long |
+| FLOAT | float |
+| DOUBLE | double |
+| BOOLEAN | boolean |
+| TEXT | org.apache.iotdb.udf.api.type.Binary |
+
+The output type of the UDAF is determined at runtime. You can dynamically determine the output sequence type based on the input type.
+
+Here is a simple example:
+
+```java
+void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
+ // do something
+ // ...
+
+ configurations
+ .setOutputDataType(parameters.getDataType(0));
+}
+```
+
+3. **State createState()**
+
+
+This method creates and initializes a `State` object for UDAF. Due to the limitations of the Java language, you can only call the default constructor for the `State` class. The default constructor assigns a default initial value to all the fields in the class, and if that initial value does not meet your requirements, you need to initialize them manually within this method.
+
+The following is an example that includes manual initialization. Suppose you want to implement an aggregate function that multiply all numbers in the group, then your initial `State` value should be set to 1, but the default constructor initializes it to 0, so you need to initialize `State` manually after calling the default constructor:
+
+```java
+public State createState() {
+ MultiplyState state = new MultiplyState();
+ state.result = 1;
+ return state;
+}
+```
+
+4. **void addInput(State state, Column[] columns, BitMap bitMap)**
+
+This method updates the `State` object with the raw input data. For performance reasons, also to align with the IoTDB vectorized query engine, the raw input data is no longer a data point, but an array of columns ``Column[]``. Note that the last column (i.e. `columns[columns.length - 1]`) is always the time column, so you can also do different operations in UDAF depending on the time.
+
+Since the input parameter is not of a single data point type, but of multiple columns, you need to manually filter some of the data in the columns, which is why the third parameter, `BitMap`, exists. It identifies which of these columns have been filtered out, so you don't have to think about the filtered data in any case.
+
+Here's an example of `addInput()` that counts the number of items (aka count). It shows how you can use `BitMap` to ignore data that has been filtered out. Note that due to the limitations of the Java language, you need to explicitly cast the `State` object from the type defined in the interface to a custom `State` type at the beginning of the method, otherwise you won't be able to use the `State` object.
+
+```java
+public void addInput(State state, Column[] columns, BitMap bitMap) {
+ CountState countState = (CountState) state;
+
+ int count = columns[0].getPositionCount();
+ for (int i = 0; i < count; i++) {
+ if (bitMap != null && !bitMap.isMarked(i)) {
+ continue;
+ }
+ if (!columns[0].isNull(i)) {
+ countState.count++;
+ }
+ }
+}
+```
+
+5. **void combineState(State state, State rhs)**
+
+
+This method combines two `State`s, or more precisely, updates the first `State` object with the second `State` object. IoTDB is a distributed database, and the data of the same group may be distributed on different nodes. For performance reasons, IoTDB will first aggregate some of the data on each node into `State`, and then merge the `State`s on different nodes that belong to the same group, which is what `combineState` does.
+
+Here's an example of `combineState()` for averaging (aka avg). Similar to `addInput`, you need to do an explicit type conversion for the two `State`s at the beginning. Also note that you are updating the value of the first `State` with the contents of the second `State`.
+
+```java
+public void combineState(State state, State rhs) {
+ AvgState avgState = (AvgState) state;
+ AvgState avgRhs = (AvgState) rhs;
+
+ avgState.count += avgRhs.count;
+ avgState.sum += avgRhs.sum;
+}
+```
+
+6. **void outputFinal(State state, ResultValue resultValue)**
+
+This method works by calculating the final result from `State`. You need to access the various fields in `State`, derive the final result, and set the final result into the `ResultValue` object. IoTDB internally calls this method once at the end for each group. Note that according to the semantics of aggregation, the final result can only be one value.
+
+Here is another `outputFinal` example for averaging (aka avg). In addition to the forced type conversion at the beginning, you will also see a specific use of the `ResultValue` object, where the final result is set by `setXXX` (where `XXX` is the type name).
+
+```java
+public void outputFinal(State state, ResultValue resultValue) {
+ AvgState avgState = (AvgState) state;
+
+ if (avgState.count != 0) {
+ resultValue.setDouble(avgState.sum / avgState.count);
+ } else {
+ resultValue.setNull();
+ }
+}
+```
+
+7. **void beforeDestroy()**
+
+
+The method for terminating a UDF.
+
+This method is called by the framework. For a UDF instance, `beforeDestroy` will be called after the last record is processed. In the entire life cycle of the instance, `beforeDestroy` will only be called once.
+
+
+### Maven Project Example
+
+If you use Maven, you can build your own UDF project referring to our **udf-example** module. You can find the project [here](https://github.com/apache/iotdb/tree/master/example/udf).
+
+
+## Contribute universal built-in UDF functions to IoTDB
+
+This part mainly introduces how external users can contribute their own UDFs to the IoTDB community.
+
+#### Prerequisites
+
+1. UDFs must be universal.
+
+ The "universal" mentioned here refers to: UDFs can be widely used in some scenarios. In other words, the UDF function must have reuse value and may be directly used by other users in the community.
+
+ If you are not sure whether the UDF you want to contribute is universal, you can send an email to `dev@iotdb.apache.org` or create an issue to initiate a discussion.
+
+2. The UDF you are going to contribute has been well tested and can run normally in the production environment.
+
+
+#### What you need to prepare
+
+1. UDF source code
+2. Test cases
+3. Instructions
+
+#### UDF Source Code
+
+1. Create the UDF main class and related classes in `iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/builtin` or in its subfolders.
+2. Register your UDF in `iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/builtin/BuiltinTimeSeriesGeneratingFunction.java`.
+
+#### Test Cases
+
+At a minimum, you need to write integration tests for the UDF.
+
+You can add a test class in `integration-test/src/test/java/org/apache/iotdb/db/it/udf`.
+
+
+#### Instructions
+
+The instructions need to include: the name and the function of the UDF, the attribute parameters that must be provided when the UDF is executed, the applicable scenarios, and the usage examples, etc.
+
+The instructions for use should include both Chinese and English versions. Instructions for use should be added separately in `docs/zh/UserGuide/Operation Manual/DML Data Manipulation Language.md` and `docs/UserGuide/Operation Manual/DML Data Manipulation Language.md`.
+
+#### Submit a PR
+
+When you have prepared the UDF source code, test cases, and instructions, you are ready to submit a Pull Request (PR) on [Github](https://github.com/apache/iotdb). You can refer to our code contribution guide to submit a PR: [Development Guide](https://iotdb.apache.org/Community/Development-Guide.html).
+
+
+After the PR review is approved and merged, your UDF has already contributed to the IoTDB community!
diff --git a/src/UserGuide/Master/User-Manual/Database-Programming.md b/src/UserGuide/Master/User-Manual/Database-Programming.md
index 98f097c2d..9367c865e 100644
--- a/src/UserGuide/Master/User-Manual/Database-Programming.md
+++ b/src/UserGuide/Master/User-Manual/Database-Programming.md
@@ -1037,889 +1037,3 @@ SELECT avg(count_s1) from root.sg_count.d;
| `continuous_query_submit_thread` | The number of threads in the scheduled thread pool that submit continuous query tasks periodically | int32 | 2 |
| `continuous_query_min_every_interval_in_ms` | The minimum value of the continuous query execution time interval | duration | 1000 |
-## USER-DEFINED FUNCTION (UDF)
-
-IoTDB provides a variety of built-in functions to meet your computing needs, and you can also create user defined functions to meet more computing needs.
-
-This document describes how to write, register and use a UDF.
-
-
-### UDF Types
-
-In IoTDB, you can expand two types of UDF:
-
-| UDF Class | Description |
-| --------------------------------------------------- | ------------------------------------------------------------ |
-| UDTF(User Defined Timeseries Generating Function) | This type of function can take **multiple** time series as input, and output **one** time series, which can have any number of data points. |
-| UDAF(User Defined Aggregation Function) | Custom Aggregation Functions. This type of function can take one time series as input, and output **one** aggregated data point for each group based on the GROUP BY type. |
-
-### UDF Development Dependencies
-
-If you use [Maven](http://search.maven.org/), you can search for the development dependencies listed below from the [Maven repository](http://search.maven.org/) . Please note that you must select the same dependency version as the target IoTDB server version for development.
-
-``` xml
-
-
-`SlidingTimeWindowAccessStrategy`: `SlidingTimeWindowAccessStrategy` has many constructors, you can pass 3 types of parameters to them:
-
-- Parameter 1: The display window on the time axis
-- Parameter 2: Time interval for dividing the time axis (should be positive)
-- Parameter 3: Time sliding step (not required to be greater than or equal to the time interval, but must be a positive number)
-
-The first type of parameters are optional. If the parameters are not provided, the beginning time of the display window will be set to the same as the minimum timestamp of the query result set, and the ending time of the display window will be set to the same as the maximum timestamp of the query result set.
-
-The sliding step parameter is also optional. If the parameter is not provided, the sliding step will be set to the same as the time interval for dividing the time axis.
-
-The relationship between the three types of parameters can be seen in the figure below. Please see the Javadoc for more details.
-
-
-
-`SlidingSizeWindowAccessStrategy`: `SlidingSizeWindowAccessStrategy` has many constructors, you can pass 2 types of parameters to them:
-
-* Parameter 1: Window size. This parameter specifies the number of data rows contained in a data processing window. Note that the number of data rows in some of the last time windows may be less than the specified number of data rows.
-* Parameter 2: Sliding step. This parameter means the number of rows between the first point of the next window and the first point of the current window. (This parameter is not required to be greater than or equal to the window size, but must be a positive number)
-
-The sliding step parameter is optional. If the parameter is not provided, the sliding step will be set to the same as the window size.
-
-The `SessionTimeWindowAccessStrategy` is shown schematically below. **Time intervals less than or equal to the given minimum time interval `sessionGap` are assigned in one group**
-
-
-`SessionTimeWindowAccessStrategy`: `SessionTimeWindowAccessStrategy` has many constructors, you can pass 2 types of parameters to them:
-
-- Parameter 1: The display window on the time axis.
-- Parameter 2: The minimum time interval `sessionGap` of two adjacent windows.
-
-
-The `StateWindowAccessStrategy` is shown schematically below. **For numerical data, if the state difference is less than or equal to the given threshold `delta`, it will be assigned in one group. **
-
-
-`StateWindowAccessStrategy` has four constructors.
-
-- Constructor 1: For numerical data, there are 3 parameters: the time axis can display the start and end time of the time window and the threshold `delta` for the allowable change within a single window.
-- Constructor 2: For text data and boolean data, there are 3 parameters: the time axis can be provided to display the start and end time of the time window. For both data types, the data within a single window is same, and there is no need to provide an allowable change threshold.
-- Constructor 3: For numerical data, there are 1 parameters: you can only provide the threshold delta that is allowed to change within a single window. The start time of the time axis display time window will be defined as the smallest timestamp in the entire query result set, and the time axis display time window end time will be defined as The largest timestamp in the entire query result set.
-- Constructor 4: For text data and boolean data, you can provide no parameter. The start and end timestamps are explained in Constructor 3.
-
-StateWindowAccessStrategy can only take one column as input for now.
-
-Please see the Javadoc for more details.
-
-
-
-###### setOutputDataType
-
-Note that the type of output sequence you set here determines the type of data that the `PointCollector` can actually receive in the `transform` method. The relationship between the output data type set in `setOutputDataType` and the actual data output type that `PointCollector` can receive is as follows:
-
-| Output Data Type Set in `setOutputDataType` | Data Type that `PointCollector` Can Receive |
-| :------------------------------------------ | :----------------------------------------------------------- |
-| `INT32` | `int` |
-| `INT64` | `long` |
-| `FLOAT` | `float` |
-| `DOUBLE` | `double` |
-| `BOOLEAN` | `boolean` |
-| `TEXT` | `java.lang.String` and `org.apache.iotdb.udf.api.type.Binary` |
-
-The type of output time series of a UDTF is determined at runtime, which means that a UDTF can dynamically determine the type of output time series according to the type of input time series.
-Here is a simple example:
-
-```java
-void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) throws Exception {
- // do something
- // ...
-
- configurations
- .setAccessStrategy(new RowByRowAccessStrategy())
- .setOutputDataType(parameters.getDataType(0));
-}
-```
-
-
-
-#### void transform(Row row, PointCollector collector) throws Exception
-
-You need to implement this method when you specify the strategy of UDF to read the original data as `RowByRowAccessStrategy`.
-
-This method processes the raw data one row at a time. The raw data is input from `Row` and output by `PointCollector`. You can output any number of data points in one `transform` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
-
-The following is a complete UDF example that implements the `void transform(Row row, PointCollector collector) throws Exception` method. It is an adder that receives two columns of time series as input. When two data points in a row are not `null`, this UDF will output the algebraic sum of these two data points.
-
-``` java
-import org.apache.iotdb.udf.api.UDTF;
-import org.apache.iotdb.udf.api.access.Row;
-import org.apache.iotdb.udf.api.collector.PointCollector;
-import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
-import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
-import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
-import org.apache.iotdb.udf.api.type.Type;
-
-public class Adder implements UDTF {
-
- @Override
- public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
- configurations
- .setOutputDataType(TSDataType.INT64)
- .setAccessStrategy(new RowByRowAccessStrategy());
- }
-
- @Override
- public void transform(Row row, PointCollector collector) throws Exception {
- if (row.isNull(0) || row.isNull(1)) {
- return;
- }
- collector.putLong(row.getTime(), row.getLong(0) + row.getLong(1));
- }
-}
-```
-
-
-
-#### void transform(RowWindow rowWindow, PointCollector collector) throws Exception
-
-You need to implement this method when you specify the strategy of UDF to read the original data as `SlidingTimeWindowAccessStrategy` or `SlidingSizeWindowAccessStrategy`.
-
-This method processes a batch of data in a fixed number of rows or a fixed time interval each time, and we call the container containing this batch of data a window. The raw data is input from `RowWindow` and output by `PointCollector`. `RowWindow` can help you access a batch of `Row`, it provides a set of interfaces for random access and iterative access to this batch of `Row`. You can output any number of data points in one `transform` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
-
-Below is a complete UDF example that implements the `void transform(RowWindow rowWindow, PointCollector collector) throws Exception` method. It is a counter that receives any number of time series as input, and its function is to count and output the number of data rows in each time window within a specified time range.
-
-```java
-import java.io.IOException;
-import org.apache.iotdb.udf.api.UDTF;
-import org.apache.iotdb.udf.api.access.Row;
-import org.apache.iotdb.udf.api.access.RowWindow;
-import org.apache.iotdb.udf.api.collector.PointCollector;
-import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
-import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
-import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy;
-import org.apache.iotdb.udf.api.type.Type;
-
-public class Counter implements UDTF {
-
- @Override
- public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
- configurations
- .setOutputDataType(TSDataType.INT32)
- .setAccessStrategy(new SlidingTimeWindowAccessStrategy(
- parameters.getLong("time_interval"),
- parameters.getLong("sliding_step"),
- parameters.getLong("display_window_begin"),
- parameters.getLong("display_window_end")));
- }
-
- @Override
- public void transform(RowWindow rowWindow, PointCollector collector) {
- if (rowWindow.windowSize() != 0) {
- collector.putInt(rowWindow.windowStartTime(), rowWindow.windowSize());
- }
- }
-}
-```
-
-
-
-#### void terminate(PointCollector collector) throws Exception
-
-In some scenarios, a UDF needs to traverse all the original data to calculate the final output data points. The `terminate` interface provides support for those scenarios.
-
-This method is called after all `transform` calls are executed and before the `beforeDestory` method is executed. You can implement the `transform` method to perform pure data processing (without outputting any data points), and implement the `terminate` method to output the processing results.
-
-The processing results need to be output by the `PointCollector`. You can output any number of data points in one `terminate` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
-
-Below is a complete UDF example that implements the `void terminate(PointCollector collector) throws Exception` method. It takes one time series whose data type is `INT32` as input, and outputs the maximum value point of the series.
-
-```java
-import java.io.IOException;
-import org.apache.iotdb.udf.api.UDTF;
-import org.apache.iotdb.udf.api.access.Row;
-import org.apache.iotdb.udf.api.collector.PointCollector;
-import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
-import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
-import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
-import org.apache.iotdb.udf.api.type.Type;
-
-public class Max implements UDTF {
-
- private Long time;
- private int value;
-
- @Override
- public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
- configurations
- .setOutputDataType(TSDataType.INT32)
- .setAccessStrategy(new RowByRowAccessStrategy());
- }
-
- @Override
- public void transform(Row row, PointCollector collector) {
- if (row.isNull(0)) {
- return;
- }
- int candidateValue = row.getInt(0);
- if (time == null || value < candidateValue) {
- time = row.getTime();
- value = candidateValue;
- }
- }
-
- @Override
- public void terminate(PointCollector collector) throws IOException {
- if (time != null) {
- collector.putInt(time, value);
- }
- }
-}
-```
-
-
-
-#### void beforeDestroy()
-
-The method for terminating a UDF.
-
-This method is called by the framework. For a UDF instance, `beforeDestroy` will be called after the last record is processed. In the entire life cycle of the instance, `beforeDestroy` will only be called once.
-
-
-
-### UDAF (User Defined Aggregation Function)
-
-A complete definition of UDAF involves two classes, `State` and `UDAF`.
-
-#### State Class
-
-To write your own `State`, you need to implement the `org.apache.iotdb.udf.api.State` interface.
-
-The following table shows all the interfaces available for user implementation.
-
-| Interface Definition | Description | Required to Implement |
-| -------------------------------- | ------------------------------------------------------------ | --------------------- |
-| `void reset()` | To reset the `State` object to its initial state, you need to fill in the initial values of the fields in the `State` class within this method as if you were writing a constructor. | Required |
-| `byte[] serialize()` | Serializes `State` to binary data. This method is used for IoTDB internal `State` passing. Note that the order of serialization must be consistent with the following deserialization methods. | Required |
-| `void deserialize(byte[] bytes)` | Deserializes binary data to `State`. This method is used for IoTDB internal `State` passing. Note that the order of deserialization must be consistent with the serialization method above. | Required |
-
-The following section describes the usage of each interface in detail.
-
-
-
-##### void reset()
-
-This method resets the `State` to its initial state, you need to fill in the initial values of the fields in the `State` object in this method. For optimization reasons, IoTDB reuses `State` as much as possible internally, rather than creating a new `State` for each group, which would introduce unnecessary overhead. When `State` has finished updating the data in a group, this method is called to reset to the initial state as a way to process the next group.
-
-In the case of `State` for averaging (aka `avg`), for example, you would need the sum of the data, `sum`, and the number of entries in the data, `count`, and initialize both to 0 in the `reset()` method.
-
-```java
-class AvgState implements State {
- double sum;
-
- long count;
-
- @Override
- public void reset() {
- sum = 0;
- count = 0;
- }
-
- // other methods
-}
-```
-
-
-
-##### byte[] serialize()/void deserialize(byte[] bytes)
-
-These methods serialize the `State` into binary data, and deserialize the `State` from the binary data. IoTDB, as a distributed database, involves passing data among different nodes, so you need to write these two methods to enable the passing of the State among different nodes. Note that the order of serialization and deserialization must be the consistent.
-
-In the case of `State` for averaging (aka `avg`), for example, you can convert the content of State to `byte[]` array and read out the content of State from `byte[]` array in any way you want, the following shows the code for serialization/deserialization using `ByteBuffer` introduced by Java8:
-
-```java
-@Override
-public byte[] serialize() {
- ByteBuffer buffer = ByteBuffer.allocate(Double.BYTES + Long.BYTES);
- buffer.putDouble(sum);
- buffer.putLong(count);
-
- return buffer.array();
-}
-
-@Override
-public void deserialize(byte[] bytes) {
- ByteBuffer buffer = ByteBuffer.wrap(bytes);
- sum = buffer.getDouble();
- count = buffer.getLong();
-}
-```
-
-
-
-#### UDAF Classes
-
-To write a UDAF, you need to implement the `org.apache.iotdb.udf.api.UDAF` interface.
-
-The following table shows all the interfaces available for user implementation.
-
-| Interface definition | Description | Required to Implement |
-| ------------------------------------------------------------ | ------------------------------------------------------------ | --------------------- |
-| `void validate(UDFParameterValidator validator) throws Exception` | This method is mainly used to validate `UDFParameters` and it is executed before `beforeStart(UDFParameters, UDTFConfigurations)` is called. | Optional |
-| `void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception` | Initialization method that invokes user-defined initialization behavior before UDAF processes the input data. Unlike UDTF, configuration is of type `UDAFConfiguration`. | Required |
-| `State createState()` | To create a `State` object, usually just call the default constructor and modify the default initial value as needed. | Required |
-| `void addInput(State state, Column[] columns, BitMap bitMap)` | Update `State` object according to the incoming data `Column[]` in batch, note that last column `columns[columns.length - 1]` always represents the time column. In addition, `BitMap` represents the data that has been filtered out before, you need to manually determine whether the corresponding data has been filtered out when writing this method. | Required |
-| `void combineState(State state, State rhs)` | Merge `rhs` state into `state` state. In a distributed scenario, the same set of data may be distributed on different nodes, IoTDB generates a `State` object for the partial data on each node, and then calls this method to merge it into the complete `State`. | Required |
-| `void outputFinal(State state, ResultValue resultValue)` | Computes the final aggregated result based on the data in `State`. Note that according to the semantics of the aggregation, only one value can be output per group. | Required |
-| `void beforeDestroy() ` | This method is called by the framework after the last input data is processed, and will only be called once in the life cycle of each UDF instance. | Optional |
-
-In the life cycle of a UDAF instance, the calling sequence of each method is as follows:
-
-1. `State createState()`
-2. `void validate(UDFParameterValidator validator) throws Exception`
-3. `void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception`
-4. `void addInput(State state, Column[] columns, BitMap bitMap)`
-5. `void combineState(State state, State rhs)`
-6. `void outputFinal(State state, ResultValue resultValue)`
-7. `void beforeDestroy()`
-
-Similar to UDTF, every time the framework executes a UDAF query, a new UDF instance will be constructed. When the query ends, the corresponding instance will be destroyed. Therefore, the internal data of the instances in different UDAF queries (even in the same SQL statement) are isolated. You can maintain some state data in the UDAF without considering the influence of concurrency and other factors.
-
-The usage of each interface will be described in detail below.
-
-
-
-##### void validate(UDFParameterValidator validator) throws Exception
-
-Same as UDTF, the `validate` method is used to validate the parameters entered by the user.
-
-In this method, you can limit the number and types of input time series, check the attributes of user input, or perform any custom verification.
-
-
-
-##### void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception
-
- The `beforeStart` method does the same thing as the UDAF:
-
-1. Use UDFParameters to get the time series paths and parse key-value pair attributes entered by the user.
-2. Set the strategy to access the raw data and set the output data type in UDAFConfigurations.
-3. Create resources, such as establishing external connections, opening files, etc.
-
-The role of the `UDFParameters` type can be seen above.
-
-###### UDAFConfigurations
-
-The difference from UDTF is that UDAF uses `UDAFConfigurations` as the type of `configuration` object.
-
-Currently, this class only supports setting the type of output data.
-
-```java
-void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
- // parameters
- // ...
-
- // configurations
- configurations
- .setOutputDataType(Type.INT32); }
-}
-```
-
-The relationship between the output type set in `setOutputDataType` and the type of data output that `ResultValue` can actually receive is as follows:
-
-| The output type set in `setOutputDataType` | The output type that `ResultValue` can actually receive |
-| ------------------------------------------ | ------------------------------------------------------- |
-| `INT32` | `int` |
-| `INT64` | `long` |
-| `FLOAT` | `float` |
-| `DOUBLE` | `double` |
-| `BOOLEAN` | `boolean` |
-| `TEXT` | `org.apache.iotdb.udf.api.type.Binary` |
-
-The output type of the UDAF is determined at runtime. You can dynamically determine the output sequence type based on the input type.
-
-Here is a simple example:
-
-```java
-void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
- // do something
- // ...
-
- configurations
- .setOutputDataType(parameters.getDataType(0));
-}
-```
-
-
-
-##### State createState()
-
-This method creates and initializes a `State` object for UDAF. Due to the limitations of the Java language, you can only call the default constructor for the `State` class. The default constructor assigns a default initial value to all the fields in the class, and if that initial value does not meet your requirements, you need to initialize them manually within this method.
-
-The following is an example that includes manual initialization. Suppose you want to implement an aggregate function that multiply all numbers in the group, then your initial `State` value should be set to 1, but the default constructor initializes it to 0, so you need to initialize `State` manually after calling the default constructor:
-
-```java
-public State createState() {
- MultiplyState state = new MultiplyState();
- state.result = 1;
- return state;
-}
-```
-
-
-
-##### void addInput(State state, Column[] columns, BitMap bitMap)
-
-This method updates the `State` object with the raw input data. For performance reasons, also to align with the IoTDB vectorized query engine, the raw input data is no longer a data point, but an array of columns ``Column[]``. Note that the last column (i.e. `columns[columns.length - 1]`) is always the time column, so you can also do different operations in UDAF depending on the time.
-
-Since the input parameter is not of a single data point type, but of multiple columns, you need to manually filter some of the data in the columns, which is why the third parameter, `BitMap`, exists. It identifies which of these columns have been filtered out, so you don't have to think about the filtered data in any case.
-
-Here's an example of `addInput()` that counts the number of items (aka count). It shows how you can use `BitMap` to ignore data that has been filtered out. Note that due to the limitations of the Java language, you need to do the explicit cast the `State` object from type defined in the interface to a custom `State` type at the beginning of the method, otherwise you won't be able to use the `State` object.
-
-```java
-public void addInput(State state, Column[] columns, BitMap bitMap) {
- CountState countState = (CountState) state;
-
- int count = columns[0].getPositionCount();
- for (int i = 0; i < count; i++) {
- if (bitMap != null && !bitMap.isMarked(i)) {
- continue;
- }
- if (!columns[0].isNull(i)) {
- countState.count++;
- }
- }
-}
-```
-
-
-
-##### void combineState(State state, State rhs)
-
-This method combines two `State`s, or more precisely, updates the first `State` object with the second `State` object. IoTDB is a distributed database, and the data of the same group may be distributed on different nodes. For performance reasons, IoTDB will first aggregate some of the data on each node into `State`, and then merge the `State`s on different nodes that belong to the same group, which is what `combineState` does.
-
-Here's an example of `combineState()` for averaging (aka avg). Similar to `addInput`, you need to do an explicit type conversion for the two `State`s at the beginning. Also note that you are updating the value of the first `State` with the contents of the second `State`.
-
-```java
-public void combineState(State state, State rhs) {
- AvgState avgState = (AvgState) state;
- AvgState avgRhs = (AvgState) rhs;
-
- avgState.count += avgRhs.count;
- avgState.sum += avgRhs.sum;
-}
-```
-
-
-
-##### void outputFinal(State state, ResultValue resultValue)
-
-This method works by calculating the final result from `State`. You need to access the various fields in `State`, derive the final result, and set the final result into the `ResultValue` object.IoTDB internally calls this method once at the end for each group. Note that according to the semantics of aggregation, the final result can only be one value.
-
-Here is another `outputFinal` example for averaging (aka avg). In addition to the forced type conversion at the beginning, you will also see a specific use of the `ResultValue` object, where the final result is set by `setXXX` (where `XXX` is the type name).
-
-```java
-public void outputFinal(State state, ResultValue resultValue) {
- AvgState avgState = (AvgState) state;
-
- if (avgState.count != 0) {
- resultValue.setDouble(avgState.sum / avgState.count);
- } else {
- resultValue.setNull();
- }
-}
-```
-
-
-
-##### void beforeDestroy()
-
-The method for terminating a UDF.
-
-This method is called by the framework. For a UDF instance, `beforeDestroy` will be called after the last record is processed. In the entire life cycle of the instance, `beforeDestroy` will only be called once.
-
-
-
-### Maven Project Example
-
-If you use Maven, you can build your own UDF project referring to our **udf-example** module. You can find the project [here](https://github.com/apache/iotdb/tree/master/example/udf).
-
-
-
-### UDF Registration
-
-The process of registering a UDF in IoTDB is as follows:
-
-1. Implement a complete UDF class, assuming the full class name of this class is `org.apache.iotdb.udf.ExampleUDTF`.
-2. Package your project into a JAR. If you use Maven to manage your project, you can refer to the Maven project example above.
-3. Make preparations for registration according to the registration mode. For details, see the following example.
-4. You can use following SQL to register UDF.
-
-```sql
-CREATE FUNCTION
+| UDF Class | AccessStrategy | Description |
+|-----------|----------------|-------------|
+| UDTF | MAPPABLE_ROW_BY_ROW | Custom scalar function, input k columns of time series and 1 row of data, output 1 column of time series and 1 row of data, can be used in any clause and expression that appears in the scalar function, such as select clause, where clause, etc. |
+| UDTF | ROW_BY_ROW / SLIDING_TIME_WINDOW / SLIDING_SIZE_WINDOW / SESSION_TIME_WINDOW / STATE_WINDOW | Custom time series generation function, input k columns of time series m rows of data, output 1 column of time series n rows of data, the number of input rows m can be different from the number of output rows n, and can only be used in SELECT clauses. |
+| UDAF | - | Custom aggregation function, input k columns of time series m rows of data, output 1 column of time series 1 row of data, can be used in any clause and expression that appears in the aggregation function, such as select clause, having clause, etc. |
+
+`SlidingTimeWindowAccessStrategy`: `SlidingTimeWindowAccessStrategy` has many constructors, you can pass 3 types of parameters to them:
+
+- Parameter 1: The display window on the time axis
+
+The first type of parameters are optional. If the parameters are not provided, the beginning time of the display window will be set to the same as the minimum timestamp of the query result set, and the ending time of the display window will be set to the same as the maximum timestamp of the query result set.
+
+- Parameter 2: Time interval for dividing the time axis (should be positive)
+- Parameter 3: Time sliding step (not required to be greater than or equal to the time interval, but must be a positive number)
+
+The sliding step parameter is also optional. If the parameter is not provided, the sliding step will be set to the same as the time interval for dividing the time axis.
+
+The relationship between the three types of parameters can be seen in the figure below. Please see the Javadoc for more details.
+
+
+
+`SlidingSizeWindowAccessStrategy`: `SlidingSizeWindowAccessStrategy` has many constructors, you can pass 2 types of parameters to them:
+
+* Parameter 1: Window size. This parameter specifies the number of data rows contained in a data processing window. Note that the number of data rows in some of the last time windows may be less than the specified number of data rows.
+* Parameter 2: Sliding step. This parameter means the number of rows between the first point of the next window and the first point of the current window. (This parameter is not required to be greater than or equal to the window size, but must be a positive number)
+
+The sliding step parameter is optional. If the parameter is not provided, the sliding step will be set to the same as the window size.
+
+- `SessionTimeWindowAccessStrategy`
+
+Window opening diagram: **Time intervals less than or equal to the given minimum time interval `sessionGap` are assigned in one group.**
+
+
+
+`SessionTimeWindowAccessStrategy`: `SessionTimeWindowAccessStrategy` has many constructors, you can pass 2 types of parameters to them:
+
+- Parameter 1: The display window on the time axis.
+- Parameter 2: The minimum time interval `sessionGap` of two adjacent windows.
+
+- `StateWindowAccessStrategy`
+
+Window opening diagram: **For numerical data, if the state difference is less than or equal to the given threshold `delta`, it will be assigned in one group.**
+
+
+
+`StateWindowAccessStrategy` has four constructors.
+
+- Constructor 1: For numerical data, there are 3 parameters: the time axis can display the start and end time of the time window and the threshold `delta` for the allowable change within a single window.
+- Constructor 2: For text data and boolean data, there are 3 parameters: the time axis can be provided to display the start and end time of the time window. For both data types, the data within a single window is the same, and there is no need to provide an allowable change threshold.
+- Constructor 3: For numerical data, there is 1 parameter: you can only provide the threshold delta that is allowed to change within a single window. The start time of the time axis display time window will be defined as the smallest timestamp in the entire query result set, and the end time of the time axis display time window will be defined as the largest timestamp in the entire query result set.
+- Constructor 4: For text data and boolean data, you can provide no parameter. The start and end timestamps are explained in Constructor 3.
+
+StateWindowAccessStrategy can only take one column as input for now.
+
+Please see the Javadoc for more details.
+
+ 2.2.2 **setOutputDataType**
+
+Note that the type of output sequence you set here determines the type of data that the `PointCollector` can actually receive in the `transform` method. The relationship between the output data type set in `setOutputDataType` and the actual data output type that `PointCollector` can receive is as follows:
+
+| Output Data Type Set in `setOutputDataType` | Data Type that `PointCollector` Can Receive |
+| :------------------------------------------ | :----------------------------------------------------------- |
+| INT32 | int |
+| INT64 | long |
+| FLOAT | float |
+| DOUBLE | double |
+| BOOLEAN | boolean |
+| TEXT | java.lang.String and org.apache.iotdb.udf.api.type.Binary |
+
+The type of output time series of a UDTF is determined at runtime, which means that a UDTF can dynamically determine the type of output time series according to the type of input time series.
+Here is a simple example:
+
+```java
+void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) throws Exception {
+ // do something
+ // ...
+
+ configurations
+ .setAccessStrategy(new RowByRowAccessStrategy())
+ .setOutputDataType(parameters.getDataType(0));
+}
+```
+
+3. **void transform(Row row, PointCollector collector) throws Exception**
+
+You need to implement this method when you specify the strategy of UDF to read the original data as `RowByRowAccessStrategy`.
+
+This method processes the raw data one row at a time. The raw data is input from `Row` and output by `PointCollector`. You can output any number of data points in one `transform` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
+
+The following is a complete UDF example that implements the `void transform(Row row, PointCollector collector) throws Exception` method. It is an adder that receives two columns of time series as input. When two data points in a row are not `null`, this UDF will output the algebraic sum of these two data points.
+
+``` java
+import org.apache.iotdb.udf.api.UDTF;
+import org.apache.iotdb.udf.api.access.Row;
+import org.apache.iotdb.udf.api.collector.PointCollector;
+import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
+import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
+import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
+import org.apache.iotdb.udf.api.type.Type;
+
+public class Adder implements UDTF {
+
+ @Override
+ public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
+ configurations
+      .setOutputDataType(Type.INT64)
+ .setAccessStrategy(new RowByRowAccessStrategy());
+ }
+
+ @Override
+ public void transform(Row row, PointCollector collector) throws Exception {
+ if (row.isNull(0) || row.isNull(1)) {
+ return;
+ }
+ collector.putLong(row.getTime(), row.getLong(0) + row.getLong(1));
+ }
+}
+```
+
+4. **void transform(RowWindow rowWindow, PointCollector collector) throws Exception**
+
+You need to implement this method when you specify the strategy of UDF to read the original data as `SlidingTimeWindowAccessStrategy` or `SlidingSizeWindowAccessStrategy`.
+
+This method processes a batch of data in a fixed number of rows or a fixed time interval each time, and we call the container containing this batch of data a window. The raw data is input from `RowWindow` and output by `PointCollector`. `RowWindow` can help you access a batch of `Row`, it provides a set of interfaces for random access and iterative access to this batch of `Row`. You can output any number of data points in one `transform` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
+
+Below is a complete UDF example that implements the `void transform(RowWindow rowWindow, PointCollector collector) throws Exception` method. It is a counter that receives any number of time series as input, and its function is to count and output the number of data rows in each time window within a specified time range.
+
+```java
+import java.io.IOException;
+import org.apache.iotdb.udf.api.UDTF;
+import org.apache.iotdb.udf.api.access.Row;
+import org.apache.iotdb.udf.api.access.RowWindow;
+import org.apache.iotdb.udf.api.collector.PointCollector;
+import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
+import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
+import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy;
+import org.apache.iotdb.udf.api.type.Type;
+
+public class Counter implements UDTF {
+
+ @Override
+ public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
+ configurations
+      .setOutputDataType(Type.INT32)
+ .setAccessStrategy(new SlidingTimeWindowAccessStrategy(
+ parameters.getLong("time_interval"),
+ parameters.getLong("sliding_step"),
+ parameters.getLong("display_window_begin"),
+ parameters.getLong("display_window_end")));
+ }
+
+ @Override
+ public void transform(RowWindow rowWindow, PointCollector collector) {
+ if (rowWindow.windowSize() != 0) {
+ collector.putInt(rowWindow.windowStartTime(), rowWindow.windowSize());
+ }
+ }
+}
+```
+
+5. **void terminate(PointCollector collector) throws Exception**
+
+In some scenarios, a UDF needs to traverse all the original data to calculate the final output data points. The `terminate` interface provides support for those scenarios.
+
+This method is called after all `transform` calls are executed and before the `beforeDestroy` method is executed. You can implement the `transform` method to perform pure data processing (without outputting any data points), and implement the `terminate` method to output the processing results.
+
+The processing results need to be output by the `PointCollector`. You can output any number of data points in one `terminate` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
+
+Below is a complete UDF example that implements the `void terminate(PointCollector collector) throws Exception` method. It takes one time series whose data type is `INT32` as input, and outputs the maximum value point of the series.
+
+```java
+import java.io.IOException;
+import org.apache.iotdb.udf.api.UDTF;
+import org.apache.iotdb.udf.api.access.Row;
+import org.apache.iotdb.udf.api.collector.PointCollector;
+import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
+import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
+import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
+import org.apache.iotdb.udf.api.type.Type;
+
+public class Max implements UDTF {
+
+ private Long time;
+ private int value;
+
+ @Override
+ public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
+ configurations
+      .setOutputDataType(Type.INT32)
+ .setAccessStrategy(new RowByRowAccessStrategy());
+ }
+
+ @Override
+ public void transform(Row row, PointCollector collector) {
+ if (row.isNull(0)) {
+ return;
+ }
+ int candidateValue = row.getInt(0);
+ if (time == null || value < candidateValue) {
+ time = row.getTime();
+ value = candidateValue;
+ }
+ }
+
+ @Override
+ public void terminate(PointCollector collector) throws IOException {
+ if (time != null) {
+ collector.putInt(time, value);
+ }
+ }
+}
+```
+
+6. **void beforeDestroy()**
+
+The method for terminating a UDF.
+
+This method is called by the framework. For a UDF instance, `beforeDestroy` will be called after the last record is processed. In the entire life cycle of the instance, `beforeDestroy` will only be called once.
+
+
+
+### UDAF (User Defined Aggregation Function)
+
+A complete definition of UDAF involves two classes, `State` and `UDAF`.
+
+#### State Class
+
+To write your own `State`, you need to implement the `org.apache.iotdb.udf.api.State` interface.
+
+#### Interface Description:
+
+| Interface Definition | Description | Required to Implement |
+| -------------------------------- | ------------------------------------------------------------ | --------------------- |
+| void reset() | To reset the `State` object to its initial state, you need to fill in the initial values of the fields in the `State` class within this method as if you were writing a constructor. | Required |
+| byte[] serialize() | Serializes `State` to binary data. This method is used for IoTDB internal `State` passing. Note that the order of serialization must be consistent with the following deserialization methods. | Required |
+| void deserialize(byte[] bytes) | Deserializes binary data to `State`. This method is used for IoTDB internal `State` passing. Note that the order of deserialization must be consistent with the serialization method above. | Required |
+
+#### Detailed interface introduction:
+
+1. **void reset()**
+
+This method resets the `State` to its initial state, you need to fill in the initial values of the fields in the `State` object in this method. For optimization reasons, IoTDB reuses `State` as much as possible internally, rather than creating a new `State` for each group, which would introduce unnecessary overhead. When `State` has finished updating the data in a group, this method is called to reset to the initial state as a way to process the next group.
+
+In the case of `State` for averaging (aka `avg`), for example, you would need the sum of the data, `sum`, and the number of entries in the data, `count`, and initialize both to 0 in the `reset()` method.
+
+```java
+class AvgState implements State {
+ double sum;
+
+ long count;
+
+ @Override
+ public void reset() {
+ sum = 0;
+ count = 0;
+ }
+
+ // other methods
+}
+```
+
+2. **byte[] serialize()/void deserialize(byte[] bytes)**
+
+These methods serialize the `State` into binary data, and deserialize the `State` from the binary data. IoTDB, as a distributed database, involves passing data among different nodes, so you need to write these two methods to enable the passing of the State among different nodes. Note that the order of serialization and deserialization must be consistent.
+
+In the case of `State` for averaging (aka `avg`), for example, you can convert the content of State to `byte[]` array and read out the content of State from `byte[]` array in any way you want, the following shows the code for serialization/deserialization using `ByteBuffer` introduced by Java8:
+
+```java
+@Override
+public byte[] serialize() {
+ ByteBuffer buffer = ByteBuffer.allocate(Double.BYTES + Long.BYTES);
+ buffer.putDouble(sum);
+ buffer.putLong(count);
+
+ return buffer.array();
+}
+
+@Override
+public void deserialize(byte[] bytes) {
+ ByteBuffer buffer = ByteBuffer.wrap(bytes);
+ sum = buffer.getDouble();
+ count = buffer.getLong();
+}
+```
+
+
+
+#### UDAF Classes
+
+To write a UDAF, you need to implement the `org.apache.iotdb.udf.api.UDAF` interface.
+
+#### Interface Description:
+
+| Interface definition | Description | Required to Implement |
+| ------------------------------------------------------------ | ------------------------------------------------------------ | --------------------- |
+| void validate(UDFParameterValidator validator) throws Exception | This method is mainly used to validate `UDFParameters` and it is executed before `beforeStart(UDFParameters, UDTFConfigurations)` is called. | Optional |
+| void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception | Initialization method that invokes user-defined initialization behavior before UDAF processes the input data. Unlike UDTF, configuration is of type `UDAFConfiguration`. | Required |
+| State createState() | To create a `State` object, usually just call the default constructor and modify the default initial value as needed. | Required |
+| void addInput(State state, Column[] columns, BitMap bitMap) | Update `State` object according to the incoming data `Column[]` in batch, note that last column `columns[columns.length - 1]` always represents the time column. In addition, `BitMap` represents the data that has been filtered out before, you need to manually determine whether the corresponding data has been filtered out when writing this method. | Required |
+| void combineState(State state, State rhs) | Merge `rhs` state into `state` state. In a distributed scenario, the same set of data may be distributed on different nodes, IoTDB generates a `State` object for the partial data on each node, and then calls this method to merge it into the complete `State`. | Required |
+| void outputFinal(State state, ResultValue resultValue) | Computes the final aggregated result based on the data in `State`. Note that according to the semantics of the aggregation, only one value can be output per group. | Required |
+| void beforeDestroy() | This method is called by the framework after the last input data is processed, and will only be called once in the life cycle of each UDF instance. | Optional |
+
+In the life cycle of a UDAF instance, the calling sequence of each method is as follows:
+
+1. State createState()
+2. void validate(UDFParameterValidator validator) throws Exception
+3. void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception
+4. void addInput(State state, Column[] columns, BitMap bitMap)
+5. void combineState(State state, State rhs)
+6. void outputFinal(State state, ResultValue resultValue)
+7. void beforeDestroy()
+
+Similar to UDTF, every time the framework executes a UDAF query, a new UDF instance will be constructed. When the query ends, the corresponding instance will be destroyed. Therefore, the internal data of the instances in different UDAF queries (even in the same SQL statement) are isolated. You can maintain some state data in the UDAF without considering the influence of concurrency and other factors.
+
+#### Detailed interface introduction:
+
+
+1. **void validate(UDFParameterValidator validator) throws Exception**
+
+Same as UDTF, the `validate` method is used to validate the parameters entered by the user.
+
+In this method, you can limit the number and types of input time series, check the attributes of user input, or perform any custom verification.
+
+2. **void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception**
+
+ The `beforeStart` method does the same things as it does in UDTF:
+
+1. Use UDFParameters to get the time series paths and parse key-value pair attributes entered by the user.
+2. Set the strategy to access the raw data and set the output data type in UDAFConfigurations.
+3. Create resources, such as establishing external connections, opening files, etc.
+
+The role of the `UDFParameters` type can be seen above.
+
+2.2 **UDAFConfigurations**
+
+The difference from UDTF is that UDAF uses `UDAFConfigurations` as the type of `configuration` object.
+
+Currently, this class only supports setting the type of output data.
+
+```java
+void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
+ // parameters
+ // ...
+
+ // configurations
+ configurations
+    .setOutputDataType(Type.INT32);
+}
+```
+
+The relationship between the output type set in `setOutputDataType` and the type of data output that `ResultValue` can actually receive is as follows:
+
+| The output type set in `setOutputDataType` | The output type that `ResultValue` can actually receive |
+| ------------------------------------------ | ------------------------------------------------------- |
+| INT32 | int |
+| INT64 | long |
+| FLOAT | float |
+| DOUBLE | double |
+| BOOLEAN | boolean |
+| TEXT | org.apache.iotdb.udf.api.type.Binary |
+
+The output type of the UDAF is determined at runtime. You can dynamically determine the output sequence type based on the input type.
+
+Here is a simple example:
+
+```java
+void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
+ // do something
+ // ...
+
+ configurations
+ .setOutputDataType(parameters.getDataType(0));
+}
+```
+
+3. **State createState()**
+
+
+This method creates and initializes a `State` object for UDAF. Due to the limitations of the Java language, you can only call the default constructor for the `State` class. The default constructor assigns a default initial value to all the fields in the class, and if that initial value does not meet your requirements, you need to initialize them manually within this method.
+
+The following is an example that includes manual initialization. Suppose you want to implement an aggregate function that multiplies all numbers in the group, then your initial `State` value should be set to 1, but the default constructor initializes it to 0, so you need to initialize `State` manually after calling the default constructor:
+
+```java
+public State createState() {
+ MultiplyState state = new MultiplyState();
+ state.result = 1;
+ return state;
+}
+```
+
+4. **void addInput(State state, Column[] columns, BitMap bitMap)**
+
+This method updates the `State` object with the raw input data. For performance reasons, also to align with the IoTDB vectorized query engine, the raw input data is no longer a data point, but an array of columns ``Column[]``. Note that the last column (i.e. `columns[columns.length - 1]`) is always the time column, so you can also do different operations in UDAF depending on the time.
+
+Since the input parameter is not a single data point but multiple columns, you need to manually filter some of the data in the columns, which is why the third parameter, `BitMap`, exists. It identifies which rows of these columns have been filtered out, so that you can skip the filtered-out data during computation.
+
+Here's an example of `addInput()` that counts the number of items (aka count). It shows how you can use `BitMap` to ignore data that has been filtered out. Note that due to the limitations of the Java language, you need to explicitly cast the `State` object from the type defined in the interface to your custom `State` type at the beginning of the method, otherwise you won't be able to use the `State` object.
+
+```java
+public void addInput(State state, Column[] columns, BitMap bitMap) {
+ CountState countState = (CountState) state;
+
+ int count = columns[0].getPositionCount();
+ for (int i = 0; i < count; i++) {
+ if (bitMap != null && !bitMap.isMarked(i)) {
+ continue;
+ }
+ if (!columns[0].isNull(i)) {
+ countState.count++;
+ }
+ }
+}
+```
+
+5. **void combineState(State state, State rhs)**
+
+
+This method combines two `State`s, or more precisely, updates the first `State` object with the second `State` object. IoTDB is a distributed database, and the data of the same group may be distributed on different nodes. For performance reasons, IoTDB will first aggregate some of the data on each node into `State`, and then merge the `State`s on different nodes that belong to the same group, which is what `combineState` does.
+
+Here's an example of `combineState()` for averaging (aka avg). Similar to `addInput`, you need to do an explicit type conversion for the two `State`s at the beginning. Also note that you are updating the value of the first `State` with the contents of the second `State`.
+
+```java
+public void combineState(State state, State rhs) {
+ AvgState avgState = (AvgState) state;
+ AvgState avgRhs = (AvgState) rhs;
+
+ avgState.count += avgRhs.count;
+ avgState.sum += avgRhs.sum;
+}
+```
+
+6. **void outputFinal(State state, ResultValue resultValue)**
+
+This method works by calculating the final result from `State`. You need to access the various fields in `State`, derive the final result, and set the final result into the `ResultValue` object. IoTDB internally calls this method once at the end for each group. Note that according to the semantics of aggregation, the final result can only be one value.
+
+Here is another `outputFinal` example for averaging (aka avg). In addition to the forced type conversion at the beginning, you will also see a specific use of the `ResultValue` object, where the final result is set by `setXXX` (where `XXX` is the type name).
+
+```java
+public void outputFinal(State state, ResultValue resultValue) {
+ AvgState avgState = (AvgState) state;
+
+ if (avgState.count != 0) {
+ resultValue.setDouble(avgState.sum / avgState.count);
+ } else {
+ resultValue.setNull();
+ }
+}
+```
+
+7. **void beforeDestroy()**
+
+
+The method for terminating a UDF.
+
+This method is called by the framework. For a UDF instance, `beforeDestroy` will be called after the last record is processed. In the entire life cycle of the instance, `beforeDestroy` will only be called once.
+
+
+### Maven Project Example
+
+If you use Maven, you can build your own UDF project referring to our **udf-example** module. You can find the project [here](https://github.com/apache/iotdb/tree/master/example/udf).
+
+
+## Contribute universal built-in UDF functions to IoTDB
+
+This part mainly introduces how external users can contribute their own UDFs to the IoTDB community.
+
+#### Prerequisites
+
+1. UDFs must be universal.
+
+ The "universal" mentioned here refers to: UDFs can be widely used in some scenarios. In other words, the UDF function must have reuse value and may be directly used by other users in the community.
+
+ If you are not sure whether the UDF you want to contribute is universal, you can send an email to `dev@iotdb.apache.org` or create an issue to initiate a discussion.
+
+2. The UDF you are going to contribute has been well tested and can run normally in the production environment.
+
+
+#### What you need to prepare
+
+1. UDF source code
+2. Test cases
+3. Instructions
+
+#### UDF Source Code
+
+1. Create the UDF main class and related classes in `iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/builtin` or in its subfolders.
+2. Register your UDF in `iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/builtin/BuiltinTimeSeriesGeneratingFunction.java`.
+
+#### Test Cases
+
+At a minimum, you need to write integration tests for the UDF.
+
+You can add a test class in `integration-test/src/test/java/org/apache/iotdb/db/it/udf`.
+
+
+#### Instructions
+
+The instructions need to include: the name and the function of the UDF, the attribute parameters that must be provided when the UDF is executed, the applicable scenarios, and the usage examples, etc.
+
+The instructions for use should include both Chinese and English versions. Instructions for use should be added separately in `docs/zh/UserGuide/Operation Manual/DML Data Manipulation Language.md` and `docs/UserGuide/Operation Manual/DML Data Manipulation Language.md`.
+
+#### Submit a PR
+
+When you have prepared the UDF source code, test cases, and instructions, you are ready to submit a Pull Request (PR) on [Github](https://github.com/apache/iotdb). You can refer to our code contribution guide to submit a PR: [Development Guide](https://iotdb.apache.org/Community/Development-Guide.html).
+
+
+After the PR review is approved and merged, your UDF has already contributed to the IoTDB community!
diff --git a/src/UserGuide/latest/User-Manual/Database-Programming.md b/src/UserGuide/latest/User-Manual/Database-Programming.md
index ce100e750..2386a55b4 100644
--- a/src/UserGuide/latest/User-Manual/Database-Programming.md
+++ b/src/UserGuide/latest/User-Manual/Database-Programming.md
@@ -1036,890 +1036,3 @@ SELECT avg(count_s1) from root.sg_count.d;
| :------------------------------------------ | ------------------------------------------------------------ | --------- | ------------- |
| `continuous_query_submit_thread` | The number of threads in the scheduled thread pool that submit continuous query tasks periodically | int32 | 2 |
| `continuous_query_min_every_interval_in_ms` | The minimum value of the continuous query execution time interval | duration | 1000 |
-
-## USER-DEFINED FUNCTION (UDF)
-
-IoTDB provides a variety of built-in functions to meet your computing needs, and you can also create user defined functions to meet more computing needs.
-
-This document describes how to write, register and use a UDF.
-
-
-### UDF Types
-
-In IoTDB, you can expand two types of UDF:
-
-| UDF Class | Description |
-| --------------------------------------------------- | ------------------------------------------------------------ |
-| UDTF(User Defined Timeseries Generating Function) | This type of function can take **multiple** time series as input, and output **one** time series, which can have any number of data points. |
-| UDAF(User Defined Aggregation Function) | Custom Aggregation Functions. This type of function can take one time series as input, and output **one** aggregated data point for each group based on the GROUP BY type. |
-
-### UDF Development Dependencies
-
-If you use [Maven](http://search.maven.org/), you can search for the development dependencies listed below from the [Maven repository](http://search.maven.org/) . Please note that you must select the same dependency version as the target IoTDB server version for development.
-
-``` xml
-
-
-`SlidingTimeWindowAccessStrategy`: `SlidingTimeWindowAccessStrategy` has many constructors, you can pass 3 types of parameters to them:
-
-- Parameter 1: The display window on the time axis
-- Parameter 2: Time interval for dividing the time axis (should be positive)
-- Parameter 3: Time sliding step (not required to be greater than or equal to the time interval, but must be a positive number)
-
-The first type of parameters are optional. If the parameters are not provided, the beginning time of the display window will be set to the same as the minimum timestamp of the query result set, and the ending time of the display window will be set to the same as the maximum timestamp of the query result set.
-
-The sliding step parameter is also optional. If the parameter is not provided, the sliding step will be set to the same as the time interval for dividing the time axis.
-
-The relationship between the three types of parameters can be seen in the figure below. Please see the Javadoc for more details.
-
-
-
-`SlidingSizeWindowAccessStrategy`: `SlidingSizeWindowAccessStrategy` has many constructors, you can pass 2 types of parameters to them:
-
-* Parameter 1: Window size. This parameter specifies the number of data rows contained in a data processing window. Note that the number of data rows in some of the last time windows may be less than the specified number of data rows.
-* Parameter 2: Sliding step. This parameter means the number of rows between the first point of the next window and the first point of the current window. (This parameter is not required to be greater than or equal to the window size, but must be a positive number)
-
-The sliding step parameter is optional. If the parameter is not provided, the sliding step will be set to the same as the window size.
-
-The `SessionTimeWindowAccessStrategy` is shown schematically below. **Time intervals less than or equal to the given minimum time interval `sessionGap` are assigned in one group**
-
-
-`SessionTimeWindowAccessStrategy`: `SessionTimeWindowAccessStrategy` has many constructors, you can pass 2 types of parameters to them:
-
-- Parameter 1: The display window on the time axis.
-- Parameter 2: The minimum time interval `sessionGap` of two adjacent windows.
-
-
-The `StateWindowAccessStrategy` is shown schematically below. **For numerical data, if the state difference is less than or equal to the given threshold `delta`, it will be assigned in one group. **
-
-
-`StateWindowAccessStrategy` has four constructors.
-
-- Constructor 1: For numerical data, there are 3 parameters: the time axis can display the start and end time of the time window and the threshold `delta` for the allowable change within a single window.
-- Constructor 2: For text data and boolean data, there are 3 parameters: the time axis can be provided to display the start and end time of the time window. For both data types, the data within a single window is same, and there is no need to provide an allowable change threshold.
-- Constructor 3: For numerical data, there are 1 parameters: you can only provide the threshold delta that is allowed to change within a single window. The start time of the time axis display time window will be defined as the smallest timestamp in the entire query result set, and the time axis display time window end time will be defined as The largest timestamp in the entire query result set.
-- Constructor 4: For text data and boolean data, you can provide no parameter. The start and end timestamps are explained in Constructor 3.
-
-StateWindowAccessStrategy can only take one column as input for now.
-
-Please see the Javadoc for more details.
-
-
-
-###### setOutputDataType
-
-Note that the type of output sequence you set here determines the type of data that the `PointCollector` can actually receive in the `transform` method. The relationship between the output data type set in `setOutputDataType` and the actual data output type that `PointCollector` can receive is as follows:
-
-| Output Data Type Set in `setOutputDataType` | Data Type that `PointCollector` Can Receive |
-| :------------------------------------------ | :----------------------------------------------------------- |
-| `INT32` | `int` |
-| `INT64` | `long` |
-| `FLOAT` | `float` |
-| `DOUBLE` | `double` |
-| `BOOLEAN` | `boolean` |
-| `TEXT` | `java.lang.String` and `org.apache.iotdb.udf.api.type.Binary` |
-
-The type of output time series of a UDTF is determined at runtime, which means that a UDTF can dynamically determine the type of output time series according to the type of input time series.
-Here is a simple example:
-
-```java
-void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) throws Exception {
- // do something
- // ...
-
- configurations
- .setAccessStrategy(new RowByRowAccessStrategy())
- .setOutputDataType(parameters.getDataType(0));
-}
-```
-
-
-
-#### void transform(Row row, PointCollector collector) throws Exception
-
-You need to implement this method when you specify the strategy of UDF to read the original data as `RowByRowAccessStrategy`.
-
-This method processes the raw data one row at a time. The raw data is input from `Row` and output by `PointCollector`. You can output any number of data points in one `transform` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
-
-The following is a complete UDF example that implements the `void transform(Row row, PointCollector collector) throws Exception` method. It is an adder that receives two columns of time series as input. When two data points in a row are not `null`, this UDF will output the algebraic sum of these two data points.
-
-``` java
-import org.apache.iotdb.udf.api.UDTF;
-import org.apache.iotdb.udf.api.access.Row;
-import org.apache.iotdb.udf.api.collector.PointCollector;
-import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
-import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
-import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
-import org.apache.iotdb.udf.api.type.Type;
-
-public class Adder implements UDTF {
-
- @Override
- public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
- configurations
- .setOutputDataType(TSDataType.INT64)
- .setAccessStrategy(new RowByRowAccessStrategy());
- }
-
- @Override
- public void transform(Row row, PointCollector collector) throws Exception {
- if (row.isNull(0) || row.isNull(1)) {
- return;
- }
- collector.putLong(row.getTime(), row.getLong(0) + row.getLong(1));
- }
-}
-```
-
-
-
-#### void transform(RowWindow rowWindow, PointCollector collector) throws Exception
-
-You need to implement this method when you specify the strategy of UDF to read the original data as `SlidingTimeWindowAccessStrategy` or `SlidingSizeWindowAccessStrategy`.
-
-This method processes a batch of data in a fixed number of rows or a fixed time interval each time, and we call the container containing this batch of data a window. The raw data is input from `RowWindow` and output by `PointCollector`. `RowWindow` can help you access a batch of `Row`, it provides a set of interfaces for random access and iterative access to this batch of `Row`. You can output any number of data points in one `transform` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
-
-Below is a complete UDF example that implements the `void transform(RowWindow rowWindow, PointCollector collector) throws Exception` method. It is a counter that receives any number of time series as input, and its function is to count and output the number of data rows in each time window within a specified time range.
-
-```java
-import java.io.IOException;
-import org.apache.iotdb.udf.api.UDTF;
-import org.apache.iotdb.udf.api.access.Row;
-import org.apache.iotdb.udf.api.access.RowWindow;
-import org.apache.iotdb.udf.api.collector.PointCollector;
-import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
-import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
-import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy;
-import org.apache.iotdb.udf.api.type.Type;
-
-public class Counter implements UDTF {
-
- @Override
- public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
- configurations
- .setOutputDataType(TSDataType.INT32)
- .setAccessStrategy(new SlidingTimeWindowAccessStrategy(
- parameters.getLong("time_interval"),
- parameters.getLong("sliding_step"),
- parameters.getLong("display_window_begin"),
- parameters.getLong("display_window_end")));
- }
-
- @Override
- public void transform(RowWindow rowWindow, PointCollector collector) {
- if (rowWindow.windowSize() != 0) {
- collector.putInt(rowWindow.windowStartTime(), rowWindow.windowSize());
- }
- }
-}
-```
-
-
-
-#### void terminate(PointCollector collector) throws Exception
-
-In some scenarios, a UDF needs to traverse all the original data to calculate the final output data points. The `terminate` interface provides support for those scenarios.
-
-This method is called after all `transform` calls are executed and before the `beforeDestory` method is executed. You can implement the `transform` method to perform pure data processing (without outputting any data points), and implement the `terminate` method to output the processing results.
-
-The processing results need to be output by the `PointCollector`. You can output any number of data points in one `terminate` method call. It should be noted that the type of output data points must be the same as you set in the `beforeStart` method, and the timestamps of output data points must be strictly monotonically increasing.
-
-Below is a complete UDF example that implements the `void terminate(PointCollector collector) throws Exception` method. It takes one time series whose data type is `INT32` as input, and outputs the maximum value point of the series.
-
-```java
-import java.io.IOException;
-import org.apache.iotdb.udf.api.UDTF;
-import org.apache.iotdb.udf.api.access.Row;
-import org.apache.iotdb.udf.api.collector.PointCollector;
-import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
-import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
-import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
-import org.apache.iotdb.udf.api.type.Type;
-
-public class Max implements UDTF {
-
- private Long time;
- private int value;
-
- @Override
- public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
- configurations
- .setOutputDataType(TSDataType.INT32)
- .setAccessStrategy(new RowByRowAccessStrategy());
- }
-
- @Override
- public void transform(Row row, PointCollector collector) {
- if (row.isNull(0)) {
- return;
- }
- int candidateValue = row.getInt(0);
- if (time == null || value < candidateValue) {
- time = row.getTime();
- value = candidateValue;
- }
- }
-
- @Override
- public void terminate(PointCollector collector) throws IOException {
- if (time != null) {
- collector.putInt(time, value);
- }
- }
-}
-```
-
-
-
-#### void beforeDestroy()
-
-The method for terminating a UDF.
-
-This method is called by the framework. For a UDF instance, `beforeDestroy` will be called after the last record is processed. In the entire life cycle of the instance, `beforeDestroy` will only be called once.
-
-
-
-### UDAF (User Defined Aggregation Function)
-
-A complete definition of UDAF involves two classes, `State` and `UDAF`.
-
-#### State Class
-
-To write your own `State`, you need to implement the `org.apache.iotdb.udf.api.State` interface.
-
-The following table shows all the interfaces available for user implementation.
-
-| Interface Definition | Description | Required to Implement |
-| -------------------------------- | ------------------------------------------------------------ | --------------------- |
-| `void reset()` | To reset the `State` object to its initial state, you need to fill in the initial values of the fields in the `State` class within this method as if you were writing a constructor. | Required |
-| `byte[] serialize()` | Serializes `State` to binary data. This method is used for IoTDB internal `State` passing. Note that the order of serialization must be consistent with the following deserialization methods. | Required |
-| `void deserialize(byte[] bytes)` | Deserializes binary data to `State`. This method is used for IoTDB internal `State` passing. Note that the order of deserialization must be consistent with the serialization method above. | Required |
-
-The following section describes the usage of each interface in detail.
-
-
-
-##### void reset()
-
-This method resets the `State` to its initial state, you need to fill in the initial values of the fields in the `State` object in this method. For optimization reasons, IoTDB reuses `State` as much as possible internally, rather than creating a new `State` for each group, which would introduce unnecessary overhead. When `State` has finished updating the data in a group, this method is called to reset to the initial state as a way to process the next group.
-
-In the case of `State` for averaging (aka `avg`), for example, you would need the sum of the data, `sum`, and the number of entries in the data, `count`, and initialize both to 0 in the `reset()` method.
-
-```java
-class AvgState implements State {
- double sum;
-
- long count;
-
- @Override
- public void reset() {
- sum = 0;
- count = 0;
- }
-
- // other methods
-}
-```
-
-
-
-##### byte[] serialize()/void deserialize(byte[] bytes)
-
-These methods serialize the `State` into binary data, and deserialize the `State` from the binary data. IoTDB, as a distributed database, involves passing data among different nodes, so you need to write these two methods to enable the passing of the State among different nodes. Note that the order of serialization and deserialization must be the consistent.
-
-In the case of `State` for averaging (aka `avg`), for example, you can convert the content of State to `byte[]` array and read out the content of State from `byte[]` array in any way you want, the following shows the code for serialization/deserialization using `ByteBuffer` introduced by Java8:
-
-```java
-@Override
-public byte[] serialize() {
- ByteBuffer buffer = ByteBuffer.allocate(Double.BYTES + Long.BYTES);
- buffer.putDouble(sum);
- buffer.putLong(count);
-
- return buffer.array();
-}
-
-@Override
-public void deserialize(byte[] bytes) {
- ByteBuffer buffer = ByteBuffer.wrap(bytes);
- sum = buffer.getDouble();
- count = buffer.getLong();
-}
-```
-
-
-
-#### UDAF Classes
-
-To write a UDAF, you need to implement the `org.apache.iotdb.udf.api.UDAF` interface.
-
-The following table shows all the interfaces available for user implementation.
-
-| Interface definition | Description | Required to Implement |
-| ------------------------------------------------------------ | ------------------------------------------------------------ | --------------------- |
-| `void validate(UDFParameterValidator validator) throws Exception` | This method is mainly used to validate `UDFParameters` and it is executed before `beforeStart(UDFParameters, UDTFConfigurations)` is called. | Optional |
-| `void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception` | Initialization method that invokes user-defined initialization behavior before UDAF processes the input data. Unlike UDTF, configuration is of type `UDAFConfiguration`. | Required |
-| `State createState()` | To create a `State` object, usually just call the default constructor and modify the default initial value as needed. | Required |
-| `void addInput(State state, Column[] columns, BitMap bitMap)` | Update `State` object according to the incoming data `Column[]` in batch, note that last column `columns[columns.length - 1]` always represents the time column. In addition, `BitMap` represents the data that has been filtered out before, you need to manually determine whether the corresponding data has been filtered out when writing this method. | Required |
-| `void combineState(State state, State rhs)` | Merge `rhs` state into `state` state. In a distributed scenario, the same set of data may be distributed on different nodes, IoTDB generates a `State` object for the partial data on each node, and then calls this method to merge it into the complete `State`. | Required |
-| `void outputFinal(State state, ResultValue resultValue)` | Computes the final aggregated result based on the data in `State`. Note that according to the semantics of the aggregation, only one value can be output per group. | Required |
-| `void beforeDestroy() ` | This method is called by the framework after the last input data is processed, and will only be called once in the life cycle of each UDF instance. | Optional |
-
-In the life cycle of a UDAF instance, the calling sequence of each method is as follows:
-
-1. `State createState()`
-2. `void validate(UDFParameterValidator validator) throws Exception`
-3. `void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception`
-4. `void addInput(State state, Column[] columns, BitMap bitMap)`
-5. `void combineState(State state, State rhs)`
-6. `void outputFinal(State state, ResultValue resultValue)`
-7. `void beforeDestroy()`
-
-Similar to UDTF, every time the framework executes a UDAF query, a new UDF instance will be constructed. When the query ends, the corresponding instance will be destroyed. Therefore, the internal data of the instances in different UDAF queries (even in the same SQL statement) are isolated. You can maintain some state data in the UDAF without considering the influence of concurrency and other factors.
-
-The usage of each interface will be described in detail below.
-
-
-
-##### void validate(UDFParameterValidator validator) throws Exception
-
-Same as UDTF, the `validate` method is used to validate the parameters entered by the user.
-
-In this method, you can limit the number and types of input time series, check the attributes of user input, or perform any custom verification.
-
-
-
-##### void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception
-
- The `beforeStart` method does the same thing as the UDAF:
-
-1. Use UDFParameters to get the time series paths and parse key-value pair attributes entered by the user.
-2. Set the strategy to access the raw data and set the output data type in UDAFConfigurations.
-3. Create resources, such as establishing external connections, opening files, etc.
-
-The role of the `UDFParameters` type can be seen above.
-
-###### UDAFConfigurations
-
-The difference from UDTF is that UDAF uses `UDAFConfigurations` as the type of `configuration` object.
-
-Currently, this class only supports setting the type of output data.
-
-```java
-void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
- // parameters
- // ...
-
- // configurations
- configurations
- .setOutputDataType(Type.INT32); }
-}
-```
-
-The relationship between the output type set in `setOutputDataType` and the type of data output that `ResultValue` can actually receive is as follows:
-
-| The output type set in `setOutputDataType` | The output type that `ResultValue` can actually receive |
-| ------------------------------------------ | ------------------------------------------------------- |
-| `INT32` | `int` |
-| `INT64` | `long` |
-| `FLOAT` | `float` |
-| `DOUBLE` | `double` |
-| `BOOLEAN` | `boolean` |
-| `TEXT` | `org.apache.iotdb.udf.api.type.Binary` |
-
-The output type of the UDAF is determined at runtime. You can dynamically determine the output sequence type based on the input type.
-
-Here is a simple example:
-
-```java
-void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
- // do something
- // ...
-
- configurations
- .setOutputDataType(parameters.getDataType(0));
-}
-```
-
-
-
-##### State createState()
-
-This method creates and initializes a `State` object for UDAF. Due to the limitations of the Java language, you can only call the default constructor for the `State` class. The default constructor assigns a default initial value to all the fields in the class, and if that initial value does not meet your requirements, you need to initialize them manually within this method.
-
-The following is an example that includes manual initialization. Suppose you want to implement an aggregate function that multiply all numbers in the group, then your initial `State` value should be set to 1, but the default constructor initializes it to 0, so you need to initialize `State` manually after calling the default constructor:
-
-```java
-public State createState() {
- MultiplyState state = new MultiplyState();
- state.result = 1;
- return state;
-}
-```
-
-
-
-##### void addInput(State state, Column[] columns, BitMap bitMap)
-
-This method updates the `State` object with the raw input data. For performance reasons, also to align with the IoTDB vectorized query engine, the raw input data is no longer a data point, but an array of columns ``Column[]``. Note that the last column (i.e. `columns[columns.length - 1]`) is always the time column, so you can also do different operations in UDAF depending on the time.
-
-Since the input parameter is not of a single data point type, but of multiple columns, you need to manually filter some of the data in the columns, which is why the third parameter, `BitMap`, exists. It identifies which of these columns have been filtered out, so you don't have to think about the filtered data in any case.
-
-Here's an example of `addInput()` that counts the number of items (aka count). It shows how you can use `BitMap` to ignore data that has been filtered out. Note that due to the limitations of the Java language, you need to do the explicit cast the `State` object from type defined in the interface to a custom `State` type at the beginning of the method, otherwise you won't be able to use the `State` object.
-
-```java
-public void addInput(State state, Column[] columns, BitMap bitMap) {
- CountState countState = (CountState) state;
-
- int count = columns[0].getPositionCount();
- for (int i = 0; i < count; i++) {
- if (bitMap != null && !bitMap.isMarked(i)) {
- continue;
- }
- if (!columns[0].isNull(i)) {
- countState.count++;
- }
- }
-}
-```
-
-
-
-##### void combineState(State state, State rhs)
-
-This method combines two `State`s, or more precisely, updates the first `State` object with the second `State` object. IoTDB is a distributed database, and the data of the same group may be distributed on different nodes. For performance reasons, IoTDB will first aggregate some of the data on each node into `State`, and then merge the `State`s on different nodes that belong to the same group, which is what `combineState` does.
-
-Here's an example of `combineState()` for averaging (aka avg). Similar to `addInput`, you need to do an explicit type conversion for the two `State`s at the beginning. Also note that you are updating the value of the first `State` with the contents of the second `State`.
-
-```java
-public void combineState(State state, State rhs) {
- AvgState avgState = (AvgState) state;
- AvgState avgRhs = (AvgState) rhs;
-
- avgState.count += avgRhs.count;
- avgState.sum += avgRhs.sum;
-}
-```
-
-
-
-##### void outputFinal(State state, ResultValue resultValue)
-
-This method works by calculating the final result from `State`. You need to access the various fields in `State`, derive the final result, and set the final result into the `ResultValue` object.IoTDB internally calls this method once at the end for each group. Note that according to the semantics of aggregation, the final result can only be one value.
-
-Here is another `outputFinal` example for averaging (aka avg). In addition to the forced type conversion at the beginning, you will also see a specific use of the `ResultValue` object, where the final result is set by `setXXX` (where `XXX` is the type name).
-
-```java
-public void outputFinal(State state, ResultValue resultValue) {
- AvgState avgState = (AvgState) state;
-
- if (avgState.count != 0) {
- resultValue.setDouble(avgState.sum / avgState.count);
- } else {
- resultValue.setNull();
- }
-}
-```
-
-
-
-##### void beforeDestroy()
-
-The method for terminating a UDF.
-
-This method is called by the framework. For a UDF instance, `beforeDestroy` will be called after the last record is processed. In the entire life cycle of the instance, `beforeDestroy` will only be called once.
-
-
-
-### Maven Project Example
-
-If you use Maven, you can build your own UDF project referring to our **udf-example** module. You can find the project [here](https://github.com/apache/iotdb/tree/master/example/udf).
-
-
-
-### UDF Registration
-
-The process of registering a UDF in IoTDB is as follows:
-
-1. Implement a complete UDF class, assuming the full class name of this class is `org.apache.iotdb.udf.ExampleUDTF`.
-2. Package your project into a JAR. If you use Maven to manage your project, you can refer to the Maven project example above.
-3. Make preparations for registration according to the registration mode. For details, see the following example.
-4. You can use following SQL to register UDF.
-
-```sql
-CREATE FUNCTION <UDF-NAME> AS <UDF-CLASS-FULL-PATHNAME>
-```
+
+| UDF Class | AccessStrategy | Description |
+|---|---|---|
+| UDTF | MAPPABLE_ROW_BY_ROW | Custom scalar function, input k columns of time series and 1 row of data, output 1 column of time series and 1 row of data, can be used in any clause and expression where scalar functions can appear, such as select clause, where clause, etc. |
+| UDTF | ROW_BY_ROW<br>SLIDING_TIME_WINDOW<br>SLIDING_SIZE_WINDOW<br>SESSION_TIME_WINDOW<br>STATE_WINDOW | Custom time series generation function, input k columns of time series and m rows of data, output 1 column of time series and n rows of data; the number of input rows m can be different from the number of output rows n; can only be used in SELECT clauses. |
+| UDAF | - | Custom aggregation function, input k columns of time series and m rows of data, output 1 column of time series and 1 row of data, can be used in any clause and expression where aggregation functions can appear, such as select clause, having clause, etc. |
+| UDF 分类 | 数据访问策略 | 描述 |
+|---|---|---|
+| UDTF | MAPPABLE_ROW_BY_ROW | 自定义标量函数,输入 k 列时间序列 1 行数据,输出 1 列时间序列 1 行数据,可用于标量函数出现的任何子句和表达式中,如select子句、where子句等。 |
+| UDTF | ROW_BY_ROW<br>SLIDING_TIME_WINDOW<br>SLIDING_SIZE_WINDOW<br>SESSION_TIME_WINDOW<br>STATE_WINDOW | 自定义时间序列生成函数,输入 k 列时间序列 m 行数据,输出 1 列时间序列 n 行数据,输入行数 m 可以与输出行数 n 不相同,只能用于SELECT子句中。 |
+| UDAF | - | 自定义聚合函数,输入 k 列时间序列 m 行数据,输出 1 列时间序列 1 行数据,可用于聚合函数出现的任何子句和表达式中,如select子句、having子句等。 |
+
+`SlidingTimeWindowAccessStrategy`有多种构造方法,您可以向构造方法提供 3 类参数:
+
+1. 时间轴显示时间窗开始和结束时间
+
+时间轴显示时间窗开始和结束时间不是必须要提供的。当您不提供这类参数时,时间轴显示时间窗开始时间会被定义为整个查询结果集中最小的时间戳,时间轴显示时间窗结束时间会被定义为整个查询结果集中最大的时间戳。
+
+2. 划分时间轴的时间间隔参数(必须为正数)
+3. 滑动步长(不要求大于等于时间间隔,但是必须为正数)
+
+滑动步长参数也不是必须的。当您不提供滑动步长参数时,滑动步长会被设定为划分时间轴的时间间隔。
+
+3 类参数的关系可见下图。策略的构造方法详见 Javadoc。
+
+
+
+> 注意,最后的一些时间窗口的实际时间间隔可能小于规定的时间间隔参数。另外,可能存在某些时间窗口内数据行数量为 0 的情况,这种情况框架也会为该窗口调用一次`transform`方法。
+
+- `SlidingSizeWindowAccessStrategy`
+
+开窗示意图:
+
+
+
+`SlidingSizeWindowAccessStrategy`有多种构造方法,您可以向构造方法提供 2 个参数:
+
+1. 窗口大小,即一个数据处理窗口包含的数据行数。注意,最后一些窗口的数据行数可能少于规定的数据行数。
+2. 滑动步长,即下一窗口第一个数据行与当前窗口第一个数据行间的数据行数(不要求大于等于窗口大小,但是必须为正数)
+
+滑动步长参数不是必须的。当您不提供滑动步长参数时,滑动步长会被设定为窗口大小。
+
+- `SessionTimeWindowAccessStrategy`
+
+开窗示意图:**时间间隔小于等于给定的最小时间间隔 sessionGap 则分为一组。**
+
+
+
+
+`SessionTimeWindowAccessStrategy`有多种构造方法,您可以向构造方法提供 2 类参数:
+
+1. 时间轴显示时间窗开始和结束时间。
+2. 会话窗口之间的最小时间间隔。
+
+- `StateWindowAccessStrategy`
+
+开窗示意图:**对于数值型数据,状态差值小于等于给定的阈值 delta 则分为一组。**
+
+
+
+`StateWindowAccessStrategy`有四种构造方法:
+
+1. 针对数值型数据,可以提供时间轴显示时间窗开始和结束时间以及对于单个窗口内部允许变化的阈值delta。
+2. 针对文本数据以及布尔数据,可以提供时间轴显示时间窗开始和结束时间。对于这两种数据类型,单个窗口内的数据是相同的,不需要提供变化阈值。
+3. 针对数值型数据,可以只提供单个窗口内部允许变化的阈值delta,时间轴显示时间窗开始时间会被定义为整个查询结果集中最小的时间戳,时间轴显示时间窗结束时间会被定义为整个查询结果集中最大的时间戳。
+4. 针对文本数据以及布尔数据,可以不提供任何参数,开始与结束时间戳见3中解释。
+
+StateWindowAccessStrategy 目前只能接收一列输入。策略的构造方法详见 Javadoc。
+
+ 2.2.2 **setOutputDataType**
+
+注意,您在此处设定的输出结果序列的类型,决定了`transform`方法中`PointCollector`实际能够接收的数据类型。`setOutputDataType`中设定的输出类型和`PointCollector`实际能够接收的数据输出类型关系如下:
+
+| `setOutputDataType`中设定的输出类型 | `PointCollector`实际能够接收的输出类型 |
+| :---------------------------------- | :----------------------------------------------------------- |
+| INT32 | int |
+| INT64 | long |
+| FLOAT | float |
+| DOUBLE | double |
+| BOOLEAN | boolean |
+| TEXT | java.lang.String 和 org.apache.iotdb.udf.api.type.Binary |
+
+UDTF 输出序列的类型是运行时决定的。您可以根据输入序列类型动态决定输出序列类型。
+
+示例:
+
+```java
+void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) throws Exception {
+ // do something
+ // ...
+
+ configurations
+ .setAccessStrategy(new RowByRowAccessStrategy())
+ .setOutputDataType(parameters.getDataType(0));
+}
+```
+
+3. **void transform(Row row, PointCollector collector) throws Exception**
+
+当您在`beforeStart`方法中指定 UDF 读取原始数据的策略为 `RowByRowAccessStrategy`,您就需要实现该方法,在该方法中增加对原始数据处理的逻辑。
+
+该方法每次处理原始数据的一行。原始数据由`Row`读入,由`PointCollector`输出。您可以选择在一次`transform`方法调用中输出任意数量的数据点。需要注意的是,输出数据点的类型必须与您在`beforeStart`方法中设置的一致,而输出数据点的时间戳必须是严格单调递增的。
+
+下面是一个实现了`void transform(Row row, PointCollector collector) throws Exception`方法的完整 UDF 示例。它是一个加法器,接收两列时间序列输入,当这两个数据点都不为`null`时,输出这两个数据点的代数和。
+
+``` java
+import org.apache.iotdb.udf.api.UDTF;
+import org.apache.iotdb.udf.api.access.Row;
+import org.apache.iotdb.udf.api.collector.PointCollector;
+import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
+import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
+import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
+import org.apache.iotdb.udf.api.type.Type;
+
+public class Adder implements UDTF {
+
+ @Override
+ public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
+ configurations
+ .setOutputDataType(Type.INT64)
+ .setAccessStrategy(new RowByRowAccessStrategy());
+ }
+
+ @Override
+ public void transform(Row row, PointCollector collector) throws Exception {
+ if (row.isNull(0) || row.isNull(1)) {
+ return;
+ }
+ collector.putLong(row.getTime(), row.getLong(0) + row.getLong(1));
+ }
+}
+```
+
+4. **void transform(RowWindow rowWindow, PointCollector collector) throws Exception**
+
+当您在`beforeStart`方法中指定 UDF 读取原始数据的策略为 `SlidingTimeWindowAccessStrategy`或者`SlidingSizeWindowAccessStrategy`时,您就需要实现该方法,在该方法中增加对原始数据处理的逻辑。
+
+该方法每次处理固定行数或者固定时间间隔内的一批数据,我们称包含这一批数据的容器为窗口。原始数据由`RowWindow`读入,由`PointCollector`输出。`RowWindow`能够帮助您访问某一批次的`Row`,它提供了对这一批次的`Row`进行随机访问和迭代访问的接口。您可以选择在一次`transform`方法调用中输出任意数量的数据点,需要注意的是,输出数据点的类型必须与您在`beforeStart`方法中设置的一致,而输出数据点的时间戳必须是严格单调递增的。
+
+下面是一个实现了`void transform(RowWindow rowWindow, PointCollector collector) throws Exception`方法的完整 UDF 示例。它是一个计数器,接收任意列数的时间序列输入,作用是统计并输出指定时间范围内每一个时间窗口中的数据行数。
+
+```java
+import java.io.IOException;
+import org.apache.iotdb.udf.api.UDTF;
+import org.apache.iotdb.udf.api.access.RowWindow;
+import org.apache.iotdb.udf.api.collector.PointCollector;
+import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
+import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
+import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy;
+import org.apache.iotdb.udf.api.type.Type;
+
+public class Counter implements UDTF {
+
+ @Override
+ public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
+ configurations
+ .setOutputDataType(Type.INT32)
+ .setAccessStrategy(new SlidingTimeWindowAccessStrategy(
+ parameters.getLong("time_interval"),
+ parameters.getLong("sliding_step"),
+ parameters.getLong("display_window_begin"),
+ parameters.getLong("display_window_end")));
+ }
+
+ @Override
+ public void transform(RowWindow rowWindow, PointCollector collector) throws Exception {
+ if (rowWindow.windowSize() != 0) {
+ collector.putInt(rowWindow.windowStartTime(), rowWindow.windowSize());
+ }
+ }
+}
+```
+
+5. **void terminate(PointCollector collector) throws Exception**
+
+在一些场景下,UDF 需要遍历完所有的原始数据后才能得到最后的输出结果。`terminate`接口为这类 UDF 提供了支持。
+
+该方法会在所有的`transform`调用执行完成后,在`beforeDestroy`方法执行前被调用。您可以选择使用`transform`方法进行单纯的数据处理,最后使用`terminate`将处理结果输出。
+
+结果需要由`PointCollector`输出。您可以选择在一次`terminate`方法调用中输出任意数量的数据点。需要注意的是,输出数据点的类型必须与您在`beforeStart`方法中设置的一致,而输出数据点的时间戳必须是严格单调递增的。
+
+下面是一个实现了`void terminate(PointCollector collector) throws Exception`方法的完整 UDF 示例。它接收一个`INT32`类型的时间序列输入,作用是输出该序列的最大值点。
+
+```java
+import java.io.IOException;
+import org.apache.iotdb.udf.api.UDTF;
+import org.apache.iotdb.udf.api.access.Row;
+import org.apache.iotdb.udf.api.collector.PointCollector;
+import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
+import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
+import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
+import org.apache.iotdb.udf.api.type.Type;
+
+public class Max implements UDTF {
+
+ private Long time;
+ private int value;
+
+ @Override
+ public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
+ configurations
+ .setOutputDataType(Type.INT32)
+ .setAccessStrategy(new RowByRowAccessStrategy());
+ }
+
+ @Override
+ public void transform(Row row, PointCollector collector) {
+ if (row.isNull(0)) {
+ return;
+ }
+ int candidateValue = row.getInt(0);
+ if (time == null || value < candidateValue) {
+ time = row.getTime();
+ value = candidateValue;
+ }
+ }
+
+ @Override
+ public void terminate(PointCollector collector) throws IOException {
+ if (time != null) {
+ collector.putInt(time, value);
+ }
+ }
+}
+```
+
+6. **void beforeDestroy()**
+
+UDTF 的结束方法,您可以在此方法中进行一些资源释放等的操作。
+
+此方法由框架调用。对于一个 UDF 类实例而言,生命周期中会且只会被调用一次,即在处理完最后一条记录之后被调用。
+
+### UDAF(User Defined Aggregation Function)
+
+一个完整的 UDAF 定义涉及到 State 和 UDAF 两个类。
+
+#### State 类
+
+编写一个 State 类需要实现`org.apache.iotdb.udf.api.State`接口,下表是需要实现的方法说明。
+
+#### 接口说明:
+
+| 接口定义 | 描述 | 是否必须 |
+| -------------------------------- | ------------------------------------------------------------ | -------- |
+| void reset() | 将 `State` 对象重置为初始的状态,您需要像编写构造函数一样,在该方法内填入 `State` 类中各个字段的初始值。 | 是 |
+| byte[] serialize() | 将 `State` 序列化为二进制数据。该方法用于 IoTDB 内部的 `State` 对象传递,注意序列化的顺序必须和下面的反序列化方法一致。 | 是 |
+| void deserialize(byte[] bytes) | 将二进制数据反序列化为 `State`。该方法用于 IoTDB 内部的 `State` 对象传递,注意反序列化的顺序必须和上面的序列化方法一致。 | 是 |
+
+#### 接口详细介绍:
+
+1. **void reset()**
+
+该方法的作用是将 `State` 重置为初始的状态,您需要在该方法内填写 `State` 对象中各个字段的初始值。出于优化上的考量,IoTDB 在内部会尽可能地复用 `State`,而不是为每一个组创建一个新的 `State`,这样会引入不必要的开销。当 `State` 更新完一个组中的数据之后,就会调用这个方法重置为初始状态,以此来处理下一个组。
+
+以求平均数(也就是 `avg`)的 `State` 为例,您需要数据的总和 `sum` 与数据的条数 `count`,并在 `reset()` 方法中将二者初始化为 0。
+
+```java
+class AvgState implements State {
+ double sum;
+
+ long count;
+
+ @Override
+ public void reset() {
+ sum = 0;
+ count = 0;
+ }
+
+ // other methods
+}
+```
+
+2. **byte[] serialize()/void deserialize(byte[] bytes)**
+
+该方法的作用是将 State 序列化为二进制数据,和从二进制数据中反序列化出 State。IoTDB 作为分布式数据库,涉及到在不同节点中传递数据,因此您需要编写这两个方法,来实现 State 在不同节点中的传递。注意序列化和反序列的顺序必须一致。
+
+还是以求平均数(也就是求 avg)的 State 为例,您可以通过任意途径将 State 的内容转化为 `byte[]` 数组,以及从 `byte[]` 数组中读取出 State 的内容,下面展示的是用 Java8 引入的 `ByteBuffer` 进行序列化/反序列的代码:
+
+```java
+@Override
+public byte[] serialize() {
+ ByteBuffer buffer = ByteBuffer.allocate(Double.BYTES + Long.BYTES);
+ buffer.putDouble(sum);
+ buffer.putLong(count);
+
+ return buffer.array();
+}
+
+@Override
+public void deserialize(byte[] bytes) {
+ ByteBuffer buffer = ByteBuffer.wrap(bytes);
+ sum = buffer.getDouble();
+ count = buffer.getLong();
+}
+```
+
+#### UDAF 类
+
+编写一个 UDAF 类需要实现`org.apache.iotdb.udf.api.UDAF`接口,下表是需要实现的方法说明。
+
+#### 接口说明:
+
+| 接口定义 | 描述 | 是否必须 |
+| ------------------------------------------------------------ | ------------------------------------------------------------ | -------- |
+| void validate(UDFParameterValidator validator) throws Exception | 在初始化方法`beforeStart`调用前执行,用于检测`UDFParameters`中用户输入的参数是否合法。该方法与 UDTF 的`validate`相同。 | 否 |
+| void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception | 初始化方法,在 UDAF 处理输入数据前,调用用户自定义的初始化行为。与 UDTF 不同的是,这里的 configuration 是 `UDAFConfiguration` 类型。 | 是 |
+| State createState() | 创建`State`对象,一般只需要调用默认构造函数,然后按需修改默认的初始值即可。 | 是 |
+| void addInput(State state, Column[] columns, BitMap bitMap) | 根据传入的数据`Column[]`批量地更新`State`对象,注意最后一列,也就是 `columns[columns.length - 1]` 总是代表时间列。另外`BitMap`表示之前已经被过滤掉的数据,您在编写该方法时需要手动判断对应的数据是否被过滤掉。 | 是 |
+| void combineState(State state, State rhs) | 将`rhs`状态合并至`state`状态中。在分布式场景下,同一组的数据可能分布在不同节点上,IoTDB 会为每个节点上的部分数据生成一个`State`对象,然后调用该方法合并成完整的`State`。 | 是 |
+| void outputFinal(State state, ResultValue resultValue) | 根据`State`中的数据,计算出最终的聚合结果。注意根据聚合的语义,每一组只能输出一个值。 | 是 |
+| void beforeDestroy() | UDAF 的结束方法。此方法由框架调用,并且只会被调用一次,即在处理完最后一条记录之后被调用。 | 否 |
+
+在一个完整的 UDAF 实例生命周期中,各个方法的调用顺序如下:
+
+1. State createState()
+2. void validate(UDFParameterValidator validator) throws Exception
+3. void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception
+4. void addInput(State state, Column[] columns, BitMap bitMap)
+5. void combineState(State state, State rhs)
+6. void outputFinal(State state, ResultValue resultValue)
+7. void beforeDestroy()
+
+和 UDTF 类似,框架每执行一次 UDAF 查询,都会构造一个全新的 UDF 类实例,查询结束时,对应的 UDF 类实例即被销毁,因此不同 UDAF 查询(即使是在同一个 SQL 语句中)UDF 类实例内部的数据都是隔离的。您可以放心地在 UDAF 中维护一些状态数据,无需考虑并发对 UDF 类实例内部状态数据的影响。
+
+#### 接口详细介绍:
+
+1. **void validate(UDFParameterValidator validator) throws Exception**
+
+同 UDTF, `validate`方法能够对用户输入的参数进行验证。
+
+您可以在该方法中限制输入序列的数量和类型,检查用户输入的属性或者进行自定义逻辑的验证。
+
+2. **void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception**
+
+ `beforeStart`方法的作用与 UDTF 相同:
+
+ 1. 帮助用户解析 SQL 语句中的 UDF 参数
+ 2. 配置 UDF 运行时必要的信息,即指定 UDF 访问原始数据时采取的策略和输出结果序列的类型
+ 3. 创建资源,比如建立外部链接,打开文件等。
+
+其中,`UDFParameters` 类型的作用可以参照上文。
+
+2.2 **UDAFConfigurations**
+
+和 UDTF 的区别在于,UDAF 使用了 `UDAFConfigurations` 作为 `configuration` 对象的类型。
+
+目前,该类仅支持设置输出数据的类型。
+
+```java
+void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
+ // parameters
+ // ...
+
+ // configurations
+ configurations
+ .setOutputDataType(Type.INT32);
+}
+```
+
+`setOutputDataType` 中设定的输出类型和 `ResultValue` 实际能够接收的数据输出类型关系如下:
+
+| `setOutputDataType`中设定的输出类型 | `ResultValue`实际能够接收的输出类型 |
+| :---------------------------------- | :------------------------------------- |
+| INT32 | int |
+| INT64 | long |
+| FLOAT | float |
+| DOUBLE | double |
+| BOOLEAN | boolean |
+| TEXT | org.apache.iotdb.udf.api.type.Binary |
+
+UDAF 输出序列的类型也是运行时决定的。您可以根据输入序列类型动态决定输出序列类型。
+
+示例:
+
+```java
+void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
+ // do something
+ // ...
+
+ configurations
+ .setOutputDataType(parameters.getDataType(0));
+}
+```
+
+3. **State createState()**
+
+为 UDAF 创建并初始化 `State`。由于 Java 语言本身的限制,您只能调用 `State` 类的默认构造函数。默认构造函数会为类中所有的字段赋一个默认的初始值,如果该初始值并不符合您的要求,您需要在这个方法内进行手动的初始化。
+
+下面是一个包含手动初始化的例子。假设您要实现一个累乘的聚合函数,`State` 的初始值应该设置为 1,但是默认构造函数会初始化为 0,因此您需要在调用默认构造函数之后,手动对 `State` 进行初始化:
+
+```java
+public State createState() {
+ MultiplyState state = new MultiplyState();
+ state.result = 1;
+ return state;
+}
+```
+
+4. **void addInput(State state, Column[] columns, BitMap bitMap)**
+
+该方法的作用是,通过原始的输入数据来更新 `State` 对象。出于性能上的考量,也是为了和 IoTDB 向量化的查询引擎相对齐,原始的输入数据不再是一个数据点,而是列的数组 `Column[]`。注意最后一列(也就是 `columns[columns.length - 1]` )总是时间列,因此您也可以在 UDAF 中根据时间进行不同的操作。
+
+由于输入参数的类型不是一个数据点,而是多个列,您需要手动对列中的部分数据进行过滤处理,这就是第三个参数 `BitMap` 存在的意义。它用来标识这些列中哪些数据被过滤掉了,您在任何情况下都无需考虑被过滤掉的数据。
+
+下面是一个用于统计数据条数(也就是 count)的 `addInput()` 示例。它展示了您应该如何使用 `BitMap` 来忽视那些已经被过滤掉的数据。注意还是由于 Java 语言本身的限制,您需要在方法的开头将接口中定义的 `State` 类型强制转化为自定义的 `State` 类型,不然后续无法正常使用该 `State` 对象。
+
+```java
+public void addInput(State state, Column[] columns, BitMap bitMap) {
+ CountState countState = (CountState) state;
+
+ int count = columns[0].getPositionCount();
+ for (int i = 0; i < count; i++) {
+ if (bitMap != null && !bitMap.isMarked(i)) {
+ continue;
+ }
+ if (!columns[0].isNull(i)) {
+ countState.count++;
+ }
+ }
+}
+```
+
+5. **void combineState(State state, State rhs)**
+
+该方法的作用是合并两个 `State`,更加准确的说,是用第二个 `State` 对象来更新第一个 `State` 对象。IoTDB 是分布式数据库,同一组的数据可能分布在多个不同的节点上。出于性能考虑,IoTDB 会为每个节点上的部分数据先进行聚合成 `State`,然后再将不同节点上的、属于同一个组的 `State` 进行合并,这就是 `combineState` 的作用。
+
+下面是一个用于求平均数(也就是 avg)的 `combineState()` 示例。和 `addInput` 类似,您都需要在开头对两个 `State` 进行强制类型转换。另外需要注意是用第二个 `State` 的内容来更新第一个 `State` 的值。
+
+```java
+public void combineState(State state, State rhs) {
+ AvgState avgState = (AvgState) state;
+ AvgState avgRhs = (AvgState) rhs;
+
+ avgState.count += avgRhs.count;
+ avgState.sum += avgRhs.sum;
+}
+```
+
+6. **void outputFinal(State state, ResultValue resultValue)**
+
+该方法的作用是从 `State` 中计算出最终的结果。您需要访问 `State` 中的各个字段,求出最终的结果,并将最终的结果设置到 `ResultValue` 对象中。IoTDB 内部会为每个组在最后调用一次这个方法。注意根据聚合的语义,最终的结果只能是一个值。
+
+下面还是一个用于求平均数(也就是 avg)的 `outputFinal` 示例。除了开头的强制类型转换之外,您还将看到 `ResultValue` 对象的具体用法,即通过 `setXXX`(其中 `XXX` 是类型名)来设置最后的结果。
+
+```java
+public void outputFinal(State state, ResultValue resultValue) {
+ AvgState avgState = (AvgState) state;
+
+ if (avgState.count != 0) {
+ resultValue.setDouble(avgState.sum / avgState.count);
+ } else {
+ resultValue.setNull();
+ }
+}
+```
+
+7. **void beforeDestroy()**
+
+UDAF 的结束方法,您可以在此方法中进行一些资源释放等的操作。
+
+此方法由框架调用。对于一个 UDF 类实例而言,生命周期中会且只会被调用一次,即在处理完最后一条记录之后被调用。
+
+### 完整 Maven 项目示例
+
+如果您使用 [Maven](http://search.maven.org/),可以参考我们编写的示例项目**udf-example**。您可以在 [这里](https://github.com/apache/iotdb/tree/master/example/udf) 找到它。
+
+
+## 为 IoTDB 贡献通用的内置 UDF 函数
+
+该部分主要讲述了外部用户如何将自己编写的 UDF 贡献给 IoTDB 社区。
+
+#### 前提条件
+
+1. UDF 具有通用性。
+
+ 通用性主要指的是:UDF 在某些业务场景下,可以被广泛使用。换言之,就是 UDF 具有复用价值,可被社区内其他用户直接使用。
+
+ 如果不确定自己写的 UDF 是否具有通用性,可以发邮件到 `dev@iotdb.apache.org` 或直接创建 ISSUE 发起讨论。
+
+2. UDF 已经完成测试,且能够正常运行在用户的生产环境中。
+
+#### 贡献清单
+
+1. UDF 的源代码
+2. UDF 的测试用例
+3. UDF 的使用说明
+
+#### 源代码
+
+1. 在`iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/builtin`中创建 UDF 主类和相关的辅助类。
+2. 在`iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/udf/builtin/BuiltinTimeSeriesGeneratingFunction.java`中注册编写的 UDF。
+
+#### 测试用例
+
+至少需要为贡献的 UDF 编写集成测试。
+
+可以在`integration-test/src/test/java/org/apache/iotdb/db/it/udf`中为贡献的 UDF 新增一个测试类进行测试。
+
+#### 使用说明
+
+使用说明需要包含:UDF 的名称、UDF 的作用、执行函数必须的属性参数、函数的适用的场景以及使用示例等。
+
+使用说明需包含中英文两个版本。应分别在 `docs/zh/UserGuide/Operation Manual/DML Data Manipulation Language.md` 和 `docs/UserGuide/Operation Manual/DML Data Manipulation Language.md` 中新增使用说明。
+
+#### 提交 PR
+
+当准备好源代码、测试用例和使用说明后,就可以将 UDF 贡献到 IoTDB 社区了。在 [Github](https://github.com/apache/iotdb) 上面提交 Pull Request (PR) 即可。具体提交方式见:[贡献指南](https://iotdb.apache.org/zh/Community/Development-Guide.html)。
+
+当 PR 评审通过并被合并后, UDF 就已经贡献给 IoTDB 社区了!
\ No newline at end of file
diff --git a/src/zh/UserGuide/latest/User-Manual/Database-Programming.md b/src/zh/UserGuide/latest/User-Manual/Database-Programming.md
index 385319086..86b0c31a8 100644
--- a/src/zh/UserGuide/latest/User-Manual/Database-Programming.md
+++ b/src/zh/UserGuide/latest/User-Manual/Database-Programming.md
@@ -1030,812 +1030,3 @@ SELECT avg(count_s1) from root.sg_count.d;
| :---------------------------------- |----------------------|----------|---------------|
| `continuous_query_submit_thread` | 用于周期性提交连续查询执行任务的线程数 | int32 | 2 |
| `continuous_query_min_every_interval_in_ms` | 系统允许的连续查询最小的周期性时间间隔 | duration | 1000 |
-
-## 用户自定义函数
-
-UDF(User Defined Function)即用户自定义函数。IoTDB 提供多种内建函数来满足您的计算需求,同时您还可以通过创建自定义函数来满足更多的计算需求。
-
-根据此文档,您将会很快学会 UDF 的编写、注册、使用等操作。
-
-### UDF 类型
-
-IoTDB 支持两种类型的 UDF 函数,如下表所示。
-
-| UDF 分类 | 描述 |
-| --------------------------------------------------- | ------------------------------------------------------------ |
-| UDTF(User Defined Timeseries Generating Function) | 自定义时间序列生成函数。该类函数允许接收多条时间序列,最终会输出一条时间序列,生成的时间序列可以有任意多数量的数据点。 |
-| UDAF(User Defined Aggregation Function) | 自定义聚合函数。该类函数接受一条时间序列数据,最终会根据用户指定的 GROUP BY 类型,为每个组生成一个聚合后的数据点。 |
-
-### UDF 依赖
-
-如果您使用 [Maven](http://search.maven.org/) ,可以从 [Maven 库](http://search.maven.org/) 中搜索下面示例中的依赖。请注意选择和目标 IoTDB 服务器版本相同的依赖版本。
-
-``` xml
-
-
-`SlidingTimeWindowAccessStrategy`有多种构造方法,您可以向构造方法提供 3 类参数:
-
-1. 时间轴显示时间窗开始和结束时间
-2. 划分时间轴的时间间隔参数(必须为正数)
-3. 滑动步长(不要求大于等于时间间隔,但是必须为正数)
-
-时间轴显示时间窗开始和结束时间不是必须要提供的。当您不提供这类参数时,时间轴显示时间窗开始时间会被定义为整个查询结果集中最小的时间戳,时间轴显示时间窗结束时间会被定义为整个查询结果集中最大的时间戳。
-
-滑动步长参数也不是必须的。当您不提供滑动步长参数时,滑动步长会被设定为划分时间轴的时间间隔。
-
-3 类参数的关系可见下图。策略的构造方法详见 Javadoc。
-
-
-
-注意,最后的一些时间窗口的实际时间间隔可能小于规定的时间间隔参数。另外,可能存在某些时间窗口内数据行数量为 0 的情况,这种情况框架也会为该窗口调用一次`transform`方法。
-
-如图是`SlidingSizeWindowAccessStrategy`的开窗示意图。
-
-
-`SlidingSizeWindowAccessStrategy`有多种构造方法,您可以向构造方法提供 2 个参数:
-
-1. 窗口大小,即一个数据处理窗口包含的数据行数。注意,最后一些窗口的数据行数可能少于规定的数据行数。
-2. 滑动步长,即下一窗口第一个数据行与当前窗口第一个数据行间的数据行数(不要求大于等于窗口大小,但是必须为正数)
-
-滑动步长参数不是必须的。当您不提供滑动步长参数时,滑动步长会被设定为窗口大小。
-
-如图是`SessionTimeWindowAccessStrategy`的开窗示意图。**时间间隔小于等于给定的最小时间间隔 sessionGap 则分为一组。**
-
-
-`SessionTimeWindowAccessStrategy`有多种构造方法,您可以向构造方法提供 2 类参数:
-
-1. 时间轴显示时间窗开始和结束时间。
-2. 会话窗口之间的最小时间间隔。
-
-如图是`StateWindowAccessStrategy`的开窗示意图。**对于数值型数据,状态差值小于等于给定的阈值 delta 则分为一组。**
-
-
-`StateWindowAccessStrategy`有四种构造方法。
-
-1. 针对数值型数据,可以提供时间轴显示时间窗开始和结束时间以及对于单个窗口内部允许变化的阈值delta。
-2. 针对文本数据以及布尔数据,可以提供时间轴显示时间窗开始和结束时间。对于这两种数据类型,单个窗口内的数据是相同的,不需要提供变化阈值。
-3. 针对数值型数据,可以只提供单个窗口内部允许变化的阈值delta,时间轴显示时间窗开始时间会被定义为整个查询结果集中最小的时间戳,时间轴显示时间窗结束时间会被定义为整个查询结果集中最大的时间戳。
-4. 针对文本数据以及布尔数据,可以不提供任何参数,开始与结束时间戳见3中解释。
-
-StateWindowAccessStrategy 目前只能接收一列输入。策略的构造方法详见 Javadoc。
-
- * setOutputDataType
-
-注意,您在此处设定的输出结果序列的类型,决定了`transform`方法中`PointCollector`实际能够接收的数据类型。`setOutputDataType`中设定的输出类型和`PointCollector`实际能够接收的数据输出类型关系如下:
-
-| `setOutputDataType`中设定的输出类型 | `PointCollector`实际能够接收的输出类型 |
-| :---------------------------------- | :----------------------------------------------------------- |
-| `INT32` | `int` |
-| `INT64` | `long` |
-| `FLOAT` | `float` |
-| `DOUBLE` | `double` |
-| `BOOLEAN` | `boolean` |
-| `TEXT` | `java.lang.String` 和 `org.apache.iotdb.udf.api.type.Binary` |
-
-UDTF 输出序列的类型是运行时决定的。您可以根据输入序列类型动态决定输出序列类型。
-
-下面是一个简单的例子:
-
-```java
-void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) throws Exception {
- // do something
- // ...
-
- configurations
- .setAccessStrategy(new RowByRowAccessStrategy())
- .setOutputDataType(parameters.getDataType(0));
-}
-```
-
-* void transform(Row row, PointCollector collector) throws Exception
-
-当您在`beforeStart`方法中指定 UDF 读取原始数据的策略为 `RowByRowAccessStrategy`,您就需要实现该方法,在该方法中增加对原始数据处理的逻辑。
-
-该方法每次处理原始数据的一行。原始数据由`Row`读入,由`PointCollector`输出。您可以选择在一次`transform`方法调用中输出任意数量的数据点。需要注意的是,输出数据点的类型必须与您在`beforeStart`方法中设置的一致,而输出数据点的时间戳必须是严格单调递增的。
-
-下面是一个实现了`void transform(Row row, PointCollector collector) throws Exception`方法的完整 UDF 示例。它是一个加法器,接收两列时间序列输入,当这两个数据点都不为`null`时,输出这两个数据点的代数和。
-
-``` java
-import org.apache.iotdb.udf.api.UDTF;
-import org.apache.iotdb.udf.api.access.Row;
-import org.apache.iotdb.udf.api.collector.PointCollector;
-import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
-import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
-import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
-import org.apache.iotdb.udf.api.type.Type;
-
-public class Adder implements UDTF {
-
- @Override
- public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
- configurations
- .setOutputDataType(Type.INT64)
- .setAccessStrategy(new RowByRowAccessStrategy());
- }
-
- @Override
- public void transform(Row row, PointCollector collector) throws Exception {
- if (row.isNull(0) || row.isNull(1)) {
- return;
- }
- collector.putLong(row.getTime(), row.getLong(0) + row.getLong(1));
- }
-}
-```
-
- * void transform(RowWindow rowWindow, PointCollector collector) throws Exception
-
-当您在`beforeStart`方法中指定 UDF 读取原始数据的策略为 `SlidingTimeWindowAccessStrategy`或者`SlidingSizeWindowAccessStrategy`时,您就需要实现该方法,在该方法中增加对原始数据处理的逻辑。
-
-该方法每次处理固定行数或者固定时间间隔内的一批数据,我们称包含这一批数据的容器为窗口。原始数据由`RowWindow`读入,由`PointCollector`输出。`RowWindow`能够帮助您访问某一批次的`Row`,它提供了对这一批次的`Row`进行随机访问和迭代访问的接口。您可以选择在一次`transform`方法调用中输出任意数量的数据点,需要注意的是,输出数据点的类型必须与您在`beforeStart`方法中设置的一致,而输出数据点的时间戳必须是严格单调递增的。
-
-下面是一个实现了`void transform(RowWindow rowWindow, PointCollector collector) throws Exception`方法的完整 UDF 示例。它是一个计数器,接收任意列数的时间序列输入,作用是统计并输出指定时间范围内每一个时间窗口中的数据行数。
-
-```java
-import java.io.IOException;
-import org.apache.iotdb.udf.api.UDTF;
-import org.apache.iotdb.udf.api.access.RowWindow;
-import org.apache.iotdb.udf.api.collector.PointCollector;
-import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
-import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
-import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy;
-import org.apache.iotdb.udf.api.type.Type;
-
-public class Counter implements UDTF {
-
- @Override
- public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
- configurations
- .setOutputDataType(Type.INT32)
- .setAccessStrategy(new SlidingTimeWindowAccessStrategy(
- parameters.getLong("time_interval"),
- parameters.getLong("sliding_step"),
- parameters.getLong("display_window_begin"),
- parameters.getLong("display_window_end")));
- }
-
- @Override
- public void transform(RowWindow rowWindow, PointCollector collector) throws Exception {
- if (rowWindow.windowSize() != 0) {
- collector.putInt(rowWindow.windowStartTime(), rowWindow.windowSize());
- }
- }
-}
-```
-
- * void terminate(PointCollector collector) throws Exception
-
-在一些场景下,UDF 需要遍历完所有的原始数据后才能得到最后的输出结果。`terminate`接口为这类 UDF 提供了支持。
-
-该方法会在所有的`transform`调用执行完成后,在`beforeDestory`方法执行前被调用。您可以选择使用`transform`方法进行单纯的数据处理,最后使用`terminate`将处理结果输出。
-
-结果需要由`PointCollector`输出。您可以选择在一次`terminate`方法调用中输出任意数量的数据点。需要注意的是,输出数据点的类型必须与您在`beforeStart`方法中设置的一致,而输出数据点的时间戳必须是严格单调递增的。
-
-下面是一个实现了`void terminate(PointCollector collector) throws Exception`方法的完整 UDF 示例。它接收一个`INT32`类型的时间序列输入,作用是输出该序列的最大值点。
-
-```java
-import java.io.IOException;
-import org.apache.iotdb.udf.api.UDTF;
-import org.apache.iotdb.udf.api.access.Row;
-import org.apache.iotdb.udf.api.collector.PointCollector;
-import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
-import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
-import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
-import org.apache.iotdb.udf.api.type.Type;
-
-public class Max implements UDTF {
-
- private Long time;
- private int value;
-
- @Override
- public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
- configurations
- .setOutputDataType(TSDataType.INT32)
- .setAccessStrategy(new RowByRowAccessStrategy());
- }
-
- @Override
- public void transform(Row row, PointCollector collector) {
- if (row.isNull(0)) {
- return;
- }
- int candidateValue = row.getInt(0);
- if (time == null || value < candidateValue) {
- time = row.getTime();
- value = candidateValue;
- }
- }
-
- @Override
- public void terminate(PointCollector collector) throws IOException {
- if (time != null) {
- collector.putInt(time, value);
- }
- }
-}
-```
-
- * void beforeDestroy()
-
-UDTF 的结束方法,您可以在此方法中进行一些资源释放等的操作。
-
-此方法由框架调用。对于一个 UDF 类实例而言,生命周期中会且只会被调用一次,即在处理完最后一条记录之后被调用。
-
-### UDAF(User Defined Aggregation Function)
-
-一个完整的 UDAF 定义涉及到 State 和 UDAF 两个类。
-
-#### State 类
-
-编写一个 State 类需要实现`org.apache.iotdb.udf.api.State`接口,下表是需要实现的方法说明。
-
-| 接口定义 | 描述 | 是否必须 |
-| -------------------------------- | ------------------------------------------------------------ | -------- |
-| `void reset()` | 将 `State` 对象重置为初始的状态,您需要像编写构造函数一样,在该方法内填入 `State` 类中各个字段的初始值。 | 是 |
-| `byte[] serialize()` | 将 `State` 序列化为二进制数据。该方法用于 IoTDB 内部的 `State` 对象传递,注意序列化的顺序必须和下面的反序列化方法一致。 | 是 |
-| `void deserialize(byte[] bytes)` | 将二进制数据反序列化为 `State`。该方法用于 IoTDB 内部的 `State` 对象传递,注意反序列化的顺序必须和上面的序列化方法一致。 | 是 |
-
-下面将详细介绍各个接口的使用方法。
-
-- void reset()
-
-该方法的作用是将 `State` 重置为初始的状态,您需要在该方法内填写 `State` 对象中各个字段的初始值。出于优化上的考量,IoTDB 在内部会尽可能地复用 `State`,而不是为每一个组创建一个新的 `State`,这样会引入不必要的开销。当 `State` 更新完一个组中的数据之后,就会调用这个方法重置为初始状态,以此来处理下一个组。
-
-以求平均数(也就是 `avg`)的 `State` 为例,您需要数据的总和 `sum` 与数据的条数 `count`,并在 `reset()` 方法中将二者初始化为 0。
-
-```java
-class AvgState implements State {
- double sum;
-
- long count;
-
- @Override
- public void reset() {
- sum = 0;
- count = 0;
- }
-
- // other methods
-}
-```
-
-- byte[] serialize()/void deserialize(byte[] bytes)
-
-该方法的作用是将 State 序列化为二进制数据,和从二进制数据中反序列化出 State。IoTDB 作为分布式数据库,涉及到在不同节点中传递数据,因此您需要编写这两个方法,来实现 State 在不同节点中的传递。注意序列化和反序列的顺序必须一致。
-
-还是以求平均数(也就是求 avg)的 State 为例,您可以通过任意途径将 State 的内容转化为 `byte[]` 数组,以及从 `byte[]` 数组中读取出 State 的内容,下面展示的是用 Java8 引入的 `ByteBuffer` 进行序列化/反序列的代码:
-
-```java
-@Override
-public byte[] serialize() {
- ByteBuffer buffer = ByteBuffer.allocate(Double.BYTES + Long.BYTES);
- buffer.putDouble(sum);
- buffer.putLong(count);
-
- return buffer.array();
-}
-
-@Override
-public void deserialize(byte[] bytes) {
- ByteBuffer buffer = ByteBuffer.wrap(bytes);
- sum = buffer.getDouble();
- count = buffer.getLong();
-}
-```
-
-#### UDAF 类
-
-编写一个 UDAF 类需要实现`org.apache.iotdb.udf.api.UDAF`接口,下表是需要实现的方法说明。
-
-| 接口定义 | 描述 | 是否必须 |
-| ------------------------------------------------------------ | ------------------------------------------------------------ | -------- |
-| `void validate(UDFParameterValidator validator) throws Exception` | 在初始化方法`beforeStart`调用前执行,用于检测`UDFParameters`中用户输入的参数是否合法。该方法与 UDTF 的`validate`相同。 | 否 |
-| `void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception` | 初始化方法,在 UDAF 处理输入数据前,调用用户自定义的初始化行为。与 UDTF 不同的是,这里的 configuration 是 `UDAFConfiguration` 类型。 | 是 |
-| `State createState()` | 创建`State`对象,一般只需要调用默认构造函数,然后按需修改默认的初始值即可。 | 是 |
-| `void addInput(State state, Column[] columns, BitMap bitMap)` | 根据传入的数据`Column[]`批量地更新`State`对象,注意最后一列,也就是 `columns[columns.length - 1]` 总是代表时间列。另外`BitMap`表示之前已经被过滤掉的数据,您在编写该方法时需要手动判断对应的数据是否被过滤掉。 | 是 |
-| `void combineState(State state, State rhs)` | 将`rhs`状态合并至`state`状态中。在分布式场景下,同一组的数据可能分布在不同节点上,IoTDB 会为每个节点上的部分数据生成一个`State`对象,然后调用该方法合并成完整的`State`。 | 是 |
-| `void outputFinal(State state, ResultValue resultValue)` | 根据`State`中的数据,计算出最终的聚合结果。注意根据聚合的语义,每一组只能输出一个值。 | 是 |
-| `void beforeDestroy() ` | UDAF 的结束方法。此方法由框架调用,并且只会被调用一次,即在处理完最后一条记录之后被调用。 | 否 |
-
-在一个完整的 UDAF 实例生命周期中,各个方法的调用顺序如下:
-
-1. `State createState()`
-2. `void validate(UDFParameterValidator validator) throws Exception`
-3. `void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception`
-4. `void addInput(State state, Column[] columns, BitMap bitMap)`
-5. `void combineState(State state, State rhs)`
-6. `void outputFinal(State state, ResultValue resultValue)`
-7. `void beforeDestroy()`
-
-和 UDTF 类似,框架每执行一次 UDAF 查询,都会构造一个全新的 UDF 类实例,查询结束时,对应的 UDF 类实例即被销毁,因此不同 UDAF 查询(即使是在同一个 SQL 语句中)UDF 类实例内部的数据都是隔离的。您可以放心地在 UDAF 中维护一些状态数据,无需考虑并发对 UDF 类实例内部状态数据的影响。
-
-下面将详细介绍各个接口的使用方法。
-
- * void validate(UDFParameterValidator validator) throws Exception
-
-同 UDTF, `validate`方法能够对用户输入的参数进行验证。
-
-您可以在该方法中限制输入序列的数量和类型,检查用户输入的属性或者进行自定义逻辑的验证。
-
- * void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception
-
- `beforeStart`方法的作用 UDAF 相同:
-
- 1. 帮助用户解析 SQL 语句中的 UDF 参数
- 2. 配置 UDF 运行时必要的信息,即指定 UDF 访问原始数据时采取的策略和输出结果序列的类型
- 3. 创建资源,比如建立外部链接,打开文件等。
-
-其中,`UDFParameters` 类型的作用可以参照上文。
-
-##### UDAFConfigurations
-
-和 UDTF 的区别在于,UDAF 使用了 `UDAFConfigurations` 作为 `configuration` 对象的类型。
-
-目前,该类仅支持设置输出数据的类型。
-
-```java
-void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
- // parameters
- // ...
-
- // configurations
- configurations
- .setOutputDataType(Type.INT32);
-}
-```
-
-`setOutputDataType` 中设定的输出类型和 `ResultValue` 实际能够接收的数据输出类型关系如下:
-
-| `setOutputDataType`中设定的输出类型 | `ResultValue`实际能够接收的输出类型 |
-| :---------------------------------- | :------------------------------------- |
-| `INT32` | `int` |
-| `INT64` | `long` |
-| `FLOAT` | `float` |
-| `DOUBLE` | `double` |
-| `BOOLEAN` | `boolean` |
-| `TEXT` | `org.apache.iotdb.udf.api.type.Binary` |
-
-UDAF 输出序列的类型也是运行时决定的。您可以根据输入序列类型动态决定输出序列类型。
-
-下面是一个简单的例子:
-
-```java
-void beforeStart(UDFParameters parameters, UDAFConfigurations configurations) throws Exception {
- // do something
- // ...
-
- configurations
- .setOutputDataType(parameters.getDataType(0));
-}
-```
-
-- State createState()
-
-为 UDAF 创建并初始化 `State`。由于 Java 语言本身的限制,您只能调用 `State` 类的默认构造函数。默认构造函数会为类中所有的字段赋一个默认的初始值,如果该初始值并不符合您的要求,您需要在这个方法内进行手动的初始化。
-
-下面是一个包含手动初始化的例子。假设您要实现一个累乘的聚合函数,`State` 的初始值应该设置为 1,但是默认构造函数会初始化为 0,因此您需要在调用默认构造函数之后,手动对 `State` 进行初始化:
-
-```java
-public State createState() {
- MultiplyState state = new MultiplyState();
- state.result = 1;
- return state;
-}
-```
-
-- void addInput(State state, Column[] columns, BitMap bitMap)
-
-该方法的作用是,通过原始的输入数据来更新 `State` 对象。出于性能上的考量,也是为了和 IoTDB 向量化的查询引擎相对齐,原始的输入数据不再是一个数据点,而是列的数组 `Column[]`。注意最后一列(也就是 `columns[columns.length - 1]` )总是时间列,因此您也可以在 UDAF 中根据时间进行不同的操作。
-
-由于输入参数的类型不是一个数据点,而是多个列,您需要手动对列中的部分数据进行过滤处理,这就是第三个参数 `BitMap` 存在的意义。它用来标识这些列中哪些数据被过滤掉了,您在任何情况下都无需考虑被过滤掉的数据。
-
-下面是一个用于统计数据条数(也就是 count)的 `addInput()` 示例。它展示了您应该如何使用 `BitMap` 来忽视那些已经被过滤掉的数据。注意还是由于 Java 语言本身的限制,您需要在方法的开头将接口中定义的 `State` 类型强制转化为自定义的 `State` 类型,不然后续无法正常使用该 `State` 对象。
-
-```java
-public void addInput(State state, Column[] columns, BitMap bitMap) {
- CountState countState = (CountState) state;
-
- int count = columns[0].getPositionCount();
- for (int i = 0; i < count; i++) {
- if (bitMap != null && !bitMap.isMarked(i)) {
- continue;
- }
- if (!columns[0].isNull(i)) {
- countState.count++;
- }
- }
-}
-```
-
-- void combineState(State state, State rhs)
-
-该方法的作用是合并两个 `State`,更加准确的说,是用第二个 `State` 对象来更新第一个 `State` 对象。IoTDB 是分布式数据库,同一组的数据可能分布在多个不同的节点上。出于性能考虑,IoTDB 会为每个节点上的部分数据先进行聚合成 `State`,然后再将不同节点上的、属于同一个组的 `State` 进行合并,这就是 `combineState` 的作用。
-
-下面是一个用于求平均数(也就是 avg)的 `combineState()` 示例。和 `addInput` 类似,您都需要在开头对两个 `State` 进行强制类型转换。另外需要注意是用第二个 `State` 的内容来更新第一个 `State` 的值。
-
-```java
-public void combineState(State state, State rhs) {
- AvgState avgState = (AvgState) state;
- AvgState avgRhs = (AvgState) rhs;
-
- avgState.count += avgRhs.count;
- avgState.sum += avgRhs.sum;
-}
-```
-
-- void outputFinal(State state, ResultValue resultValue)
-
-该方法的作用是从 `State` 中计算出最终的结果。您需要访问 `State` 中的各个字段,求出最终的结果,并将最终的结果设置到 `ResultValue` 对象中。IoTDB 内部会为每个组在最后调用一次这个方法。注意根据聚合的语义,最终的结果只能是一个值。
-
-下面还是一个用于求平均数(也就是 avg)的 `outputFinal` 示例。除了开头的强制类型转换之外,您还将看到 `ResultValue` 对象的具体用法,即通过 `setXXX`(其中 `XXX` 是类型名)来设置最后的结果。
-
-```java
-public void outputFinal(State state, ResultValue resultValue) {
- AvgState avgState = (AvgState) state;
-
- if (avgState.count != 0) {
- resultValue.setDouble(avgState.sum / avgState.count);
- } else {
- resultValue.setNull();
- }
-}
-```
-
- * void beforeDestroy()
-
-UDAF 的结束方法,您可以在此方法中进行一些资源释放等的操作。
-
-此方法由框架调用。对于一个 UDF 类实例而言,生命周期中会且只会被调用一次,即在处理完最后一条记录之后被调用。
-
-### 完整 Maven 项目示例
-
-如果您使用 [Maven](http://search.maven.org/),可以参考我们编写的示例项目**udf-example**。您可以在 [这里](https://github.com/apache/iotdb/tree/master/example/udf) 找到它。
-
-### UDF 注册
-
-注册一个 UDF 可以按如下流程进行:
-
-1. 实现一个完整的 UDF 类,假定这个类的全类名为`org.apache.iotdb.udf.UDTFExample`
-2. 将项目打成 JAR 包,如果您使用 Maven 管理项目,可以参考上述 Maven 项目示例的写法
-3. 进行注册前的准备工作,根据注册方式的不同需要做不同的准备,具体可参考以下例子
-4. 使用以下 SQL 语句注册 UDF
-
-```sql
-CREATE FUNCTION | UDF 分类 | +数据访问策略 | +描述 | +
|---|---|---|
| UDTF | +MAPPABLE_ROW_BY_ROW | +自定义标量函数,输入 k 列时间序列 1 行数据,输出 1 列时间序列 1 行数据,可用于标量函数出现的任何子句和表达式中,如select子句、where子句等。 | +
| ROW_BY_ROW SLIDING_TIME_WINDOW SLIDING_SIZE_WINDOW SESSION_TIME_WINDOW STATE_WINDOW |
+ 自定义时间序列生成函数,输入 k 列时间序列 m 行数据,输出 1 列时间序列 n 行数据,输入行数 m 可以与输出行数 n 不相同,只能用于SELECT子句中。 | +|
| UDAF | +- | +自定义聚合函数,输入 k 列时间序列 m 行数据,输出 1 列时间序列 1 行数据,可用于聚合函数出现的任何子句和表达式中,如select子句、having子句等。 | +