From c8ebb21dd95a786e1bec4977ca23fb884842cfdd Mon Sep 17 00:00:00 2001 From: Leto_b Date: Tue, 31 Mar 2026 18:22:46 +0800 Subject: [PATCH] fix version number 2082 --- src/.vuepress/sidebar/V2.0.x/zh-Table.ts | 2 +- .../sidebar_timecho/V2.0.x/zh-Table.ts | 2 +- .../AI-capability/AINode_Upgrade_timecho.md | 24 +++++++++---------- .../Programming-Python-Native-API_timecho.md | 2 +- .../Basic-Concept/Table-Management_timecho.md | 2 +- .../SQL-Metadata-Operations_timecho.md | 2 +- .../Table/User-Manual/Audit-Log_timecho.md | 17 +++++++++++++ .../User-Manual/Tree-to-Table_timecho.md | 6 ++--- .../Programming-Python-Native-API_timecho.md | 2 +- .../Basic-Concept/Operate-Metadata_timecho.md | 6 ++--- .../Tree/SQL-Manual/SQL-Manual_timecho.md | 4 ++-- .../Tree/User-Manual/Audit-Log_timecho.md | 17 +++++++++++++ .../Authority-Management_timecho.md | 2 +- .../Tree/User-Manual/Data-Sync_timecho.md | 24 +++++++++---------- .../AI-capability/AINode_Upgrade_timecho.md | 2 +- .../Programming-Python-Native-API_timecho.md | 2 +- .../Basic-Concept/Table-Management_timecho.md | 2 +- .../SQL-Metadata-Operations_timecho.md | 2 +- .../User-Manual/Audit-Log_timecho.md | 16 +++++++++++++ .../User-Manual/Tree-to-Table_timecho.md | 6 ++--- .../Programming-Python-Native-API_timecho.md | 2 +- .../Basic-Concept/Operate-Metadata_timecho.md | 6 ++--- .../latest/SQL-Manual/SQL-Manual_timecho.md | 4 ++-- .../latest/User-Manual/Audit-Log_timecho.md | 15 ++++++++++++ .../Authority-Management_timecho.md | 2 +- .../latest/User-Manual/Data-Sync_timecho.md | 2 +- .../AI-capability/AINode_Upgrade_timecho.md | 2 +- .../Programming-Python-Native-API_timecho.md | 2 +- .../Basic-Concept/Table-Management_timecho.md | 2 +- .../SQL-Metadata-Operations_timecho.md | 2 +- .../Table/User-Manual/Audit-Log_timecho.md | 17 +++++++++++++ .../User-Manual/Tree-to-Table_timecho.md | 6 ++--- .../Programming-Python-Native-API_timecho.md | 2 +- .../Basic-Concept/Operate-Metadata_timecho.md | 6 ++--- 
.../Tree/SQL-Manual/SQL-Manual_timecho.md | 4 ++-- .../Tree/User-Manual/Audit-Log_timecho.md | 17 +++++++++++++ .../Authority-Management_timecho.md | 2 +- .../Tree/User-Manual/Data-Sync_timecho.md | 2 +- .../AI-capability/AINode_Upgrade_timecho.md | 2 +- .../Programming-Python-Native-API_timecho.md | 2 +- .../Basic-Concept/Table-Management_timecho.md | 2 +- .../SQL-Metadata-Operations_timecho.md | 2 +- .../User-Manual/Audit-Log_timecho.md | 17 +++++++++++++ .../User-Manual/Tree-to-Table_timecho.md | 6 ++--- .../Programming-Python-Native-API_timecho.md | 2 +- .../Basic-Concept/Operate-Metadata_timecho.md | 6 ++--- .../latest/SQL-Manual/SQL-Manual_timecho.md | 4 ++-- .../latest/User-Manual/Audit-Log_timecho.md | 17 +++++++++++++ .../Authority-Management_timecho.md | 2 +- .../latest/User-Manual/Data-Sync_timecho.md | 2 +- 50 files changed, 217 insertions(+), 84 deletions(-) diff --git a/src/.vuepress/sidebar/V2.0.x/zh-Table.ts b/src/.vuepress/sidebar/V2.0.x/zh-Table.ts index 7253f3e5e..fbfc7862b 100644 --- a/src/.vuepress/sidebar/V2.0.x/zh-Table.ts +++ b/src/.vuepress/sidebar/V2.0.x/zh-Table.ts @@ -172,7 +172,7 @@ export const zhSidebar = { { text: 'C#原生接口', link: 'Programming-CSharp-Native-API_apache' }, { text: 'JDBC', link: 'Programming-JDBC_apache' }, { text: 'MQTT协议', link: 'Programming-MQTT_apache' }, - { text: 'RESTAPI V1 ', link: 'RestServiceV1' }, + { text: 'RESTAPI V1 ', link: 'RestServiceV1_apache' }, ], }, { diff --git a/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts b/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts index 9b1e85358..798b701ce 100644 --- a/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts +++ b/src/.vuepress/sidebar_timecho/V2.0.x/zh-Table.ts @@ -195,7 +195,7 @@ export const zhSidebar = { { text: 'JDBC', link: 'Programming-JDBC_timecho' }, { text: 'ODBC', link: 'Programming-ODBC_timecho' }, { text: 'MQTT协议', link: 'Programming-MQTT_timecho' }, - { text: 'RESTAPI V1 ', link: 'RestServiceV1' }, + { text: 'RESTAPI V1 ', link: 
'RestServiceV1_timecho' }, ], }, { diff --git a/src/UserGuide/Master/Table/AI-capability/AINode_Upgrade_timecho.md b/src/UserGuide/Master/Table/AI-capability/AINode_Upgrade_timecho.md index 7e393ff10..c8e52b386 100644 --- a/src/UserGuide/Master/Table/AI-capability/AINode_Upgrade_timecho.md +++ b/src/UserGuide/Master/Table/AI-capability/AINode_Upgrade_timecho.md @@ -92,19 +92,19 @@ SELECT * FROM FORECAST( * Built-in model inference does not require a registration process. By using the forecast function and specifying model_id, you can use the inference function of the model. * Parameter description -| Parameter Name | Parameter Type | Parameter Attributes | Description | Required | Notes | -|----------------|----------------|----------------------|-------------|----------|-------| -| model_id | Scalar parameter | String type | Unique identifier of the prediction model | Yes | | -| targets | Table parameter | SET SEMANTIC | Input data for the target variables to be predicted. IoTDB will automatically sort the data in ascending order of time before passing it to AINode. | Yes | Use SQL to describe the input data with target variables. If the input SQL is invalid, corresponding query errors will be reported. | +| Parameter Name | Parameter Type | Parameter Attributes | Description | Required | Notes | +|----------------|----------------|----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------| +| model_id | Scalar parameter | String type | Unique identifier of the prediction model | Yes | | +| targets | Table parameter | SET SEMANTIC | Input data for the target variables to be predicted. IoTDB will automatically sort the data in ascending order of time before passing it to AINode. 
| Yes | Use SQL to describe the input data with target variables. If the input SQL is invalid, corresponding query errors will be reported. | | history_covs | Scalar parameter | String type (valid table model query SQL), default: none | Specifies historical data of covariates for this prediction task, which are used to assist in predicting target variables. AINode will not output prediction results for historical covariates. Before passing data to the model, AINode will automatically sort the data in ascending order of time. | No | 1. Query results can only contain FIELD columns; 2. Other: Different models may have specific requirements, and errors will be thrown if not met. | -| future_covs | Scalar parameter | String type (valid table model query SQL), default: none | Specifies future data of some covariates for this prediction task, which are used to assist in predicting target variables. Before passing data to the model, AINode will automatically sort the data in ascending order of time. | No | 1. Can only be specified when history_covs is set; 2. The covariate names involved must be a subset of history_covs; 3. Query results can only contain FIELD columns; 4. Other: Different models may have specific requirements, and errors will be thrown if not met. | -| auto_adapt | Scalar parameter | Boolean type, default value: true | Whether to enable adaptive processing for covariate inference. | No | When adaptive mode is enabled: 1. If the set of future covariates (`future_covs`) is not a subset of the historical covariates (`history_covs`), any future covariates not present in the historical set will be automatically discarded. 2. If the length of any historical covariate does not match the length of the input target variable: a. If shorter, pad zeros at the beginning; b. If longer, discard the earliest data points. 3. If the length of any future covariate does not match the prediction length (`output_length`): a. If shorter, pad zeros at the end; b. 
If longer, discard the most recent data points. | -| output_start_time | Scalar parameter | Timestamp type. Default value: last timestamp of target variable + output_interval | Starting timestamp of output prediction points [i.e., forecast start time] | No | Must be greater than the maximum timestamp of target variable timestamps | -| output_length | Scalar parameter | INT32 type. Default value: 96 | Output window size | No | Must be greater than 0 | -| output_interval | Scalar parameter | Time interval type. Default value: (last timestamp - first timestamp of input data) / n - 1 | Time interval between output prediction points. Supported units: ns, us, ms, s, m, h, d, w | No | Must be greater than 0 | -| timecol | Scalar parameter | String type. Default value: time | Name of time column | No | Must be a TIMESTAMP column existing in targets | -| preserve_input | Scalar parameter | Boolean type. Default value: false | Whether to retain all original rows of target variable input in the output result set | No | | -| model_options | Scalar parameter | String type. Default value: empty string | Key-value pairs related to the model, such as whether to normalize the input. Different key-value pairs are separated by ';'. | No | | +| future_covs | Scalar parameter | String type (valid table model query SQL), default: none | Specifies future data of some covariates for this prediction task, which are used to assist in predicting target variables. Before passing data to the model, AINode will automatically sort the data in ascending order of time. | No | 1. Can only be specified when history_covs is set; 2. The covariate names involved must be a subset of history_covs; 3. Query results can only contain FIELD columns; 4. Other: Different models may have specific requirements, and errors will be thrown if not met. 
| +| auto_adapt | Scalar parameter | Boolean type, default value: true | Whether to enable adaptive processing for covariate inference.(Support from V2.0.8.2) | No | When adaptive mode is enabled: 1. If the set of future covariates (`future_covs`) is not a subset of the historical covariates (`history_covs`), any future covariates not present in the historical set will be automatically discarded. 2. If the length of any historical covariate does not match the length of the input target variable: a. If shorter, pad zeros at the beginning; b. If longer, discard the earliest data points. 3. If the length of any future covariate does not match the prediction length (`output_length`): a. If shorter, pad zeros at the end; b. If longer, discard the most recent data points. | +| output_start_time | Scalar parameter | Timestamp type. Default value: last timestamp of target variable + output_interval | Starting timestamp of output prediction points [i.e., forecast start time] | No | Must be greater than the maximum timestamp of target variable timestamps | +| output_length | Scalar parameter | INT32 type. Default value: 96 | Output window size | No | Must be greater than 0 | +| output_interval | Scalar parameter | Time interval type. Default value: (last timestamp - first timestamp of input data) / n - 1 | Time interval between output prediction points. Supported units: ns, us, ms, s, m, h, d, w | No | Must be greater than 0 | +| timecol | Scalar parameter | String type. Default value: time | Name of time column | No | Must be a TIMESTAMP column existing in targets | +| preserve_input | Scalar parameter | Boolean type. Default value: false | Whether to retain all original rows of target variable input in the output result set | No | | +| model_options | Scalar parameter | String type. Default value: empty string | Key-value pairs related to the model, such as whether to normalize the input. Different key-value pairs are separated by ';'. 
| No | | Notes: * **Default behavior**: Predict all columns of targets. Currently, only supports INT32, INT64, FLOAT, DOUBLE types. diff --git a/src/UserGuide/Master/Table/API/Programming-Python-Native-API_timecho.md b/src/UserGuide/Master/Table/API/Programming-Python-Native-API_timecho.md index 23d3b2f60..391382e4f 100644 --- a/src/UserGuide/Master/Table/API/Programming-Python-Native-API_timecho.md +++ b/src/UserGuide/Master/Table/API/Programming-Python-Native-API_timecho.md @@ -46,7 +46,7 @@ Note: Do not use a newer client to connect to an older server, as this may cause | execute_query_statement | Executes a query SQL statement and retrieves results. | sql: `str` | `SessionDataSet` | | close | Closes the session and releases resources. | None | None | -**Since V2.0.8**, `SessionDataSet` provides methods for batch DataFrame retrieval to efficiently handle large-volume queries: +**Since V2.0.8.2**, `SessionDataSet` provides methods for batch DataFrame retrieval to efficiently handle large-volume queries: ```python # Batch DataFrame retrieval diff --git a/src/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md b/src/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md index 442ef32dc..0e6349ad6 100644 --- a/src/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md +++ b/src/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md @@ -67,7 +67,7 @@ comment **Note:** 1. When creating a table, you do not need to specify a time column. IoTDB automatically adds a column named "time" and places it as the first column. All other columns can be added by enabling the `enable_auto_create_schema` option in the database configuration, or through the session interface for automatic creation or by using table modification statements. -2. Since version V2.0.8, tables support custom naming of the time column during creation. The order of the custom time column in the table is determined by the order in the creation SQL. 
The related constraints are as follows: +2. Since version V2.0.8.2, tables support custom naming of the time column during creation. The order of the custom time column in the table is determined by the order in the creation SQL. The related constraints are as follows: - When the column category is set to TIME, the data type must be TIMESTAMP. - Each table allows at most one time column (columnCategory = TIME). diff --git a/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md b/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md index 2c609299c..3847f6b4c 100644 --- a/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md +++ b/src/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md @@ -212,7 +212,7 @@ CREATE TABLE tableC ( "Temperature" int32 FIELD COMMENT 'temperature' ) with (TTL=DEFAULT); - -- Custom time column: named time_test, located in the second column of the table. (Support from V2.0.8) + -- Custom time column: named time_test, located in the second column of the table. 
(Support from V2.0.8.2) CREATE TABLE table1 ( region STRING TAG, time_user_defined TIMESTAMP TIME, diff --git a/src/UserGuide/Master/Table/User-Manual/Audit-Log_timecho.md b/src/UserGuide/Master/Table/User-Manual/Audit-Log_timecho.md index 791e23770..b8f079768 100644 --- a/src/UserGuide/Master/Table/User-Manual/Audit-Log_timecho.md +++ b/src/UserGuide/Master/Table/User-Manual/Audit-Log_timecho.md @@ -38,6 +38,23 @@ Audit logs serve as the record credentials of a database, enabling tracking of v Edit the `iotdb-system.properties` file to enable audit logging using the following parameters: + +* V2.0.8.1 + +| Parameter Name | Description | Data Type | Default Value | Activation Method | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|-----------|-------------------------------|-------------------| +| `enable_audit_log` | Whether to enable audit logging. true: enabled. false: disabled. | Boolean | false | Hot Reload | +| `auditable_operation_type` | Operation type selection. DML: all DML operations are logged; DDL: all DDL operations are logged; QUERY: all query operations are logged; CONTROL: all control statements are logged. | String | DML,DDL,QUERY,CONTROL | Hot Reload | +| `auditable_operation_level` | Permission level selection. global: log all audit events; object: only log events related to data instances. Containment relationship: object < global. For example: when set to global, all audit logs are recorded normally; when set to object, only operations on specific data instances are recorded. | String | global | Hot Reload | +| `auditable_operation_result` | Audit result selection. success: log only successful events; fail: log only failed events | String | success,fail | Hot Reload | +| `audit_log_ttl_in_days` | Audit log TTL (Time To Live). Logs older than this threshold will expire. 
| Double | -1.0 (never deleted) | Hot Reload | +| `audit_log_space_tl_in_GB` | Audit log SpaceTL. Logs will start rotating when total space reaches this threshold. | Double | 1.0 | Hot Reload | +| `audit_log_batch_interval_in_ms` | Batch write interval for audit logs | Long | 1000 | Hot Reload | +| `audit_log_batch_max_queue_bytes` | Maximum byte size of the queue for batch processing audit logs. Subsequent write operations will be blocked when this threshold is exceeded. | Long | 268435456 | Hot Reload | + +* V2.0.8.2 + + | Parameter Name | Description | Data Type | Default Value | Activation Method | |-------------------------------------------|------------------------------------------------------------------------------------------------------------|-----------|-------------------------------|-------------------| | `enable_audit_log` | Whether to enable audit logging. true: enabled. false: disabled. | Boolean | false | Hot Reload | diff --git a/src/UserGuide/Master/Table/User-Manual/Tree-to-Table_timecho.md b/src/UserGuide/Master/Table/User-Manual/Tree-to-Table_timecho.md index d2fbe530a..729c7ada4 100644 --- a/src/UserGuide/Master/Table/User-Manual/Tree-to-Table_timecho.md +++ b/src/UserGuide/Master/Table/User-Manual/Tree-to-Table_timecho.md @@ -80,7 +80,7 @@ The name of the view, which follows the same rules as a table name (for specific * If a device in the tree model does not contain certain declared FIELD columns, or if their data types are inconsistent with the declared FIELD columns, the value for that FIELD column will always be `NULL`when querying that device. * If no FIELD columns are specified, the system automatically scans for all measurements under the `prefixPath`subtree (including all ordinary sequence measurements and measurements defined in any templates whose mounted paths overlap with the `prefixPath`) during creation. The column names will use the measurement names from the tree model. 
* The tree model cannot have measurements with the same name (case-insensitive) but different data types. -* `TIME`: When creating a view, you do not need to specify a time column. IoTDB automatically adds a column named "time" and places it as the first column. Since version V2.0.8-beta, views support **custom naming of the time column** during creation. The order of the custom time column in the view is determined by the order in the creation SQL. The related constraints are as follows: +* `TIME`: When creating a view, you do not need to specify a time column. IoTDB automatically adds a column named "time" and places it as the first column. Since version V2.0.8.2, views support **custom naming of the time column** during creation. The order of the custom time column in the view is determined by the order in the creation SQL. The related constraints are as follows: * When the column category is set to `TIME`, the data type must be `TIMESTAMP`. * Each view allows at most one time column (columnCategory = TIME). * If no time column is explicitly defined, no other column can use `time` as its name to avoid conflicts with the system's default time column naming. @@ -143,7 +143,7 @@ with (ttl=604800000) AS root.db.** ``` -When customizing the time column (supported since V2.0.8), the SQL changes are as follows: +When customizing the time column (supported since V2.0.8.2), the SQL changes are as follows: ```SQL CREATE OR REPLACE VIEW viewdb."wind_turbine" @@ -161,7 +161,7 @@ AS root.db.** ### 2.2 Modifying a Table View #### 2.2.1 Syntax Definition -The ALTER VIEW function supports modifying the view name, adding columns, renaming columns, modifying FIELD column data type (supported since V2.0.8), deleting columns, setting the view's TTL property, and adding comments via COMMENT. 
+The ALTER VIEW function supports modifying the view name, adding columns, renaming columns, modifying FIELD column data type (supported since V2.0.8.2), deleting columns, setting the view's TTL property, and adding comments via COMMENT. ```SQL -- Rename view diff --git a/src/UserGuide/Master/Tree/API/Programming-Python-Native-API_timecho.md b/src/UserGuide/Master/Tree/API/Programming-Python-Native-API_timecho.md index e7cbea097..9ece00588 100644 --- a/src/UserGuide/Master/Tree/API/Programming-Python-Native-API_timecho.md +++ b/src/UserGuide/Master/Tree/API/Programming-Python-Native-API_timecho.md @@ -558,7 +558,7 @@ df = ... ``` -**Since V2.0.8**, `SessionDataSet` provides methods for batch DataFrame retrieval to efficiently handle large-volume queries: +**Since V2.0.8.2**, `SessionDataSet` provides methods for batch DataFrame retrieval to efficiently handle large-volume queries: ```python # Batch DataFrame retrieval diff --git a/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md b/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md index 2784a1265..87397ab9b 100644 --- a/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md +++ b/src/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md @@ -424,7 +424,7 @@ It is also supported to set an alias, tag, and attribute for aligned timeseries. ### 2.3 Modifying Timseries Data Types -Starting from version V2.0.8, modifying the data type of a timeseries via SQL statements is supported. +Starting from version V2.0.8.2, modifying the data type of a timeseries via SQL statements is supported. Syntax definition: @@ -459,7 +459,7 @@ ALTER TIMESERIES root.ln.wf01.wt01.temperature set data type DOUBLE ### 2.4 Modifying Timeseries Name -Since version V2.0.8, it has been supported to modify the full path name of a timeseries through SQL statements. After a successful modification, the original name becomes invalid but is still retained in the metadata storage. 
+Since version V2.0.8.2, it has been supported to modify the full path name of a timeseries through SQL statements. After a successful modification, the original name becomes invalid but is still retained in the metadata storage. Syntax definition: @@ -640,7 +640,7 @@ It is worth noting that when the queried path does not exist, the system will re - SHOW INVALID TIMESERIES - Since version V2.0.8, this SQL statement is supported to display the invalidated timeseries after a successful full path name modification. + Since version V2.0.8.2, this SQL statement is supported to display the invalidated timeseries after a successful full path name modification. ```sql IoTDB> show invalid timeSeries diff --git a/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md b/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md index 3f9208ed0..0844f03d1 100644 --- a/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md +++ b/src/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md @@ -143,14 +143,14 @@ CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT , longitude FLOAT); ``` ### 2.3 Modify Timeseries Data Type -> Supported since V2.0.8 +> Supported since V2.0.8.2 ```SQL ALTER TIMESERIES root.ln.wf01.wt01.temperature set data type DOUBLE ``` ### 2.4 Modify Timeseries Name -> This statement is supported from V2.0.8 onwards +> This statement is supported from V2.0.8.2 onwards ```sql ALTER TIMESERIES root.ln.wf01.wt01.temperature RENAME TO root.newln.newwf.newwt.temperature diff --git a/src/UserGuide/Master/Tree/User-Manual/Audit-Log_timecho.md b/src/UserGuide/Master/Tree/User-Manual/Audit-Log_timecho.md index 21c45fe37..3004fd1e6 100644 --- a/src/UserGuide/Master/Tree/User-Manual/Audit-Log_timecho.md +++ b/src/UserGuide/Master/Tree/User-Manual/Audit-Log_timecho.md @@ -38,6 +38,23 @@ Audit logs serve as the record credentials of a database, enabling tracking of v Edit the `iotdb-system.properties` file to enable audit logging using the following parameters: + 
+* V2.0.8.1 + +| Parameter Name | Description | Data Type | Default Value | Activation Method | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|-----------|-------------------------------|-------------------| +| `enable_audit_log` | Whether to enable audit logging. true: enabled. false: disabled. | Boolean | false | Hot Reload | +| `auditable_operation_type` | Operation type selection. DML: all DML operations are logged; DDL: all DDL operations are logged; QUERY: all query operations are logged; CONTROL: all control statements are logged. | String | DML,DDL,QUERY,CONTROL | Hot Reload | +| `auditable_operation_level` | Permission level selection. global: log all audit events; object: only log events related to data instances. Containment relationship: object < global. For example: when set to global, all audit logs are recorded normally; when set to object, only operations on specific data instances are recorded. | String | global | Hot Reload | +| `auditable_operation_result` | Audit result selection. success: log only successful events; fail: log only failed events | String | success,fail | Hot Reload | +| `audit_log_ttl_in_days` | Audit log TTL (Time To Live). Logs older than this threshold will expire. | Double | -1.0 (never deleted) | Hot Reload | +| `audit_log_space_tl_in_GB` | Audit log SpaceTL. Logs will start rotating when total space reaches this threshold. | Double | 1.0 | Hot Reload | +| `audit_log_batch_interval_in_ms` | Batch write interval for audit logs | Long | 1000 | Hot Reload | +| `audit_log_batch_max_queue_bytes` | Maximum byte size of the queue for batch processing audit logs. Subsequent write operations will be blocked when this threshold is exceeded. 
| Long | 268435456 | Hot Reload | + +* V2.0.8.2 + + | Parameter Name | Description | Data Type | Default Value | Activation Method | |-------------------------------------------|------------------------------------------------------------------------------------------------------------|-----------|-------------------------------|-------------------| | `enable_audit_log` | Whether to enable audit logging. true: enabled. false: disabled. | Boolean | false | Hot Reload | diff --git a/src/UserGuide/Master/Tree/User-Manual/Authority-Management_timecho.md b/src/UserGuide/Master/Tree/User-Manual/Authority-Management_timecho.md index 4a6806b5e..54bee9150 100644 --- a/src/UserGuide/Master/Tree/User-Manual/Authority-Management_timecho.md +++ b/src/UserGuide/Master/Tree/User-Manual/Authority-Management_timecho.md @@ -84,7 +84,7 @@ The table below describes the types and scope of these permissions: | READ_DATA | Allows reading time series data under the authorized path. | | WRITE_DATA | Allows reading time series data under the authorized path.
Allows inserting and deleting time series data under the authorized path.
Allows importing and loading data under the authorized path. When importing data, you need the WRITE_DATA permission for the corresponding path. When automatically creating databases or time series, you need MANAGE_DATABASE and WRITE_SCHEMA permissions. | | READ_SCHEMA | Allows obtaining detailed information about the metadata tree under the authorized path,
including databases, child paths, child nodes, devices, time series, templates, views, etc. | -| WRITE_SCHEMA | Allows obtaining detailed information about the metadata tree under the authorized path.
Allows creating, deleting, and modifying time series, templates, views, etc. under the authorized path. When creating or modifying views, it checks the WRITE_SCHEMA permission for the view path and READ_SCHEMA permission for the data source. When querying and inserting data into views, it checks the READ_DATA and WRITE_DATA permissions for the view path.
Allows setting, unsetting, and viewing TTL under the authorized path.
Allows attaching or detaching templates under the authorized path.
Allowed to modify the full path name of a timeseries under an authorized path. -- Supported from V2.0.8 onwards | +| WRITE_SCHEMA | Allows obtaining detailed information about the metadata tree under the authorized path.
Allows creating, deleting, and modifying time series, templates, views, etc. under the authorized path. When creating or modifying views, it checks the WRITE_SCHEMA permission for the view path and READ_SCHEMA permission for the data source. When querying and inserting data into views, it checks the READ_DATA and WRITE_DATA permissions for the view path.
Allows setting, unsetting, and viewing TTL under the authorized path.
Allows attaching or detaching templates under the authorized path.
Allowed to modify the full path name of a timeseries under an authorized path. -- Supported from V2.0.8.2 onwards | ### 3.2 Global Permissions diff --git a/src/UserGuide/Master/Tree/User-Manual/Data-Sync_timecho.md b/src/UserGuide/Master/Tree/User-Manual/Data-Sync_timecho.md index cc9806073..9ee157f84 100644 --- a/src/UserGuide/Master/Tree/User-Manual/Data-Sync_timecho.md +++ b/src/UserGuide/Master/Tree/User-Manual/Data-Sync_timecho.md @@ -602,19 +602,19 @@ pipe_all_sinks_rate_limit_bytes_per_second=-1 ### 5.1 source parameter(V1.3.3) -| key | value | value range | required or not | default value | -|:-------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------|:-----------------|:--------------| -| source | iotdb-source | String: iotdb-source | Required | - | -| inclusion | Used to specify the range of data to be synchronized in the data synchronization task, including data, schema, and auth | String:all, data(insert,delete), schema(database,timeseries,ttl), auth | Optional | data.insert | -| inclusion.exclusion | Used to exclude specific operations from the range specified by inclusion, reducing the 
amount of data synchronized | String:all, data(insert,delete), schema(database,timeseries,ttl), auth | Optional | - | -| mode.streaming | Specifies the capture source for time-series data writes. Applicable when mode.streamingis false, determining the source for capturing data.insertspecified in inclusion. Offers two strategies:- true: ​​Dynamic capture selection.​​ The system adaptively chooses between capturing individual write requests or only TsFile sealing requests based on downstream processing speed. Prioritizes capturing write requests for lower latency when processing is fast; captures only file sealing requests to avoid backlog when slow. Suitable for most scenarios, balancing latency and throughput optimally.- false: ​​Fixed batch capture.​​ Captures only TsFile sealing requests. Suitable for resource-constrained scenarios to reduce system load. Note: The snapshot data captured upon pipe startup is only provided to downstream processing in file format. | Boolean: true / false |Optional | true | -| mode.strict | Determines the strictness when filtering data using time/ path/ database-name/ table-nameparameters:- true: ​​Strict filtering.​​ The system strictly filters captured data according to the given conditions, ensuring only matching data is selected.- false: ​​Non-strict filtering.​​ The system may include some extra data during filtering. Suitable for performance-sensitive scenarios to reduce CPU and I/O consumption. | Boolean: true / false | Optional | true | -| mode.snapshot | Determines the capture mode for time-series data, affecting the dataspecified in inclusion. Offers two modes:- true: ​​Static data capture.​​ Upon pipe startup, a one-time data snapshot is captured. ​​The pipe will automatically terminate (DROP PIPE SQL is executed automatically) after the snapshot data is fully consumed.​​- false: ​​Dynamic data capture.​​ In addition to capturing a snapshot upon startup, the pipe continuously captures subsequent data changes. 
The pipe runs continuously to handle the dynamic data stream. | Boolean: true / false | Optional | false | +| key | value | value range | required or not | default value | +|:-------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------|:-----------------|:--------------| +| source | iotdb-source | String: iotdb-source | Required | - | +| inclusion | Used to specify the range of data to be synchronized in the data synchronization task, including data, schema, and auth | String:all, data(insert,delete), schema(database,timeseries,ttl), auth | Optional | data.insert | +| inclusion.exclusion | Used to exclude specific operations from the range specified by inclusion, reducing the amount of data synchronized | String:all, data(insert,delete), schema(database,timeseries,ttl), auth | Optional | - | +| mode.streaming | Specifies the capture source for time-series data writes. 
Applicable when mode.streamingis false, determining the source for capturing data.insertspecified in inclusion. Offers two strategies:- true: ​​Dynamic capture selection.​​ The system adaptively chooses between capturing individual write requests or only TsFile sealing requests based on downstream processing speed. Prioritizes capturing write requests for lower latency when processing is fast; captures only file sealing requests to avoid backlog when slow. Suitable for most scenarios, balancing latency and throughput optimally.- false: ​​Fixed batch capture.​​ Captures only TsFile sealing requests. Suitable for resource-constrained scenarios to reduce system load. Note: The snapshot data captured upon pipe startup is only provided to downstream processing in file format. | Boolean: true / false |Optional | true | +| mode.strict | Determines the strictness when filtering data using time/ path/ database-name/ table-nameparameters:- true: ​​Strict filtering.​​ The system strictly filters captured data according to the given conditions, ensuring only matching data is selected.- false: ​​Non-strict filtering.​​ The system may include some extra data during filtering. Suitable for performance-sensitive scenarios to reduce CPU and I/O consumption. | Boolean: true / false | Optional | true | +| mode.snapshot | Determines the capture mode for time-series data, affecting the dataspecified in inclusion. Offers two modes:- true: ​​Static data capture.​​ Upon pipe startup, a one-time data snapshot is captured. ​​The pipe will automatically terminate (DROP PIPE SQL is executed automatically) after the snapshot data is fully consumed.​​- false: ​​Dynamic data capture.​​ In addition to capturing a snapshot upon startup, the pipe continuously captures subsequent data changes. The pipe runs continuously to handle the dynamic data stream. | Boolean: true / false | Optional | false | | path | Can be specified when the user connects with sql_dialectset to tree. 
For upgraded user pipes, the default sql_dialect is tree. This parameter determines the capture scope for time-series data, affecting the data specified in inclusion, as well as some sequence-related metadata. Data is selected into the streaming pipe if its tree model path matches the specified path.
Starting from version V2.0.8.2, this parameter supports specifying multiple exact paths in a single pipe, e.g., `'path'='root.test.d0.s1,root.test.d0.s2,root.test.d0.s3'`. | String: IoTDB-standard tree path pattern, wildcards allowed | Optional | root.** | -| start-time | The start event time for synchronizing all data, including start-time | Long: [Long.MIN_VALUE, Long.MAX_VALUE] | Optional | Long.MIN_VALUE | -| end-time | The end event time for synchronizing all data, including end-time | Long: [Long.MIN_VALUE, Long.MAX_VALUE] | Optional | Long.MAX_VALUE | -| forwarding-pipe-requests | Whether to forward data written by other Pipes (usually data synchronization) | Boolean: true / false | Optional | true | -| mods | Same as mods.enable, whether to send the MODS file for TSFile. | Boolean: true / false | Optional | false | +| start-time | The start event time for synchronizing all data, including start-time | Long: [Long.MIN_VALUE, Long.MAX_VALUE] | Optional | Long.MIN_VALUE | +| end-time | The end event time for synchronizing all data, including end-time | Long: [Long.MIN_VALUE, Long.MAX_VALUE] | Optional | Long.MAX_VALUE | +| forwarding-pipe-requests | Whether to forward data written by other Pipes (usually data synchronization) | Boolean: true / false | Optional | true | +| mods | Same as mods.enable, whether to send the MODS file for TSFile. | Boolean: true / false | Optional | false | > 💎 **Note:** The difference between the values of true and false for the data extraction mode `mode.streaming` > diff --git a/src/UserGuide/latest-Table/AI-capability/AINode_Upgrade_timecho.md b/src/UserGuide/latest-Table/AI-capability/AINode_Upgrade_timecho.md index 7e393ff10..b8fd7065a 100644 --- a/src/UserGuide/latest-Table/AI-capability/AINode_Upgrade_timecho.md +++ b/src/UserGuide/latest-Table/AI-capability/AINode_Upgrade_timecho.md @@ -98,7 +98,7 @@ SELECT * FROM FORECAST( | targets | Table parameter | SET SEMANTIC | Input data for the target variables to be predicted. 
IoTDB will automatically sort the data in ascending order of time before passing it to AINode. | Yes | Use SQL to describe the input data with target variables. If the input SQL is invalid, corresponding query errors will be reported. | | history_covs | Scalar parameter | String type (valid table model query SQL), default: none | Specifies historical data of covariates for this prediction task, which are used to assist in predicting target variables. AINode will not output prediction results for historical covariates. Before passing data to the model, AINode will automatically sort the data in ascending order of time. | No | 1. Query results can only contain FIELD columns; 2. Other: Different models may have specific requirements, and errors will be thrown if not met. | | future_covs | Scalar parameter | String type (valid table model query SQL), default: none | Specifies future data of some covariates for this prediction task, which are used to assist in predicting target variables. Before passing data to the model, AINode will automatically sort the data in ascending order of time. | No | 1. Can only be specified when history_covs is set; 2. The covariate names involved must be a subset of history_covs; 3. Query results can only contain FIELD columns; 4. Other: Different models may have specific requirements, and errors will be thrown if not met. | -| auto_adapt | Scalar parameter | Boolean type, default value: true | Whether to enable adaptive processing for covariate inference. | No | When adaptive mode is enabled: 1. If the set of future covariates (`future_covs`) is not a subset of the historical covariates (`history_covs`), any future covariates not present in the historical set will be automatically discarded. 2. If the length of any historical covariate does not match the length of the input target variable: a. If shorter, pad zeros at the beginning; b. If longer, discard the earliest data points. 3. 
If the length of any future covariate does not match the prediction length (`output_length`): a. If shorter, pad zeros at the end; b. If longer, discard the most recent data points. | +| auto_adapt | Scalar parameter | Boolean type, default value: true | Whether to enable adaptive processing for covariate inference.(Support from V2.0.8.2) | No | When adaptive mode is enabled: 1. If the set of future covariates (`future_covs`) is not a subset of the historical covariates (`history_covs`), any future covariates not present in the historical set will be automatically discarded. 2. If the length of any historical covariate does not match the length of the input target variable: a. If shorter, pad zeros at the beginning; b. If longer, discard the earliest data points. 3. If the length of any future covariate does not match the prediction length (`output_length`): a. If shorter, pad zeros at the end; b. If longer, discard the most recent data points. | | output_start_time | Scalar parameter | Timestamp type. Default value: last timestamp of target variable + output_interval | Starting timestamp of output prediction points [i.e., forecast start time] | No | Must be greater than the maximum timestamp of target variable timestamps | | output_length | Scalar parameter | INT32 type. Default value: 96 | Output window size | No | Must be greater than 0 | | output_interval | Scalar parameter | Time interval type. Default value: (last timestamp - first timestamp of input data) / n - 1 | Time interval between output prediction points. 
Supported units: ns, us, ms, s, m, h, d, w | No | Must be greater than 0 | diff --git a/src/UserGuide/latest-Table/API/Programming-Python-Native-API_timecho.md b/src/UserGuide/latest-Table/API/Programming-Python-Native-API_timecho.md index 65eac6501..fef00468e 100644 --- a/src/UserGuide/latest-Table/API/Programming-Python-Native-API_timecho.md +++ b/src/UserGuide/latest-Table/API/Programming-Python-Native-API_timecho.md @@ -46,7 +46,7 @@ Note: Do not use a newer client to connect to an older server, as this may cause | execute_query_statement | Executes a query SQL statement and retrieves results. | sql: `str` | `SessionDataSet` | | close | Closes the session and releases resources. | None | None | -**Since V2.0.8**, `SessionDataSet` provides methods for batch DataFrame retrieval to efficiently handle large-volume queries: +**Since V2.0.8.2**, `SessionDataSet` provides methods for batch DataFrame retrieval to efficiently handle large-volume queries: ```python # Batch DataFrame retrieval diff --git a/src/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md b/src/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md index 98d80435c..239bca13e 100644 --- a/src/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md +++ b/src/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md @@ -67,7 +67,7 @@ comment **Note:** 1. When creating a table, you do not need to specify a time column. IoTDB automatically adds a column named "time" and places it as the first column. All other columns can be added by enabling the `enable_auto_create_schema` option in the database configuration, or through the session interface for automatic creation or by using table modification statements. -2. Since version V2.0.8, tables support custom naming of the time column during creation. The order of the custom time column in the table is determined by the order in the creation SQL. The related constraints are as follows: +2. 
Since version V2.0.8.2, tables support custom naming of the time column during creation. The order of the custom time column in the table is determined by the order in the creation SQL. The related constraints are as follows: - When the column category is set to TIME, the data type must be TIMESTAMP. - Each table allows at most one time column (columnCategory = TIME). diff --git a/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md b/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md index 2c609299c..3847f6b4c 100644 --- a/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md +++ b/src/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md @@ -212,7 +212,7 @@ CREATE TABLE tableC ( "Temperature" int32 FIELD COMMENT 'temperature' ) with (TTL=DEFAULT); - -- Custom time column: named time_test, located in the second column of the table. (Support from V2.0.8) + -- Custom time column: named time_test, located in the second column of the table. (Support from V2.0.8.2) CREATE TABLE table1 ( region STRING TAG, time_user_defined TIMESTAMP TIME, diff --git a/src/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md b/src/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md index c276715a8..b8fb96cb7 100644 --- a/src/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md +++ b/src/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md @@ -38,6 +38,22 @@ Audit logs serve as the record credentials of a database, enabling tracking of v Edit the `iotdb-system.properties` file to enable audit logging using the following parameters: + +* V2.0.8.1 + +| Parameter Name | Description | Data Type | Default Value | Activation Method | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|-----------|-------------------------------|-------------------| +| `enable_audit_log` | Whether to enable audit logging. 
true: enabled. false: disabled. | Boolean | false | Hot Reload | +| `auditable_operation_type` | Operation type selection. DML: all DML operations are logged; DDL: all DDL operations are logged; QUERY: all query operations are logged; CONTROL: all control statements are logged. | String | DML,DDL,QUERY,CONTROL | Hot Reload | +| `auditable_operation_level` | Permission level selection. global: log all audit events; object: only log events related to data instances. Containment relationship: object < global. For example: when set to global, all audit logs are recorded normally; when set to object, only operations on specific data instances are recorded. | String | global | Hot Reload | +| `auditable_operation_result` | Audit result selection. success: log only successful events; fail: log only failed events | String | success,fail | Hot Reload | +| `audit_log_ttl_in_days` | Audit log TTL (Time To Live). Logs older than this threshold will expire. | Double | -1.0 (never deleted) | Hot Reload | +| `audit_log_space_tl_in_GB` | Audit log SpaceTL. Logs will start rotating when total space reaches this threshold. | Double | 1.0 | Hot Reload | +| `audit_log_batch_interval_in_ms` | Batch write interval for audit logs | Long | 1000 | Hot Reload | +| `audit_log_batch_max_queue_bytes` | Maximum byte size of the queue for batch processing audit logs. Subsequent write operations will be blocked when this threshold is exceeded. | Long | 268435456 | Hot Reload | + +* V2.0.8.2 + | Parameter Name | Description | Data Type | Default Value | Activation Method | |-------------------------------------------|------------------------------------------------------------------------------------------------------------|-----------|-------------------------------|-------------------| | `enable_audit_log` | Whether to enable audit logging. true: enabled. false: disabled. 
| Boolean | false | Hot Reload | diff --git a/src/UserGuide/latest-Table/User-Manual/Tree-to-Table_timecho.md b/src/UserGuide/latest-Table/User-Manual/Tree-to-Table_timecho.md index d2fbe530a..729c7ada4 100644 --- a/src/UserGuide/latest-Table/User-Manual/Tree-to-Table_timecho.md +++ b/src/UserGuide/latest-Table/User-Manual/Tree-to-Table_timecho.md @@ -80,7 +80,7 @@ The name of the view, which follows the same rules as a table name (for specific * If a device in the tree model does not contain certain declared FIELD columns, or if their data types are inconsistent with the declared FIELD columns, the value for that FIELD column will always be `NULL`when querying that device. * If no FIELD columns are specified, the system automatically scans for all measurements under the `prefixPath`subtree (including all ordinary sequence measurements and measurements defined in any templates whose mounted paths overlap with the `prefixPath`) during creation. The column names will use the measurement names from the tree model. * The tree model cannot have measurements with the same name (case-insensitive) but different data types. -* `TIME`: When creating a view, you do not need to specify a time column. IoTDB automatically adds a column named "time" and places it as the first column. Since version V2.0.8-beta, views support **custom naming of the time column** during creation. The order of the custom time column in the view is determined by the order in the creation SQL. The related constraints are as follows: +* `TIME`: When creating a view, you do not need to specify a time column. IoTDB automatically adds a column named "time" and places it as the first column. Since version V2.0.8.2, views support **custom naming of the time column** during creation. The order of the custom time column in the view is determined by the order in the creation SQL. The related constraints are as follows: * When the column category is set to `TIME`, the data type must be `TIMESTAMP`. 
* Each view allows at most one time column (columnCategory = TIME). * If no time column is explicitly defined, no other column can use `time` as its name to avoid conflicts with the system's default time column naming. @@ -143,7 +143,7 @@ with (ttl=604800000) AS root.db.** ``` -When customizing the time column (supported since V2.0.8), the SQL changes are as follows: +When customizing the time column (supported since V2.0.8.2), the SQL changes are as follows: ```SQL CREATE OR REPLACE VIEW viewdb."wind_turbine" @@ -161,7 +161,7 @@ AS root.db.** ### 2.2 Modifying a Table View #### 2.2.1 Syntax Definition -The ALTER VIEW function supports modifying the view name, adding columns, renaming columns, modifying FIELD column data type (supported since V2.0.8), deleting columns, setting the view's TTL property, and adding comments via COMMENT. +The ALTER VIEW function supports modifying the view name, adding columns, renaming columns, modifying FIELD column data type (supported since V2.0.8.2), deleting columns, setting the view's TTL property, and adding comments via COMMENT. ```SQL -- Rename view diff --git a/src/UserGuide/latest/API/Programming-Python-Native-API_timecho.md b/src/UserGuide/latest/API/Programming-Python-Native-API_timecho.md index e7cbea097..9ece00588 100644 --- a/src/UserGuide/latest/API/Programming-Python-Native-API_timecho.md +++ b/src/UserGuide/latest/API/Programming-Python-Native-API_timecho.md @@ -558,7 +558,7 @@ df = ... 
``` -**Since V2.0.8**, `SessionDataSet` provides methods for batch DataFrame retrieval to efficiently handle large-volume queries: +**Since V2.0.8.2**, `SessionDataSet` provides methods for batch DataFrame retrieval to efficiently handle large-volume queries: ```python # Batch DataFrame retrieval diff --git a/src/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md b/src/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md index 2784a1265..87397ab9b 100644 --- a/src/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md +++ b/src/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md @@ -424,7 +424,7 @@ It is also supported to set an alias, tag, and attribute for aligned timeseries. ### 2.3 Modifying Timseries Data Types -Starting from version V2.0.8, modifying the data type of a timeseries via SQL statements is supported. +Starting from version V2.0.8.2, modifying the data type of a timeseries via SQL statements is supported. Syntax definition: @@ -459,7 +459,7 @@ ALTER TIMESERIES root.ln.wf01.wt01.temperature set data type DOUBLE ### 2.4 Modifying Timeseries Name -Since version V2.0.8, it has been supported to modify the full path name of a timeseries through SQL statements. After a successful modification, the original name becomes invalid but is still retained in the metadata storage. +Since version V2.0.8.2, it has been supported to modify the full path name of a timeseries through SQL statements. After a successful modification, the original name becomes invalid but is still retained in the metadata storage. Syntax definition: @@ -640,7 +640,7 @@ It is worth noting that when the queried path does not exist, the system will re - SHOW INVALID TIMESERIES - Since version V2.0.8, this SQL statement is supported to display the invalidated timeseries after a successful full path name modification. + Since version V2.0.8.2, this SQL statement is supported to display the invalidated timeseries after a successful full path name modification. 
```sql IoTDB> show invalid timeSeries diff --git a/src/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md b/src/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md index 3f9208ed0..0844f03d1 100644 --- a/src/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md +++ b/src/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md @@ -143,14 +143,14 @@ CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT , longitude FLOAT); ``` ### 2.3 Modify Timeseries Data Type -> Supported since V2.0.8 +> Supported since V2.0.8.2 ```SQL ALTER TIMESERIES root.ln.wf01.wt01.temperature set data type DOUBLE ``` ### 2.4 Modify Timeseries Name -> This statement is supported from V2.0.8 onwards +> This statement is supported from V2.0.8.2 onwards ```sql ALTER TIMESERIES root.ln.wf01.wt01.temperature RENAME TO root.newln.newwf.newwt.temperature diff --git a/src/UserGuide/latest/User-Manual/Audit-Log_timecho.md b/src/UserGuide/latest/User-Manual/Audit-Log_timecho.md index 484270a82..a5695ec55 100644 --- a/src/UserGuide/latest/User-Manual/Audit-Log_timecho.md +++ b/src/UserGuide/latest/User-Manual/Audit-Log_timecho.md @@ -38,6 +38,21 @@ Audit logs serve as the record credentials of a database, enabling tracking of v Edit the `iotdb-system.properties` file to enable audit logging using the following parameters: +* V2.0.8.1 + +| Parameter Name | Description | Data Type | Default Value | Activation Method | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------|-----------|-------------------------------|-------------------| +| `enable_audit_log` | Whether to enable audit logging. true: enabled. false: disabled. | Boolean | false | Hot Reload | +| `auditable_operation_type` | Operation type selection. DML: all DML operations are logged; DDL: all DDL operations are logged; QUERY: all query operations are logged; CONTROL: all control statements are logged. 
| String | DML,DDL,QUERY,CONTROL | Hot Reload | +| `auditable_operation_level` | Permission level selection. global: log all audit events; object: only log events related to data instances. Containment relationship: object < global. For example: when set to global, all audit logs are recorded normally; when set to object, only operations on specific data instances are recorded. | String | global | Hot Reload | +| `auditable_operation_result` | Audit result selection. success: log only successful events; fail: log only failed events | String | success,fail | Hot Reload | +| `audit_log_ttl_in_days` | Audit log TTL (Time To Live). Logs older than this threshold will expire. | Double | -1.0 (never deleted) | Hot Reload | +| `audit_log_space_tl_in_GB` | Audit log SpaceTL. Logs will start rotating when total space reaches this threshold. | Double | 1.0 | Hot Reload | +| `audit_log_batch_interval_in_ms` | Batch write interval for audit logs | Long | 1000 | Hot Reload | +| `audit_log_batch_max_queue_bytes` | Maximum byte size of the queue for batch processing audit logs. Subsequent write operations will be blocked when this threshold is exceeded. | Long | 268435456 | Hot Reload | + +* V2.0.8.2 + | Parameter Name | Description | Data Type | Default Value | Activation Method | |-------------------------------------------|------------------------------------------------------------------------------------------------------------|-----------|-------------------------------|-------------------| | `enable_audit_log` | Whether to enable audit logging. true: enabled. false: disabled. 
| Boolean | false | Hot Reload | diff --git a/src/UserGuide/latest/User-Manual/Authority-Management_timecho.md b/src/UserGuide/latest/User-Manual/Authority-Management_timecho.md index 4a6806b5e..54bee9150 100644 --- a/src/UserGuide/latest/User-Manual/Authority-Management_timecho.md +++ b/src/UserGuide/latest/User-Manual/Authority-Management_timecho.md @@ -84,7 +84,7 @@ The table below describes the types and scope of these permissions: | READ_DATA | Allows reading time series data under the authorized path. | | WRITE_DATA | Allows reading time series data under the authorized path.
Allows inserting and deleting time series data under the authorized path.
Allows importing and loading data under the authorized path. When importing data, you need the WRITE_DATA permission for the corresponding path. When automatically creating databases or time series, you need MANAGE_DATABASE and WRITE_SCHEMA permissions. | | READ_SCHEMA | Allows obtaining detailed information about the metadata tree under the authorized path,
including databases, child paths, child nodes, devices, time series, templates, views, etc. | -| WRITE_SCHEMA | Allows obtaining detailed information about the metadata tree under the authorized path.
Allows creating, deleting, and modifying time series, templates, views, etc. under the authorized path. When creating or modifying views, it checks the WRITE_SCHEMA permission for the view path and READ_SCHEMA permission for the data source. When querying and inserting data into views, it checks the READ_DATA and WRITE_DATA permissions for the view path.
Allows setting, unsetting, and viewing TTL under the authorized path.
Allows attaching or detaching templates under the authorized path.
Allowed to modify the full path name of a timeseries under an authorized path. -- Supported from V2.0.8 onwards | +| WRITE_SCHEMA | Allows obtaining detailed information about the metadata tree under the authorized path.
Allows creating, deleting, and modifying time series, templates, views, etc. under the authorized path. When creating or modifying views, it checks the WRITE_SCHEMA permission for the view path and READ_SCHEMA permission for the data source. When querying and inserting data into views, it checks the READ_DATA and WRITE_DATA permissions for the view path.
Allows setting, unsetting, and viewing TTL under the authorized path.
Allows attaching or detaching templates under the authorized path.
Allowed to modify the full path name of a timeseries under an authorized path. -- Supported from V2.0.8.2 onwards | ### 3.2 Global Permissions diff --git a/src/UserGuide/latest/User-Manual/Data-Sync_timecho.md b/src/UserGuide/latest/User-Manual/Data-Sync_timecho.md index dbcee4bb9..cc9806073 100644 --- a/src/UserGuide/latest/User-Manual/Data-Sync_timecho.md +++ b/src/UserGuide/latest/User-Manual/Data-Sync_timecho.md @@ -610,7 +610,7 @@ pipe_all_sinks_rate_limit_bytes_per_second=-1 | mode.streaming | Specifies the capture source for time-series data writes. Applicable when mode.streamingis false, determining the source for capturing data.insertspecified in inclusion. Offers two strategies:- true: ​​Dynamic capture selection.​​ The system adaptively chooses between capturing individual write requests or only TsFile sealing requests based on downstream processing speed. Prioritizes capturing write requests for lower latency when processing is fast; captures only file sealing requests to avoid backlog when slow. Suitable for most scenarios, balancing latency and throughput optimally.- false: ​​Fixed batch capture.​​ Captures only TsFile sealing requests. Suitable for resource-constrained scenarios to reduce system load. Note: The snapshot data captured upon pipe startup is only provided to downstream processing in file format. | Boolean: true / false |Optional | true | | mode.strict | Determines the strictness when filtering data using time/ path/ database-name/ table-nameparameters:- true: ​​Strict filtering.​​ The system strictly filters captured data according to the given conditions, ensuring only matching data is selected.- false: ​​Non-strict filtering.​​ The system may include some extra data during filtering. Suitable for performance-sensitive scenarios to reduce CPU and I/O consumption. | Boolean: true / false | Optional | true | | mode.snapshot | Determines the capture mode for time-series data, affecting the dataspecified in inclusion. 
Offers two modes:- true: ​​Static data capture.​​ Upon pipe startup, a one-time data snapshot is captured. ​​The pipe will automatically terminate (DROP PIPE SQL is executed automatically) after the snapshot data is fully consumed.​​- false: ​​Dynamic data capture.​​ In addition to capturing a snapshot upon startup, the pipe continuously captures subsequent data changes. The pipe runs continuously to handle the dynamic data stream. | Boolean: true / false | Optional | false | -| path | Can be specified when the user connects with sql_dialectset to tree. For upgraded user pipes, the default sql_dialectis tree. This parameter determines the capture scope for time-series data, affecting the dataspecified in inclusion, as well as some sequence-related metadata. Data is selected into the streaming pipe if its tree model path matches the specified path.
Starting from version V2.0.8, this parameter supports specifying multiple exact paths in a single pipe, e.g., `'path'='root.test.d0.s1,root.test.d0.s2,root.test.d0.s3'`. | String: IoTDB-standard tree path pattern, wildcards allowed | Optional | root.** | +| path | Can be specified when the user connects with sql_dialect set to tree. For upgraded user pipes, the default sql_dialect is tree. This parameter determines the capture scope for time-series data, affecting the data specified in inclusion, as well as some sequence-related metadata. Data is selected into the streaming pipe if its tree model path matches the specified path.
Starting from version V2.0.8.2, this parameter supports specifying multiple exact paths in a single pipe, e.g., `'path'='root.test.d0.s1,root.test.d0.s2,root.test.d0.s3'`. | String: IoTDB-standard tree path pattern, wildcards allowed | Optional | root.** | | start-time | The start event time for synchronizing all data, including start-time | Long: [Long.MIN_VALUE, Long.MAX_VALUE] | Optional | Long.MIN_VALUE | | end-time | The end event time for synchronizing all data, including end-time | Long: [Long.MIN_VALUE, Long.MAX_VALUE] | Optional | Long.MAX_VALUE | | forwarding-pipe-requests | Whether to forward data written by other Pipes (usually data synchronization) | Boolean: true / false | Optional | true | diff --git a/src/zh/UserGuide/Master/Table/AI-capability/AINode_Upgrade_timecho.md b/src/zh/UserGuide/Master/Table/AI-capability/AINode_Upgrade_timecho.md index 95c9239c1..6073128b3 100644 --- a/src/zh/UserGuide/Master/Table/AI-capability/AINode_Upgrade_timecho.md +++ b/src/zh/UserGuide/Master/Table/AI-capability/AINode_Upgrade_timecho.md @@ -98,7 +98,7 @@ SELECT * FROM FORECAST( | targets | 表参数 | SET SEMANTIC | 待预测目标变量的输入数据。IoTDB会自动将数据按时间升序排序再交给AINode 。 | 是 | 使用 SQL 描述带预测目标变量的输入数据,输入的 SQL 不合法时会有对应的查询报错。 | | history\_covs | 标量参数 | 字符串类型(合法的表模型查询 SQL)默认:无 | 指定此次预测任务的协变量的历史数据,这些数据用于辅助目标变量的预测,AINode 不会对历史协变量输出预测结果。在将数据给予模型前,AINode 会自动将数据按时间升序排序。 | 否 | 1. 查询结果只能包含 FIELD 列; 2. 其它:不同模型可能会有独特要求,不符合时会抛出对应的错误。 | | future\_covs | 标量参数 | 字符串类型(合法的表模型查询 SQL) 默认:无 | 指定此次预测任务部分协变量的未来数据,这些数据用于辅助目标变量的预测。 在将数据给予模型前,AINode 会自动将数据按时间升序排序。 | 否 | 1. 当且仅当设置 history\_covs 时可以指定此参数;2. 所涉及协变量名称必须是 history\_covs 的子集; 3. 查询结果只能包含 FIELD 列; 4. 其它:不同模型可能会有独特要求,不符合时会抛出对应的错误。 | -| auto\_adapt | 标量参数 | 布尔类型,默认值:true | 是否为协变量推理开启自适应。| 否 | 当开启自适应时:1. 若未来协变量集合future\_covs不是历史协变量集合history\_covs的子集,将自动抛弃那些不属于历史协变量的未来协变量。2. 若某个历史协变量的长度不等于输入目标变量的长度:a. 小于时,在其头部补 0;b. 大于时,自动丢弃其最早的数据。3. 若某个未来协变量的长度不等于预测长度output\_length: a. 小于时,在其尾部补 0;b. 
大于时,自动丢弃其最新的数据。 | +| auto\_adapt | 标量参数 | 布尔类型,默认值:true | 是否为协变量推理开启自适应。(V2.0.8.2起支持) | 否 | 当开启自适应时:1. 若未来协变量集合future\_covs不是历史协变量集合history\_covs的子集,将自动抛弃那些不属于历史协变量的未来协变量。2. 若某个历史协变量的长度不等于输入目标变量的长度:a. 小于时,在其头部补 0;b. 大于时,自动丢弃其最早的数据。3. 若某个未来协变量的长度不等于预测长度output\_length: a. 小于时,在其尾部补 0;b. 大于时,自动丢弃其最新的数据。 | | output\_start\_time | 标量参数 | 时间戳类型。 默认值:目标变量最后一个时间戳 + output\_interval | 输出的预测点的起始时间戳 【即起报时间】 | 否 | 必须大于目标变量时间戳的最大值 | | output\_length | 标量参数 | INT32 类型。 默认值:96 | 输出窗口大小 | 否 | 必须大于 0 | | output\_interval | 标量参数 | 时间间隔类型。 默认值:(输入数据的最后一个时间戳 - 输入数据的第一个时间戳) / n - 1 | 输出的预测点之间的时间间隔 支持的单位是 ns、us、ms、s、m、h、d、w | 否 | 必须大于 0 | diff --git a/src/zh/UserGuide/Master/Table/API/Programming-Python-Native-API_timecho.md b/src/zh/UserGuide/Master/Table/API/Programming-Python-Native-API_timecho.md index 7bf6dd1b3..81c09a0f2 100644 --- a/src/zh/UserGuide/Master/Table/API/Programming-Python-Native-API_timecho.md +++ b/src/zh/UserGuide/Master/Table/API/Programming-Python-Native-API_timecho.md @@ -47,7 +47,7 @@ TableSession是IoTDB的一个核心类,用于与IoTDB数据库进行交互。 | execute_query_statement | 执行查询 SQL 语句并返回结果集 | sql: str | SessionDataSet | | close | 关闭会话并释放资源 | None | None | -自 V2.0.8 版本起,SessionDataSet 提供分批获取 DataFrame 的方法,用于高效处理大数据量查询: +自 V2.0.8.2 版本起,SessionDataSet 提供分批获取 DataFrame 的方法,用于高效处理大数据量查询: ```python # 分批获取 DataFrame diff --git a/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md b/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md index 60b3100c3..2f6220131 100644 --- a/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md +++ b/src/zh/UserGuide/Master/Table/Basic-Concept/Table-Management_timecho.md @@ -67,7 +67,7 @@ comment **说明:** 1. 在创建表时,可以不指定时间列(TIME),IoTDB会自动添加该列并命名为"time", 且顺序上位于第一列。其他所有列可以通过在数据库配置时启用`enable_auto_create_schema`选项,或通过 session 接口自动创建或修改表的语句来添加。 -2. 自 V2.0.8 版本起,支持创建表时自定义命名时间列,自定义时间列在表中的顺序由创建 SQL 中的顺序决定。相关约束如下: +2. 
自 V2.0.8.2 版本起,支持创建表时自定义命名时间列,自定义时间列在表中的顺序由创建 SQL 中的顺序决定。相关约束如下: - 当列分类(columnCategory)设为 TIME 时,数据类型(dataType)必须为 TIMESTAMP。 - 每张表最多允许定义 1个时间列(columnCategory = TIME)。 - 当未显式定义时间列时,不允许其他列使用 time 作为名称,否则会与系统默认时间列命名冲突。 diff --git a/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md index 6bc003d98..dccbd6acf 100644 --- a/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md +++ b/src/zh/UserGuide/Master/Table/SQL-Manual/SQL-Metadata-Operations_timecho.md @@ -212,7 +212,7 @@ CREATE TABLE tableC ( temperature int32 FIELD COMMENT 'temperature' ) with (TTL=DEFAULT); --- 自定义时间列:命名为time_test, 位于表的第二列 (V2.0.8 起支持) +-- 自定义时间列:命名为time_test, 位于表的第二列 (V2.0.8.2 起支持) CREATE TABLE table1 ( region STRING TAG, time_user_defined TIMESTAMP TIME, diff --git a/src/zh/UserGuide/Master/Table/User-Manual/Audit-Log_timecho.md b/src/zh/UserGuide/Master/Table/User-Manual/Audit-Log_timecho.md index f032eb013..06a3ddd7f 100644 --- a/src/zh/UserGuide/Master/Table/User-Manual/Audit-Log_timecho.md +++ b/src/zh/UserGuide/Master/Table/User-Manual/Audit-Log_timecho.md @@ -38,6 +38,23 @@ 通过编辑配置文件 `iotdb-system.properties` 中如下参数来启动审计日志功能。 +* V2.0.8.1 + + +| 参数名称 | 参数描述 | 数据类型 | 默认值 | 生效方式 | +|-----------------------------------|------------------------------------------------------------------------------------------------------| ---------- | ------------------------ | ---------- | +| `enable_audit_log` | 是否开启审计日志。 true:启用。false:禁用。 | Boolean | false | 热加载 | +| `auditable_operation_type` | 操作类型选择。 DML :所有 DML 都会记录审计日志; DDL :所有 DDL 都会记录审计日志; QUERY :所有 QUERY 都会记录审计日志; CONTROL:所有控制语句都会记录审计日志; | String | DML,DDL,QUERY,CONTROL | 热加载 | +| `auditable_operation_level` | 权限级别选择。 global :记录全部的审计日志; object:仅针对数据实例的事件的审计日志会被记录; 包含关系:object < global。 例如:设置为 global 时,所有审计日志正常记录;设置为 object 时,仅记录对具体数据实例的操作。 | String | global | 热加载 | +| `auditable_operation_result` | 审计结果选择。 success:只记录成功事件的审计日志; 
fail:只记录失败事件的审计日志; | String | success, fail | 热加载 | +| `audit_log_ttl_in_days` | 审计日志的 TTL,生成审计日志的时间达到该阈值后过期。 | Double | -1.0(永远不会被删除) | 热加载 | +| `audit_log_space_tl_in_GB` | 审计日志的 SpaceTL,审计日志总空间达到该阈值后开始轮转删除。 | Double | 1.0| 热加载| +| `audit_log_batch_interval_in_ms` | 审计日志批量写入的时间间隔 | Long | 1000 | 热加载 | +| `audit_log_batch_max_queue_bytes` | 用于批量处理审计日志的队列最大字节数。当队列大小超过此值时,后续的写入操作将被阻塞。 | Long | 268435456 | 热加载 | + + +* V2.0.8.2 + | 参数名称 | 参数描述 | 数据类型 | 默认值 | 生效方式 | |-----------------------------------|------------------------------------------------------------------------------------------------------| ---------- | ------------------------ | ---------- | | `enable_audit_log` | 是否开启审计日志。 true:启用。false:禁用。 | Boolean | false | 热加载 | diff --git a/src/zh/UserGuide/Master/Table/User-Manual/Tree-to-Table_timecho.md b/src/zh/UserGuide/Master/Table/User-Manual/Tree-to-Table_timecho.md index 5d7ae3c52..5cd9bef30 100644 --- a/src/zh/UserGuide/Master/Table/User-Manual/Tree-to-Table_timecho.md +++ b/src/zh/UserGuide/Master/Table/User-Manual/Tree-to-Table_timecho.md @@ -79,7 +79,7 @@ comment * 若树模型中的设备不包含某些声明的 FIELD 列,或与声明的 FIELD 列的数据类型不一致,则在查询该设备时,该 FIELD 列的值永远为 NULL。 * 若未指定 FIELD 列,则创建时会自动扫描出`prefixPath`子树下所有的测点(包括定义为所有普通序列的测点,以及挂载路径与 `prefixPath `有所重合的所有模板中的测点),列名使用树模型测点名称。 * 不支持树模型存在名称(含小写)相同但类型不同的测点 -* `TIME`:创建视图时可以不指定时间列(TIME),IoTDB 会自动添加该列并命名为"time", 且顺序上位于第一列。自 V2.0.8 版本起,支持创建视图时**自定义命名时间列**,自定义时间列在视图中的顺序由创建 SQL 中的顺序决定。相关约束如下: +* `TIME`:创建视图时可以不指定时间列(TIME),IoTDB 会自动添加该列并命名为"time", 且顺序上位于第一列。自 V2.0.8.2 版本起,支持创建视图时**自定义命名时间列**,自定义时间列在视图中的顺序由创建 SQL 中的顺序决定。相关约束如下: * 当列分类(columnCategory)设为 `TIME` 时,数据类型(dataType)必须为 `TIMESTAMP`。 * 每个视图最多允许定义 1个时间列(columnCategory = TIME)。 * 当未显式定义时间列时,不允许其他列使用 `time` 作为名称,否则会与系统默认时间列命名冲突。 @@ -140,7 +140,7 @@ AS root.db.** with (ttl=604800000) ``` -* 当需要自定义时间列(V2.0.8 起支持)时,SQL 变更如下: +* 当需要自定义时间列(V2.0.8.2 起支持)时,SQL 变更如下: ```SQL CREATE OR REPLACE VIEW viewdb."风机表" @@ -158,7 +158,7 @@ with (ttl=604800000) ### 2.2 修改表视图 #### 2.2.1 语法定义 
-修改表视图功能支持修改视图名称、添加列、列重命名、修改 FIELD 列数据类型(V2.0.8 起支持)、删除列、设置视图的 TTL 属性,以及通过 COMMENT 添加注释。 +修改表视图功能支持修改视图名称、添加列、列重命名、修改 FIELD 列数据类型(V2.0.8.2 起支持)、删除列、设置视图的 TTL 属性,以及通过 COMMENT 添加注释。 ```SQL -- 修改视图名 diff --git a/src/zh/UserGuide/Master/Tree/API/Programming-Python-Native-API_timecho.md b/src/zh/UserGuide/Master/Tree/API/Programming-Python-Native-API_timecho.md index f06dbb909..f9895a3d4 100644 --- a/src/zh/UserGuide/Master/Tree/API/Programming-Python-Native-API_timecho.md +++ b/src/zh/UserGuide/Master/Tree/API/Programming-Python-Native-API_timecho.md @@ -555,7 +555,7 @@ session.close() df = ... ``` -自 V2.0.8 版本起,SessionDataSet 提供分批获取 DataFrame 的方法,用于高效处理大数据量查询: +自 V2.0.8.2 版本起,SessionDataSet 提供分批获取 DataFrame 的方法,用于高效处理大数据量查询: ```python # 分批获取 DataFrame diff --git a/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md b/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md index 3ffe571b7..b0dc59933 100644 --- a/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md +++ b/src/zh/UserGuide/Master/Tree/Basic-Concept/Operate-Metadata_timecho.md @@ -408,7 +408,7 @@ CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ### 2.3 修改时间序列数据类型 -自 V2.0.8 版本起,支持通过 SQL 语句修改时间序列的数据类型。 +自 V2.0.8.2 版本起,支持通过 SQL 语句修改时间序列的数据类型。 语法定义: @@ -442,7 +442,7 @@ ALTER TIMESERIES root.ln.wf01.wt01.temperature set data type DOUBLE; ### 2.4 修改时间序列名称 -自 V2.0.8 版本起,支持通过 SQL 语句修改时间序列的全路径名称。修改成功后,原有名称作废,但仍在元数据的存储中。 +自 V2.0.8.2 版本起,支持通过 SQL 语句修改时间序列的全路径名称。修改成功后,原有名称作废,但仍在元数据的存储中。 语法定义: @@ -619,7 +619,7 @@ It costs 0.004s * SHOW INVALID TIMESERIES -自 V2.0.8 版本起,支持该 SQL 语句,用于展示**修改全路径名称**成功后的作废时间序列。 +自 V2.0.8.2 版本起,支持该 SQL 语句,用于展示**修改全路径名称**成功后的作废时间序列。 ```SQL IoTDB> show invalid timeSeries diff --git a/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md b/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md index deb356f69..8a7cfb0d3 100644 --- a/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md +++ 
b/src/zh/UserGuide/Master/Tree/SQL-Manual/SQL-Manual_timecho.md @@ -73,14 +73,14 @@ CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` #### 修改时间序列数据类型 -> V2.0.8 起支持该语句 +> V2.0.8.2 起支持该语句 ```sql ALTER TIMESERIES root.ln.wf01.wt01.temperature set data type DOUBLE ``` #### 修改时间序列名称 -> V2.0.8 起支持该语句 +> V2.0.8.2 起支持该语句 ```SQL ALTER TIMESERIES root.ln.wf01.wt01.temperature RENAME TO root.newln.newwf.newwt.temperature diff --git a/src/zh/UserGuide/Master/Tree/User-Manual/Audit-Log_timecho.md b/src/zh/UserGuide/Master/Tree/User-Manual/Audit-Log_timecho.md index 24d7dc565..b63fc4c44 100644 --- a/src/zh/UserGuide/Master/Tree/User-Manual/Audit-Log_timecho.md +++ b/src/zh/UserGuide/Master/Tree/User-Manual/Audit-Log_timecho.md @@ -38,6 +38,23 @@ 通过编辑配置文件 `iotdb-system.properties` 中如下参数来启动审计日志功能。 +* V2.0.8.1 + + +| 参数名称 | 参数描述 | 数据类型 | 默认值 | 生效方式 | +|-----------------------------------|------------------------------------------------------------------------------------------------------| ---------- | ------------------------ | ---------- | +| `enable_audit_log` | 是否开启审计日志。 true:启用。false:禁用。 | Boolean | false | 热加载 | +| `auditable_operation_type` | 操作类型选择。 DML :所有 DML 都会记录审计日志; DDL :所有 DDL 都会记录审计日志; QUERY :所有 QUERY 都会记录审计日志; CONTROL:所有控制语句都会记录审计日志; | String | DML,DDL,QUERY,CONTROL | 热加载 | +| `auditable_operation_level` | 权限级别选择。 global :记录全部的审计日志; object:仅针对数据实例的事件的审计日志会被记录; 包含关系:object < global。 例如:设置为 global 时,所有审计日志正常记录;设置为 object 时,仅记录对具体数据实例的操作。 | String | global | 热加载 | +| `auditable_operation_result` | 审计结果选择。 success:只记录成功事件的审计日志; fail:只记录失败事件的审计日志; | String | success, fail | 热加载 | +| `audit_log_ttl_in_days` | 审计日志的 TTL,生成审计日志的时间达到该阈值后过期。 | Double | -1.0(永远不会被删除) | 热加载 | +| `audit_log_space_tl_in_GB` | 审计日志的 SpaceTL,审计日志总空间达到该阈值后开始轮转删除。 | Double | 1.0| 热加载| +| `audit_log_batch_interval_in_ms` | 审计日志批量写入的时间间隔 | Long | 1000 | 热加载 | +| `audit_log_batch_max_queue_bytes` | 用于批量处理审计日志的队列最大字节数。当队列大小超过此值时,后续的写入操作将被阻塞。 | Long | 268435456 | 热加载 | + + 
+* V2.0.8.2 + | 参数名称 | 参数描述 | 数据类型 | 默认值 | 生效方式 | |-----------------------------------|------------------------------------------------------------------------------------------------------| ---------- | ------------------------ | ---------- | | `enable_audit_log` | 是否开启审计日志。 true:启用。false:禁用。 | Boolean | false | 热加载 | diff --git a/src/zh/UserGuide/Master/Tree/User-Manual/Authority-Management_timecho.md b/src/zh/UserGuide/Master/Tree/User-Manual/Authority-Management_timecho.md index dbd0cfe67..2e4c5c21e 100644 --- a/src/zh/UserGuide/Master/Tree/User-Manual/Authority-Management_timecho.md +++ b/src/zh/UserGuide/Master/Tree/User-Manual/Authority-Management_timecho.md @@ -79,7 +79,7 @@ IoTDB 主要有两类权限:序列权限、全局权限。 | READ_DATA | 允许读取授权路径下的序列数据。 | | WRITE_DATA | 允许读取授权路径下的序列数据。
允许插入、删除授权路径下的的序列数据。
允许在授权路径下导入、加载数据,在导入数据时,需要拥有对应路径的 WRITE_DATA 权限,在自动创建数据库与序列时,需要有 MANAGE_DATABASE 与 WRITE_SCHEMA 权限。 | | READ_SCHEMA | 允许获取授权路径下元数据树的详细信息:
包括:路径下的数据库、子路径、子节点、设备、序列、模版、视图等。 | -| WRITE_SCHEMA | 允许获取授权路径下元数据树的详细信息。
允许在授权路径下对序列、模版、视图等进行创建、删除、修改操作。
在创建或修改 view 的时候,会检查 view 路径的 WRITE_SCHEMA 权限、数据源的 READ_SCHEMA 权限。
在对 view 进行查询、插入时,会检查 view 路径的 READ_DATA 权限、WRITE_DATA 权限。
允许在授权路径下设置、取消、查看TTL。
允许在授权路径下挂载或者接触挂载模板。
允许在授权路径下对序列进行全路径名称的修改操作。//V2.0.8 起支持该功能 | +| WRITE_SCHEMA | 允许获取授权路径下元数据树的详细信息。
允许在授权路径下对序列、模版、视图等进行创建、删除、修改操作。
在创建或修改 view 的时候,会检查 view 路径的 WRITE_SCHEMA 权限、数据源的 READ_SCHEMA 权限。
在对 view 进行查询、插入时,会检查 view 路径的 READ_DATA 权限、WRITE_DATA 权限。
允许在授权路径下设置、取消、查看TTL。
允许在授权路径下挂载或者解除挂载模板。
允许在授权路径下对序列进行全路径名称的修改操作。//V2.0.8.2 起支持该功能 | ### 3.2 全局权限 diff --git a/src/zh/UserGuide/Master/Tree/User-Manual/Data-Sync_timecho.md b/src/zh/UserGuide/Master/Tree/User-Manual/Data-Sync_timecho.md index 7bbd46954..dd371b9cf 100644 --- a/src/zh/UserGuide/Master/Tree/User-Manual/Data-Sync_timecho.md +++ b/src/zh/UserGuide/Master/Tree/User-Manual/Data-Sync_timecho.md @@ -606,7 +606,7 @@ pipe_all_sinks_rate_limit_bytes_per_second=-1 | mode.streaming | 此参数指定时序数据写入的捕获来源。适用于 `mode.streaming`为 `false` 模式下的场景,决定`inclusion`中`data.insert`数据的捕获来源。提供两种捕获策略:true: 动态选择捕获的类型。系统将根据下游处理速度,自适应地选择是捕获每个写入请求还是仅捕获 TsFile 文件的封口请求。当下游处理速度快时,优先捕获写入请求以减少延迟;当处理速度慢时,仅捕获文件封口请求以避免处理堆积。这种模式适用于大多数场景,能够实现处理延迟和吞吐量的最优平衡。false:固定按批捕获方式。仅捕获 TsFile 文件的封口请求,适用于资源受限的应用场景,以降低系统负载。注意,pipe 启动时捕获的快照数据只会以文件的方式供下游处理。 | Boolean: true / false | 否 | true | | mode.strict | 在使用 time / path / database-name / table-name 参数过滤数据时,是否需要严格按照条件筛选:`true`: 严格筛选。系统将完全按照给定条件过滤筛选被捕获的数据,确保只有符合条件的数据被选中。`false`:非严格筛选。系统在筛选被捕获的数据时可能会包含一些额外的数据,适用于性能敏感的场景,可降低 CPU 和 IO 消耗。 | Boolean: true / false | 否 | true | | mode.snapshot | 此参数决定时序数据的捕获方式,影响`inclusion`中的`data`数据。提供两种模式:`true`:静态数据捕获。启动 pipe 时,会进行一次性的数据快照捕获。当快照数据被完全消费后,**pipe 将自动终止(DROP PIPE SQL 会自动执行)**。`false`:动态数据捕获。除了在 pipe 启动时捕获快照数据外,还会持续捕获后续的数据变更。pipe 将持续运行以处理动态数据流。 | Boolean: true / false | 否 | false | -| path | 当用户连接指定的sql_dialect为tree时可以指定。对于升级上来的用户pipe,默认sql_dialect为tree。此参数决定时序数据的捕获范围,影响 inclusion中的data数据,以及部分序列相关的元数据。当数据的树模型路径能够被path匹配时,数据会被筛选出来进入流处理pipe。
自 V2.0.8 版本起,该参数支持在一个pipe中填写多个精确路径的path , 如 `'path'='root.test.d0,s1,root.test.d0.s2,root.test.d0.s3'` | String:IoTDB标准的树路径模式,可以带通配符 | 选填 | root.** | +| path | 当用户连接指定的sql_dialect为tree时可以指定。对于升级上来的用户pipe,默认sql_dialect为tree。此参数决定时序数据的捕获范围,影响 inclusion中的data数据,以及部分序列相关的元数据。当数据的树模型路径能够被path匹配时,数据会被筛选出来进入流处理pipe。
自 V2.0.8.2 版本起,该参数支持在一个pipe中填写多个精确路径的path , 如 `'path'='root.test.d0.s1,root.test.d0.s2,root.test.d0.s3'` | String:IoTDB标准的树路径模式,可以带通配符 | 选填 | root.** |
大于时,自动丢弃其最新的数据。 | | output\_start\_time | 标量参数 | 时间戳类型。 默认值:目标变量最后一个时间戳 + output\_interval | 输出的预测点的起始时间戳 【即起报时间】 | 否 | 必须大于目标变量时间戳的最大值 | | output\_length | 标量参数 | INT32 类型。 默认值:96 | 输出窗口大小 | 否 | 必须大于 0 | | output\_interval | 标量参数 | 时间间隔类型。 默认值:(输入数据的最后一个时间戳 - 输入数据的第一个时间戳) / n - 1 | 输出的预测点之间的时间间隔 支持的单位是 ns、us、ms、s、m、h、d、w | 否 | 必须大于 0 | diff --git a/src/zh/UserGuide/latest-Table/API/Programming-Python-Native-API_timecho.md b/src/zh/UserGuide/latest-Table/API/Programming-Python-Native-API_timecho.md index cd68cae1e..20925783d 100644 --- a/src/zh/UserGuide/latest-Table/API/Programming-Python-Native-API_timecho.md +++ b/src/zh/UserGuide/latest-Table/API/Programming-Python-Native-API_timecho.md @@ -47,7 +47,7 @@ TableSession是IoTDB的一个核心类,用于与IoTDB数据库进行交互。 | execute_query_statement | 执行查询 SQL 语句并返回结果集 | sql: str | SessionDataSet | | close | 关闭会话并释放资源 | None | None | -自 V2.0.8 版本起,SessionDataSet 提供分批获取 DataFrame 的方法,用于高效处理大数据量查询: +自 V2.0.8.2 版本起,SessionDataSet 提供分批获取 DataFrame 的方法,用于高效处理大数据量查询: ```python # 分批获取 DataFrame diff --git a/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md b/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md index 60b3100c3..2f6220131 100644 --- a/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md +++ b/src/zh/UserGuide/latest-Table/Basic-Concept/Table-Management_timecho.md @@ -67,7 +67,7 @@ comment **说明:** 1. 在创建表时,可以不指定时间列(TIME),IoTDB会自动添加该列并命名为"time", 且顺序上位于第一列。其他所有列可以通过在数据库配置时启用`enable_auto_create_schema`选项,或通过 session 接口自动创建或修改表的语句来添加。 -2. 自 V2.0.8 版本起,支持创建表时自定义命名时间列,自定义时间列在表中的顺序由创建 SQL 中的顺序决定。相关约束如下: +2. 
自 V2.0.8.2 版本起,支持创建表时自定义命名时间列,自定义时间列在表中的顺序由创建 SQL 中的顺序决定。相关约束如下: - 当列分类(columnCategory)设为 TIME 时,数据类型(dataType)必须为 TIMESTAMP。 - 每张表最多允许定义 1个时间列(columnCategory = TIME)。 - 当未显式定义时间列时,不允许其他列使用 time 作为名称,否则会与系统默认时间列命名冲突。 diff --git a/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md index 6bc003d98..dccbd6acf 100644 --- a/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md +++ b/src/zh/UserGuide/latest-Table/SQL-Manual/SQL-Metadata-Operations_timecho.md @@ -212,7 +212,7 @@ CREATE TABLE tableC ( temperature int32 FIELD COMMENT 'temperature' ) with (TTL=DEFAULT); --- 自定义时间列:命名为time_test, 位于表的第二列 (V2.0.8 起支持) +-- 自定义时间列:命名为time_test, 位于表的第二列 (V2.0.8.2 起支持) CREATE TABLE table1 ( region STRING TAG, time_user_defined TIMESTAMP TIME, diff --git a/src/zh/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md b/src/zh/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md index c8e3b2153..0f727c4ec 100644 --- a/src/zh/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md +++ b/src/zh/UserGuide/latest-Table/User-Manual/Audit-Log_timecho.md @@ -38,6 +38,23 @@ 通过编辑配置文件 `iotdb-system.properties` 中如下参数来启动审计日志功能。 +* V2.0.8.1 + + +| 参数名称 | 参数描述 | 数据类型 | 默认值 | 生效方式 | +|-----------------------------------|------------------------------------------------------------------------------------------------------| ---------- | ------------------------ | ---------- | +| `enable_audit_log` | 是否开启审计日志。 true:启用。false:禁用。 | Boolean | false | 热加载 | +| `auditable_operation_type` | 操作类型选择。 DML :所有 DML 都会记录审计日志; DDL :所有 DDL 都会记录审计日志; QUERY :所有 QUERY 都会记录审计日志; CONTROL:所有控制语句都会记录审计日志; | String | DML,DDL,QUERY,CONTROL | 热加载 | +| `auditable_operation_level` | 权限级别选择。 global :记录全部的审计日志; object:仅针对数据实例的事件的审计日志会被记录; 包含关系:object < global。 例如:设置为 global 时,所有审计日志正常记录;设置为 object 时,仅记录对具体数据实例的操作。 | String | global | 热加载 | +| `auditable_operation_result` | 审计结果选择。 success:只记录成功事件的审计日志; 
fail:只记录失败事件的审计日志; | String | success, fail | 热加载 | +| `audit_log_ttl_in_days` | 审计日志的 TTL,生成审计日志的时间达到该阈值后过期。 | Double | -1.0(永远不会被删除) | 热加载 | +| `audit_log_space_tl_in_GB` | 审计日志的 SpaceTL,审计日志总空间达到该阈值后开始轮转删除。 | Double | 1.0| 热加载| +| `audit_log_batch_interval_in_ms` | 审计日志批量写入的时间间隔 | Long | 1000 | 热加载 | +| `audit_log_batch_max_queue_bytes` | 用于批量处理审计日志的队列最大字节数。当队列大小超过此值时,后续的写入操作将被阻塞。 | Long | 268435456 | 热加载 | + + +* V2.0.8.2 + | 参数名称 | 参数描述 | 数据类型 | 默认值 | 生效方式 | |-----------------------------------|------------------------------------------------------------------------------------------------------| ---------- | ------------------------ | ---------- | | `enable_audit_log` | 是否开启审计日志。 true:启用。false:禁用。 | Boolean | false | 热加载 | diff --git a/src/zh/UserGuide/latest-Table/User-Manual/Tree-to-Table_timecho.md b/src/zh/UserGuide/latest-Table/User-Manual/Tree-to-Table_timecho.md index 5d7ae3c52..5cd9bef30 100644 --- a/src/zh/UserGuide/latest-Table/User-Manual/Tree-to-Table_timecho.md +++ b/src/zh/UserGuide/latest-Table/User-Manual/Tree-to-Table_timecho.md @@ -79,7 +79,7 @@ comment * 若树模型中的设备不包含某些声明的 FIELD 列,或与声明的 FIELD 列的数据类型不一致,则在查询该设备时,该 FIELD 列的值永远为 NULL。 * 若未指定 FIELD 列,则创建时会自动扫描出`prefixPath`子树下所有的测点(包括定义为所有普通序列的测点,以及挂载路径与 `prefixPath `有所重合的所有模板中的测点),列名使用树模型测点名称。 * 不支持树模型存在名称(含小写)相同但类型不同的测点 -* `TIME`:创建视图时可以不指定时间列(TIME),IoTDB 会自动添加该列并命名为"time", 且顺序上位于第一列。自 V2.0.8 版本起,支持创建视图时**自定义命名时间列**,自定义时间列在视图中的顺序由创建 SQL 中的顺序决定。相关约束如下: +* `TIME`:创建视图时可以不指定时间列(TIME),IoTDB 会自动添加该列并命名为"time", 且顺序上位于第一列。自 V2.0.8.2 版本起,支持创建视图时**自定义命名时间列**,自定义时间列在视图中的顺序由创建 SQL 中的顺序决定。相关约束如下: * 当列分类(columnCategory)设为 `TIME` 时,数据类型(dataType)必须为 `TIMESTAMP`。 * 每个视图最多允许定义 1个时间列(columnCategory = TIME)。 * 当未显式定义时间列时,不允许其他列使用 `time` 作为名称,否则会与系统默认时间列命名冲突。 @@ -140,7 +140,7 @@ AS root.db.** with (ttl=604800000) ``` -* 当需要自定义时间列(V2.0.8 起支持)时,SQL 变更如下: +* 当需要自定义时间列(V2.0.8.2 起支持)时,SQL 变更如下: ```SQL CREATE OR REPLACE VIEW viewdb."风机表" @@ -158,7 +158,7 @@ with (ttl=604800000) ### 2.2 修改表视图 #### 2.2.1 语法定义 
-修改表视图功能支持修改视图名称、添加列、列重命名、修改 FIELD 列数据类型(V2.0.8 起支持)、删除列、设置视图的 TTL 属性,以及通过 COMMENT 添加注释。 +修改表视图功能支持修改视图名称、添加列、列重命名、修改 FIELD 列数据类型(V2.0.8.2 起支持)、删除列、设置视图的 TTL 属性,以及通过 COMMENT 添加注释。 ```SQL -- 修改视图名 diff --git a/src/zh/UserGuide/latest/API/Programming-Python-Native-API_timecho.md b/src/zh/UserGuide/latest/API/Programming-Python-Native-API_timecho.md index f06dbb909..f9895a3d4 100644 --- a/src/zh/UserGuide/latest/API/Programming-Python-Native-API_timecho.md +++ b/src/zh/UserGuide/latest/API/Programming-Python-Native-API_timecho.md @@ -555,7 +555,7 @@ session.close() df = ... ``` -自 V2.0.8 版本起,SessionDataSet 提供分批获取 DataFrame 的方法,用于高效处理大数据量查询: +自 V2.0.8.2 版本起,SessionDataSet 提供分批获取 DataFrame 的方法,用于高效处理大数据量查询: ```python # 分批获取 DataFrame diff --git a/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md b/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md index 8ad637a6c..f42f3ff43 100644 --- a/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md +++ b/src/zh/UserGuide/latest/Basic-Concept/Operate-Metadata_timecho.md @@ -408,7 +408,7 @@ CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ### 2.3 修改时间序列数据类型 -自 V2.0.8 版本起,支持通过 SQL 语句修改时间序列的数据类型。 +自 V2.0.8.2 版本起,支持通过 SQL 语句修改时间序列的数据类型。 语法定义: @@ -442,7 +442,7 @@ ALTER TIMESERIES root.ln.wf01.wt01.temperature set data type DOUBLE; ### 2.4 修改时间序列名称 -自 V2.0.8 版本起,支持通过 SQL 语句修改时间序列的全路径名称。修改成功后,原有名称作废,但仍在元数据的存储中。 +自 V2.0.8.2 版本起,支持通过 SQL 语句修改时间序列的全路径名称。修改成功后,原有名称作废,但仍在元数据的存储中。 语法定义: @@ -620,7 +620,7 @@ It costs 0.004s * SHOW INVALID TIMESERIES -自 V2.0.8 版本起,支持该 SQL 语句,用于展示**修改全路径名称**成功后的作废时间序列。 +自 V2.0.8.2 版本起,支持该 SQL 语句,用于展示**修改全路径名称**成功后的作废时间序列。 ```SQL IoTDB> show invalid timeSeries diff --git a/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md b/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md index deb356f69..8a7cfb0d3 100644 --- a/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md +++ b/src/zh/UserGuide/latest/SQL-Manual/SQL-Manual_timecho.md 
@@ -73,14 +73,14 @@ CREATE ALIGNED TIMESERIES root.ln.wf01.GPS(latitude FLOAT, longitude FLOAT); ``` #### 修改时间序列数据类型 -> V2.0.8 起支持该语句 +> V2.0.8.2 起支持该语句 ```sql ALTER TIMESERIES root.ln.wf01.wt01.temperature set data type DOUBLE ``` #### 修改时间序列名称 -> V2.0.8 起支持该语句 +> V2.0.8.2 起支持该语句 ```SQL ALTER TIMESERIES root.ln.wf01.wt01.temperature RENAME TO root.newln.newwf.newwt.temperature diff --git a/src/zh/UserGuide/latest/User-Manual/Audit-Log_timecho.md b/src/zh/UserGuide/latest/User-Manual/Audit-Log_timecho.md index 7ff956b88..323399afc 100644 --- a/src/zh/UserGuide/latest/User-Manual/Audit-Log_timecho.md +++ b/src/zh/UserGuide/latest/User-Manual/Audit-Log_timecho.md @@ -38,6 +38,23 @@ 通过编辑配置文件 `iotdb-system.properties` 中如下参数来启动审计日志功能。 +* V2.0.8.1 + + +| 参数名称 | 参数描述 | 数据类型 | 默认值 | 生效方式 | +|-----------------------------------|------------------------------------------------------------------------------------------------------| ---------- | ------------------------ | ---------- | +| `enable_audit_log` | 是否开启审计日志。 true:启用。false:禁用。 | Boolean | false | 热加载 | +| `auditable_operation_type` | 操作类型选择。 DML :所有 DML 都会记录审计日志; DDL :所有 DDL 都会记录审计日志; QUERY :所有 QUERY 都会记录审计日志; CONTROL:所有控制语句都会记录审计日志; | String | DML,DDL,QUERY,CONTROL | 热加载 | +| `auditable_operation_level` | 权限级别选择。 global :记录全部的审计日志; object:仅针对数据实例的事件的审计日志会被记录; 包含关系:object < global。 例如:设置为 global 时,所有审计日志正常记录;设置为 object 时,仅记录对具体数据实例的操作。 | String | global | 热加载 | +| `auditable_operation_result` | 审计结果选择。 success:只记录成功事件的审计日志; fail:只记录失败事件的审计日志; | String | success, fail | 热加载 | +| `audit_log_ttl_in_days` | 审计日志的 TTL,生成审计日志的时间达到该阈值后过期。 | Double | -1.0(永远不会被删除) | 热加载 | +| `audit_log_space_tl_in_GB` | 审计日志的 SpaceTL,审计日志总空间达到该阈值后开始轮转删除。 | Double | 1.0| 热加载| +| `audit_log_batch_interval_in_ms` | 审计日志批量写入的时间间隔 | Long | 1000 | 热加载 | +| `audit_log_batch_max_queue_bytes` | 用于批量处理审计日志的队列最大字节数。当队列大小超过此值时,后续的写入操作将被阻塞。 | Long | 268435456 | 热加载 | + + +* V2.0.8.2 + | 参数名称 | 参数描述 | 数据类型 | 默认值 | 生效方式 | 
|-----------------------------------|------------------------------------------------------------------------------------------------------| ---------- | ------------------------ | ---------- | | `enable_audit_log` | 是否开启审计日志。 true:启用。false:禁用。 | Boolean | false | 热加载 | diff --git a/src/zh/UserGuide/latest/User-Manual/Authority-Management_timecho.md b/src/zh/UserGuide/latest/User-Manual/Authority-Management_timecho.md index dbd0cfe67..2e4c5c21e 100644 --- a/src/zh/UserGuide/latest/User-Manual/Authority-Management_timecho.md +++ b/src/zh/UserGuide/latest/User-Manual/Authority-Management_timecho.md @@ -79,7 +79,7 @@ IoTDB 主要有两类权限:序列权限、全局权限。 | READ_DATA | 允许读取授权路径下的序列数据。 | | WRITE_DATA | 允许读取授权路径下的序列数据。
允许插入、删除授权路径下的的序列数据。
允许在授权路径下导入、加载数据,在导入数据时,需要拥有对应路径的 WRITE_DATA 权限,在自动创建数据库与序列时,需要有 MANAGE_DATABASE 与 WRITE_SCHEMA 权限。 | | READ_SCHEMA | 允许获取授权路径下元数据树的详细信息:
包括:路径下的数据库、子路径、子节点、设备、序列、模版、视图等。 | -| WRITE_SCHEMA | 允许获取授权路径下元数据树的详细信息。
允许在授权路径下对序列、模版、视图等进行创建、删除、修改操作。
在创建或修改 view 的时候,会检查 view 路径的 WRITE_SCHEMA 权限、数据源的 READ_SCHEMA 权限。
在对 view 进行查询、插入时,会检查 view 路径的 READ_DATA 权限、WRITE_DATA 权限。
允许在授权路径下设置、取消、查看TTL。
允许在授权路径下挂载或者接触挂载模板。
允许在授权路径下对序列进行全路径名称的修改操作。//V2.0.8 起支持该功能 | +| WRITE_SCHEMA | 允许获取授权路径下元数据树的详细信息。
允许在授权路径下对序列、模版、视图等进行创建、删除、修改操作。
在创建或修改 view 的时候,会检查 view 路径的 WRITE_SCHEMA 权限、数据源的 READ_SCHEMA 权限。
在对 view 进行查询、插入时,会检查 view 路径的 READ_DATA 权限、WRITE_DATA 权限。
允许在授权路径下设置、取消、查看TTL。
允许在授权路径下挂载或者解除挂载模板。
允许在授权路径下对序列进行全路径名称的修改操作。//V2.0.8.2 起支持该功能 | ### 3.2 全局权限 diff --git a/src/zh/UserGuide/latest/User-Manual/Data-Sync_timecho.md b/src/zh/UserGuide/latest/User-Manual/Data-Sync_timecho.md index 7bbd46954..dd371b9cf 100644 --- a/src/zh/UserGuide/latest/User-Manual/Data-Sync_timecho.md +++ b/src/zh/UserGuide/latest/User-Manual/Data-Sync_timecho.md @@ -606,7 +606,7 @@ pipe_all_sinks_rate_limit_bytes_per_second=-1 | mode.streaming | 此参数指定时序数据写入的捕获来源。适用于 `mode.streaming`为 `false` 模式下的场景,决定`inclusion`中`data.insert`数据的捕获来源。提供两种捕获策略:true: 动态选择捕获的类型。系统将根据下游处理速度,自适应地选择是捕获每个写入请求还是仅捕获 TsFile 文件的封口请求。当下游处理速度快时,优先捕获写入请求以减少延迟;当处理速度慢时,仅捕获文件封口请求以避免处理堆积。这种模式适用于大多数场景,能够实现处理延迟和吞吐量的最优平衡。false:固定按批捕获方式。仅捕获 TsFile 文件的封口请求,适用于资源受限的应用场景,以降低系统负载。注意,pipe 启动时捕获的快照数据只会以文件的方式供下游处理。 | Boolean: true / false | 否 | true | | mode.strict | 在使用 time / path / database-name / table-name 参数过滤数据时,是否需要严格按照条件筛选:`true`: 严格筛选。系统将完全按照给定条件过滤筛选被捕获的数据,确保只有符合条件的数据被选中。`false`:非严格筛选。系统在筛选被捕获的数据时可能会包含一些额外的数据,适用于性能敏感的场景,可降低 CPU 和 IO 消耗。 | Boolean: true / false | 否 | true | | mode.snapshot | 此参数决定时序数据的捕获方式,影响`inclusion`中的`data`数据。提供两种模式:`true`:静态数据捕获。启动 pipe 时,会进行一次性的数据快照捕获。当快照数据被完全消费后,**pipe 将自动终止(DROP PIPE SQL 会自动执行)**。`false`:动态数据捕获。除了在 pipe 启动时捕获快照数据外,还会持续捕获后续的数据变更。pipe 将持续运行以处理动态数据流。 | Boolean: true / false | 否 | false | -| path | 当用户连接指定的sql_dialect为tree时可以指定。对于升级上来的用户pipe,默认sql_dialect为tree。此参数决定时序数据的捕获范围,影响 inclusion中的data数据,以及部分序列相关的元数据。当数据的树模型路径能够被path匹配时,数据会被筛选出来进入流处理pipe。
自 V2.0.8 版本起,该参数支持在一个pipe中填写多个精确路径的path , 如 `'path'='root.test.d0,s1,root.test.d0.s2,root.test.d0.s3'` | String:IoTDB标准的树路径模式,可以带通配符 | 选填 | root.** | +| path | 当用户连接指定的sql_dialect为tree时可以指定。对于升级上来的用户pipe,默认sql_dialect为tree。此参数决定时序数据的捕获范围,影响 inclusion中的data数据,以及部分序列相关的元数据。当数据的树模型路径能够被path匹配时,数据会被筛选出来进入流处理pipe。
自 V2.0.8.2 版本起,该参数支持在一个pipe中填写多个精确路径的path , 如 `'path'='root.test.d0.s1,root.test.d0.s2,root.test.d0.s3'` | String:IoTDB标准的树路径模式,可以带通配符 | 选填 | root.** |