diff --git a/inc/bench.h b/inc/bench.h index 99134772..275e07fc 100644 --- a/inc/bench.h +++ b/inc/bench.h @@ -1,1084 +1,1087 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the MIT license as published by the Free Software - * Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -#ifndef INC_BENCH_H_ -#define INC_BENCH_H_ - -#define _GNU_SOURCE -#define CURL_STATICLIB -#define ALLOW_FORBID_FUNC - -#ifdef LINUX - -#ifndef _ALPINE -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#elif DARWIN -#include -#include -#include -#include -#include -#include -#include -#else -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifdef WEBSOCKET -#include -#endif - -#ifdef WINDOWS -#define _CRT_RAND_S -#include -#include -#define SHUT_WR SD_SEND - -typedef unsigned __int32 uint32_t; - -#pragma comment(lib, "ws2_32.lib") -// Some old MinGW/CYGWIN distributions don't define this: -#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING -#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 -#endif // ENABLE_VIRTUAL_TERMINAL_PROCESSING -#else -#define SOCKET_ERROR -1 -#endif - - -#ifndef TSDB_DATA_TYPE_VARCHAR -#define TSDB_DATA_TYPE_VARCHAR 8 -#endif - -#ifndef TSDB_DATA_TYPE_VARBINARY -#define TSDB_DATA_TYPE_VARBINARY 16 -#endif - -#ifndef TSDB_DATA_TYPE_DECIMAL -#define TSDB_DATA_TYPE_DECIMAL 17 -#endif - -#ifndef TSDB_DATA_TYPE_MEDIUMBLOB -#define TSDB_DATA_TYPE_MEDIUMBLOB 19 -#endif - -#ifndef TSDB_DATA_TYPE_MAX -#define TSDB_DATA_TYPE_MAX 20 -#endif - -#define REQ_EXTRA_BUF_LEN 1024 -#define RESP_BUF_LEN 4096 -#define SHORT_1K_SQL_BUFF_LEN 1024 -#define URL_BUFF_LEN 1024 - -#define STR_INSERT_INTO "INSERT INTO " - -// 16*MAX_COLUMNS + (192+32)*2 + insert into -#define HEAD_BUFF_LEN (TSDB_MAX_COLUMNS * 24) - -#define FETCH_BUFFER_SIZE (100 * TSDB_MAX_ALLOWED_SQL_LEN) -#define COND_BUF_LEN (TSDB_MAX_ALLOWED_SQL_LEN - 30) - -#define OPT_ABORT 1 /* –abort */ -#define MAX_RECORDS_PER_REQ 65536 -#define DEFAULT_START_TIME 1500000000000 -#define TELNET_TCP_PORT 6046 -#define INT_BUFF_LEN 12 -#define BIGINT_BUFF_LEN 21 -#define SMALLINT_BUFF_LEN 8 -#define TINYINT_BUFF_LEN 6 -#define BOOL_BUFF_LEN 6 -#define FLOAT_BUFF_LEN 22 -#define DOUBLE_BUFF_LEN 42 -#define JSON_BUFF_LEN 20 -#define TIMESTAMP_BUFF_LEN 21 -#define PRINT_STAT_INTERVAL 30 * 1000 - -#define MAX_QUERY_SQL_COUNT 100 - -#define MAX_JSON_BUFF 6400000 - -#define INPUT_BUF_LEN 256 -#define EXTRA_SQL_LEN 256 -#define DATATYPE_BUFF_LEN (TINY_BUFF_LEN * 3) -#define SML_MAX_BATCH 65536 * 32 -#define DEFAULT_NTHREADS 8 - -#define DEFAULT_CHILDTABLES 10000 -#define DEFAULT_PORT 6030 -#define DEFAULT_REST_PORT 6041 -#define DEFAULT_DATABASE "test" -#define DEFAULT_TB_PREFIX "d" -#define DEFAULT_OUTPUT "./output.txt" -#define DEFAULT_BINWIDTH 64 -#define DEFAULT_REPLICA 1 -#define DEFAULT_CFGNAME_LEN 10 -#define DEFAULT_PREPARED_RAND 10000 -#define DEFAULT_REQ_PER_REQ 30000 -#define DEFAULT_INSERT_ROWS 10000 -#define 
DEFAULT_DISORDER_RANGE 1000 -#define DEFAULT_CREATE_BATCH 10 -#define DEFAULT_SUB_INTERVAL 10000 -#define DEFAULT_QUERY_INTERVAL 10000 -#define BARRAY_MIN_SIZE 8 -#define SML_LINE_SQL_SYNTAX_OFFSET 7 - -// tdengine define macro -#define TSDB_DEFAULT_DURATION_PER_FILE (10 * 1440) - -#define TS_COL_NAME "ts" -#define RD(max) ((max ==0)? 1 : (taosRandom() % (max))) -#define SML_JSON_TAOS_FORMAT 255 - - - -#define BENCH_FILE \ - "(**IMPORTANT**) Set JSON configuration file " \ - "(all options are going to read from this JSON file), " \ - "which is mutually exclusive with other commandline options. " \ - "You can find examples from official repository. " -#define BENCH_CFG_DIR "Configuration directory." -#define BENCH_HOST \ - "Specify FQDN to connect server, default is localhost." -#define BENCH_PORT \ - "The TCP/IP port number to use for the connection, default is 6030." -#define BENCH_MODE \ - "insert mode, default is taosc, options: taosc|rest|stmt|sml" -#define BENCH_USER \ - "The user name to use when connecting to the server, default is root." -#define BENCH_PASS \ - "The password to use when connecting to the server, default is taosdata." -#define BENCH_OUTPUT "The path of result output file, default is ./output.txt." -#define BENCH_THREAD "The number of thread when insert data, default is 8." -#define BENCH_INTERVAL \ - "Insert interval for interlace mode in milliseconds, default is 0." -#define BENCH_STEP "Timestamp step in milliseconds, default is 1." -#define BENCH_SUPPLEMENT \ - "Supplementally insert data without create " \ - "database and table, optional, default is off." -#define BENCH_START_TIMESTAMP \ - "Specify timestamp to insert data. Optional, " \ - "default is 1500000000000 (2017-07-14 10:40:00.000)." -#define BENCH_INTERLACE \ - "The number of interlace rows insert into tables, default is 0." -#define BENCH_BATCH \ - "Number of records in each insert request, default is 30000." -#define BENCH_TABLE "Number of child tables, default is 10000." -#define BENCH_ROWS "Number of records for each table, default is 10000." -#define BENCH_DATABASE "Name of database, default is test." -#define BENCH_COLS_NUM \ - "Number of INT data type columns in table, default is 0." -#define BENCH_PARTIAL_COL_NUM \ - "Specify first numbers of columns has data. " \ - "Rest of columns' data are NULL. Default is all columns have data" -#define BENCH_TAGS "Data type of tables' tags, default is INT,BINARY(16)." -#define BENCH_COLS "Data type of tables' cols, default is FLOAT,INT,FLOAT." -#define BENCH_WIDTH \ - "The default length of nchar and binary if not specified, default is 64." -#define BENCH_PREFIX "Prefix of child table name, default is d." -#define BENCH_ESCAPE \ - "Use escape character in stable and child table name, optional." -#define BENCH_CHINESE \ - "Nchar and binary are basic unicode chinese characters, optional." -#define BENCH_NORMAL "Only create normal table without super table, optional." -#define BENCH_RANDOM "Data source is randomly generated, optional." -#define BENCH_AGGR "Query aggregation function after insertion, optional." -#define BENCH_YES "Pass confirmation prompt to continue, optional." -#define BENCH_RANGE "Range of disordered timestamp, default is 1000." -#define BENCH_DISORDER \ - "Ratio of inserting data with disorder timestamp, default is 0." -#define BENCH_REPLICA \ - "The number of replica when create database, default is 1." -#define BENCH_DEBUG "Debug mode, optional." -#define BENCH_PERFORMANCE "Performance mode, optional." 
-#define BENCH_PREPARE "Random data source size, default is 10000." -#define BENCH_VGROUPS "Specify Vgroups number for creating database, " \ - "only valid with daemon version 3.0+" -#define BENCH_VERSION "Print program version." -#define BENCH_KEEPTRYING "Keep trying if failed to insert, default is no." -#define BENCH_TRYING_INTERVAL \ - "Specify interval between keep trying insert. " \ - "Valid value is a positive number. Only valid " \ - "when keep trying be enabled." - -#ifdef WEBSOCKET -#define BENCH_DSN "The dsn to connect the cloud service." -#define BENCH_TIMEOUT \ - "The timeout wait on websocket query in seconds, default is 10." -#endif - - -#define debugPrint(fmt, ...) \ - do { \ - if (g_arguments->debug_print) { \ - struct tm Tm, *ptm; \ - struct timeval timeSecs; \ - time_t curTime; \ - toolsGetTimeOfDay(&timeSecs); \ - curTime = timeSecs.tv_sec; \ - ptm = toolsLocalTime(&curTime, &Tm); \ - fprintf(stdout, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ - ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(stdout, "DEBG: "); \ - fprintf(stdout, "%s(%d) ", __FILE__, __LINE__); \ - fprintf(stdout, "" fmt, __VA_ARGS__); \ - } \ - } while (0) - -#define debugPrintWithLen(fmt, len, ...) \ - do { \ - if (g_arguments->debug_print) { \ - struct tm Tm, *ptm; \ - struct timeval timeSecs; \ - time_t curTime; \ - toolsGetTimeOfDay(&timeSecs); \ - curTime = timeSecs.tv_sec; \ - ptm = toolsLocalTime(&curTime, &Tm); \ - fnprintf(stdout, len, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ - ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(stdout, "DEBG: "); \ - fprintf(stdout, "%s(%d) ", __FILE__, __LINE__); \ - fprintf(stdout, "" fmt, __VA_ARGS__); \ - } \ - } while (0) - -#define debugPrintJsonNoTime(json) \ - do { \ - if (g_arguments->debug_print) { \ - char *out = tools_cJSON_PrintUnformatted(json); \ - fprintf(stdout, "JSON: %s\n", out); \ - free(out); \ - } \ - } while (0) - -#define debugPrintNoTimestamp(fmt, ...) \ - do { \ - if (g_arguments->debug_print) { \ - fprintf(stdout, "" fmt, __VA_ARGS__); \ - } \ - } while (0) - -#define infoPrintNoTimestamp(fmt, ...) \ - do { \ - fprintf(stdout, "" fmt, __VA_ARGS__); \ - } while (0) - -#define infoPrintNoTimestampToFile(fp, fmt, ...) \ - do { \ - fprintf(fp, "" fmt, __VA_ARGS__); \ - } while (0) - -#define infoPrint(fmt, ...) \ - do { \ - struct tm Tm, *ptm; \ - struct timeval timeSecs; \ - time_t curTime; \ - toolsGetTimeOfDay(&timeSecs); \ - curTime = timeSecs.tv_sec; \ - ptm = toolsLocalTime(&curTime, &Tm); \ - fprintf(stdout, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ - ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(stdout, "INFO: " fmt, __VA_ARGS__); \ - } while (0) - -#define infoPrintToFile(fp, fmt, ...) \ - do { \ - struct tm Tm, *ptm; \ - struct timeval timeSecs; \ - time_t curTime; \ - toolsGetTimeOfDay(&timeSecs); \ - curTime = timeSecs.tv_sec; \ - ptm = toolsLocalTime(&curTime, &Tm); \ - fprintf(fp, "[%02d/%02d %02d:%02d:%02d.%06d] ", ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(fp, "INFO: " fmt, __VA_ARGS__); \ - } while (0) - -#define perfPrint(fmt, ...) 
\ - do { \ - if (g_arguments->performance_print) { \ - struct tm Tm, *ptm; \ - struct timeval timeSecs; \ - time_t curTime; \ - toolsGetTimeOfDay(&timeSecs); \ - curTime = timeSecs.tv_sec; \ - ptm = toolsLocalTime(&curTime, &Tm); \ - fprintf(stderr, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ - ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(stderr, "PERF: " fmt, __VA_ARGS__); \ - if (g_arguments->fpOfInsertResult && !g_arguments->terminate) { \ - fprintf(g_arguments->fpOfInsertResult, \ - "[%02d/%02d %02d:%02d:%02d.%06d] ", \ - ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, \ - ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(g_arguments->fpOfInsertResult, "PERF: "); \ - fprintf(g_arguments->fpOfInsertResult, \ - "" fmt, __VA_ARGS__); \ - } \ - } \ - } while (0) - -#define errorPrint(fmt, ...) \ - do { \ - struct tm Tm, *ptm; \ - struct timeval timeSecs; \ - time_t curTime; \ - toolsGetTimeOfDay(&timeSecs); \ - curTime = timeSecs.tv_sec; \ - ptm = toolsLocalTime(&curTime, &Tm); \ - fprintf(stderr, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ - ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(stderr, "\033[31m"); \ - fprintf(stderr, "ERROR: "); \ - if (g_arguments->debug_print) { \ - fprintf(stderr, "%s(%d) ", __FILE__, __LINE__); \ - } \ - fprintf(stderr, "" fmt, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); \ - if (g_arguments->fpOfInsertResult && !g_arguments->terminate) { \ - fprintf(g_arguments->fpOfInsertResult, \ - "[%02d/%02d %02d:%02d:%02d.%06d] ", ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(g_arguments->fpOfInsertResult, "ERROR: "); \ - fprintf(g_arguments->fpOfInsertResult, "" fmt, __VA_ARGS__); \ - } \ - } while (0) - -#define warnPrint(fmt, ...) \ - do { \ - struct tm Tm, *ptm; \ - struct timeval timeSecs; \ - time_t curTime; \ - toolsGetTimeOfDay(&timeSecs); \ - curTime = timeSecs.tv_sec; \ - ptm = toolsLocalTime(&curTime, &Tm); \ - fprintf(stderr, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ - ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(stderr, "\033[33m"); \ - fprintf(stderr, "WARN: "); \ - if (g_arguments->debug_print) { \ - fprintf(stderr, "%s(%d) ", __FILE__, __LINE__); \ - } \ - fprintf(stderr, "" fmt, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); \ - if (g_arguments->fpOfInsertResult && !g_arguments->terminate) { \ - fprintf(g_arguments->fpOfInsertResult, \ - "[%02d/%02d %02d:%02d:%02d.%06d] ", ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(g_arguments->fpOfInsertResult, "WARN: "); \ - fprintf(g_arguments->fpOfInsertResult, "" fmt, __VA_ARGS__); \ - } \ - } while (0) - -#define succPrint(fmt, ...) 
\ - do { \ - struct tm Tm, *ptm; \ - struct timeval timeSecs; \ - time_t curTime; \ - toolsGetTimeOfDay(&timeSecs); \ - curTime = timeSecs.tv_sec; \ - ptm = toolsLocalTime(&curTime, &Tm); \ - fprintf(stderr, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ - ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(stderr, "\033[32m"); \ - fprintf(stderr, "SUCC: "); \ - if (g_arguments->debug_print) { \ - fprintf(stderr, "%s(%d) ", __FILE__, __LINE__); \ - } \ - fprintf(stderr, "" fmt, __VA_ARGS__); \ - fprintf(stderr, "\033[0m"); \ - if (g_arguments->fpOfInsertResult && !g_arguments->terminate) { \ - fprintf(g_arguments->fpOfInsertResult, \ - "[%02d/%02d %02d:%02d:%02d.%06d] ", ptm->tm_mon + 1, \ - ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ - (int32_t)timeSecs.tv_usec); \ - fprintf(g_arguments->fpOfInsertResult, "SUCC: "); \ - fprintf(g_arguments->fpOfInsertResult, "" fmt, __VA_ARGS__); \ - } \ - } while (0) - -enum TEST_MODE { - INSERT_TEST, // 0 - QUERY_TEST, // 1 - SUBSCRIBE_TEST, // 2 -}; - -enum enumSYNC_MODE { SYNC_MODE, ASYNC_MODE, MODE_BUT }; - -enum enum_TAOS_INTERFACE { - TAOSC_IFACE, - REST_IFACE, - STMT_IFACE, - SML_IFACE, - SML_REST_IFACE, - INTERFACE_BUT -}; - -typedef enum enumQUERY_CLASS { - SPECIFIED_CLASS, - STABLE_CLASS, - CLASS_BUT -} QUERY_CLASS; - -enum _show_db_index { - TSDB_SHOW_DB_NAME_INDEX, - TSDB_SHOW_DB_CREATED_TIME_INDEX, - TSDB_SHOW_DB_NTABLES_INDEX, - TSDB_SHOW_DB_VGROUPS_INDEX, - TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, - TSDB_SHOW_DB_DAYS_INDEX, - TSDB_SHOW_DB_KEEP_INDEX, - TSDB_SHOW_DB_CACHE_INDEX, - TSDB_SHOW_DB_BLOCKS_INDEX, - TSDB_SHOW_DB_MINROWS_INDEX, - TSDB_SHOW_DB_MAXROWS_INDEX, - TSDB_SHOW_DB_WALLEVEL_INDEX, - TSDB_SHOW_DB_FSYNC_INDEX, - TSDB_SHOW_DB_COMP_INDEX, - TSDB_SHOW_DB_CACHELAST_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, - TSDB_SHOW_DB_UPDATE_INDEX, - TSDB_SHOW_DB_STATUS_INDEX, - TSDB_MAX_SHOW_DB -}; - -// -----------------------------------------SHOW TABLES CONFIGURE -// ------------------------------------- - -enum _describe_table_index { - TSDB_DESCRIBE_METRIC_FIELD_INDEX, - TSDB_DESCRIBE_METRIC_TYPE_INDEX, - TSDB_DESCRIBE_METRIC_LENGTH_INDEX, - TSDB_DESCRIBE_METRIC_NOTE_INDEX, - TSDB_MAX_DESCRIBE_METRIC -}; - -typedef struct BArray { - size_t size; - uint64_t capacity; - uint64_t elemSize; - void* pData; -} BArray; - -typedef struct { - uint64_t magic; - uint64_t custom; - uint64_t len; - uint64_t cap; - char data[]; -} dstr; - -static const int DS_HEADER_SIZE = sizeof(uint64_t) * 4; -static const uint64_t MAGIC_NUMBER = 0xDCDC52545344DADA; - -static const int OFF_MAGIC = -4; -static const int OFF_CUSTOM = -3; -static const int OFF_LEN = -2; -static const int OFF_CAP = -1; - -typedef struct SStmtData { - void *data; - char *is_null; -} StmtData; - -typedef struct SChildField { - StmtData stmtData; -} ChildField; - -typedef struct SField { - uint8_t type; - char name[TSDB_COL_NAME_LEN + 1]; - uint32_t length; - bool none; - bool null; - StmtData stmtData; - int64_t max; - int64_t min; - tools_cJSON * values; - bool sma; -} Field; - -typedef struct STSMA { - char* name; - char* func; - char* interval; - char* sliding; - int start_when_inserted; - char* custom; - bool done; -} TSMA; - -// generate row data rule -#define RULE_OLD 0 // old generator method -#define RULE_MIX_RANDOM 1 // old data mix update delete ratio -#define RULE_MIX_ALL 2 // mix with all var data -#define RULE_MIX_TS_CALC 3 // ts calc other column -#define RULE_MIX_FIX_VALUE 4 // fixed value 
with give - -// define suit -#define SUIT_DATAPOS_MEM 1 -#define SUIT_DATAPOS_STT 2 -#define SUIT_DATAPOS_FILE 3 -#define SUIT_DATAPOS_MUL_FILE 4 -#define SUIT_DATAPOS_MIX 5 - -enum CONTINUE_IF_FAIL_MODE { - NO_IF_FAILED, // 0 - YES_IF_FAILED, // 1 - SMART_IF_FAILED, // 2 -}; - -typedef struct SChildTable_S { - char name[TSDB_TABLE_NAME_LEN]; - bool useOwnSample; - char *sampleDataBuf; - uint64_t insertRows; - BArray *childCols; -} SChildTable; - -typedef struct SSuperTable_S { - char *stbName; - bool random_data_source; // rand_gen or sample - bool use_metric; - char *childTblPrefix; - char *childTblSample; - bool childTblExists; - uint64_t childTblCount; - uint64_t batchTblCreatingNum; // 0: no batch, > 0: batch table number in - char *batchTblCreatingNumbers; // NULL: no numbers - BArray *batchTblCreatingNumbersArray; - char *batchTblCreatingIntervals; // NULL: no interval - BArray *batchTblCreatingIntervalsArray; - // one sql - bool autoTblCreating; - uint16_t iface; // 0: taosc, 1: rest, 2: stmt - uint16_t lineProtocol; - int64_t childTblLimit; - int64_t childTblOffset; - int64_t childTblFrom; - int64_t childTblTo; - enum CONTINUE_IF_FAIL_MODE continueIfFail; - - // int multiThreadWriteOneTbl; // 0: no, 1: yes - uint32_t interlaceRows; // - int disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms, us or ns. according to database precision - - // ratio - uint8_t disRatio; // disorder ratio 0 ~ 100 % - uint8_t updRatio; // update ratio 0 ~ 100 % - uint8_t delRatio; // delete ratio 0 ~ 100 % - - // range - uint64_t disRange; // disorder range - uint64_t updRange; // update range - uint64_t delRange; // delete range - - // generate row value rule see pre RULE_ define - uint8_t genRowRule; - - // data position - uint8_t dataPos; // see define DATAPOS_ - - uint32_t fillIntervalUpd; // fill Upd interval rows cnt - uint32_t fillIntervalDis; // fill Dis interval rows cnt - - // binary prefix - char *binaryPrefex; - // nchar prefix - char *ncharPrefex; - - // random write future time - bool useNow; - bool writeFuture; - int32_t durMinute; // passed database->durMinute - int32_t checkInterval; // check correct interval - - int64_t max_sql_len; - uint64_t insert_interval; - uint64_t insertRows; - uint64_t timestamp_step; - int64_t startTimestamp; - int64_t specifiedColumns; - char sampleFile[MAX_FILE_NAME_LEN]; - char tagsFile[MAX_FILE_NAME_LEN]; - uint32_t partialColNum; - char *partialColNameBuf; - BArray *cols; - BArray *tags; - BArray *tsmas; - SChildTable **childTblArray; - char *colsOfCreateChildTable; - uint32_t lenOfTags; - uint32_t lenOfCols; - - char *sampleDataBuf; - bool useSampleTs; - char *tagDataBuf; - bool tcpTransfer; - bool non_stop; - char *comment; - int delay; - int file_factor; - char *rollup; - char *max_delay; - char *watermark; - int ttl; - int32_t keep_trying; - uint32_t trying_interval; -} SSuperTable; - -typedef struct SDbCfg_S { - char* name; - char* valuestring; - int valueint; -} SDbCfg; - -typedef struct SSTREAM_S { - char stream_name[TSDB_TABLE_NAME_LEN]; - char stream_stb[TSDB_TABLE_NAME_LEN]; - char stream_stb_field[TSDB_DEFAULT_PKT_SIZE]; - char stream_tag_field[TSDB_DEFAULT_PKT_SIZE]; - char subtable[TSDB_DEFAULT_PKT_SIZE]; - char trigger_mode[BIGINT_BUFF_LEN]; - char watermark[BIGINT_BUFF_LEN]; - char ignore_expired[BIGINT_BUFF_LEN]; - char ignore_update[BIGINT_BUFF_LEN]; - char fill_history[BIGINT_BUFF_LEN]; - char source_sql[TSDB_DEFAULT_PKT_SIZE]; - bool drop; -} SSTREAM; - -#ifdef TD_VER_COMPATIBLE_3_0_0_0 -typedef struct SVGroup_S { - 
int32_t vgId; - uint64_t tbCountPerVgId; - SChildTable **childTblArray; - uint64_t tbOffset; // internal use -} SVGroup; -#endif // TD_VER_COMPATIBLE_3_0_0_0 - // -typedef struct SDataBase_S { - char * dbName; - bool drop; // 0: use exists, 1: if exists, drop then new create - int precision; - int sml_precision; - int durMinute; // duration minutes - BArray *cfgs; - BArray *superTbls; -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - int32_t vgroups; - BArray *vgArray; -#endif // TD_VER_COMPATIBLE_3_0_0_0 - bool flush; -} SDataBase; - -typedef struct SSQL_S { - char *command; - char result[MAX_FILE_NAME_LEN]; - int64_t* delay_list; -} SSQL; - -typedef struct SpecifiedQueryInfo_S { - uint64_t queryInterval; // 0: unlimited > 0 loop/s - uint64_t queryTimes; - uint32_t concurrent; - uint32_t asyncMode; // 0: sync, 1: async - uint64_t subscribeInterval; // ms - uint64_t subscribeTimes; // ms - bool subscribeRestart; - int subscribeKeepProgress; - BArray* sqls; - int resubAfterConsume[MAX_QUERY_SQL_COUNT]; - int endAfterConsume[MAX_QUERY_SQL_COUNT]; - TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT]; - char topic[MAX_QUERY_SQL_COUNT][32]; - int consumed[MAX_QUERY_SQL_COUNT]; - TAOS_RES *res[MAX_QUERY_SQL_COUNT]; - uint64_t totalQueried; - bool mixed_query; -} SpecifiedQueryInfo; - -typedef struct SuperQueryInfo_S { - char stbName[TSDB_TABLE_NAME_LEN]; - uint64_t queryInterval; // 0: unlimited > 0 loop/s - uint64_t queryTimes; - uint32_t threadCnt; - uint32_t asyncMode; // 0: sync, 1: async - uint64_t subscribeInterval; // ms - uint64_t subscribeTimes; // ms - bool subscribeRestart; - int subscribeKeepProgress; - int64_t childTblCount; - int sqlCount; - char sql[MAX_QUERY_SQL_COUNT][TSDB_MAX_ALLOWED_SQL_LEN + 1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; - int resubAfterConsume; - int endAfterConsume; - TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT]; - char ** childTblName; - uint64_t totalQueried; -} SuperQueryInfo; - -typedef struct SQueryMetaInfo_S { - SpecifiedQueryInfo specifiedQueryInfo; - SuperQueryInfo superQueryInfo; - uint64_t totalQueried; - uint64_t query_times; - uint64_t killQueryThreshold; - int32_t killQueryInterval; - uint64_t response_buffer; - bool reset_query_cache; - uint16_t iface; - char* dbName; -} SQueryMetaInfo; - - -typedef struct SConsumerInfo_S { - uint32_t concurrent; - uint32_t pollDelay; // ms - char* groupId; - char* clientId; - char* autoOffsetReset; - - char* enableManualCommit; - char* enableAutoCommit; - uint32_t autoCommitIntervalMs; // ms - char* enableHeartbeatBackground; - char* snapshotEnable; - char* msgWithTableName; - char* rowsFile; - int32_t expectRows; - - char topicName[MAX_QUERY_SQL_COUNT][256]; - char topicSql[MAX_QUERY_SQL_COUNT][256]; - int topicCount; -} SConsumerInfo; - -typedef struct STmqMetaInfo_S { - SConsumerInfo consumerInfo; - uint16_t iface; -} STmqMetaInfo; - -typedef struct SArguments_S { - uint8_t taosc_version; - char * metaFile; - int32_t test_mode; - char * host; - uint16_t port; - uint16_t telnet_tcp_port; - bool host_auto; - bool port_auto; - bool port_inputted; - bool cfg_inputted; - char * user; - char * password; - bool answer_yes; - bool debug_print; - bool performance_print; - bool chinese; - char * output_file; - uint32_t binwidth; - uint32_t intColumnCount; - uint32_t nthreads; - bool nthreads_auto; - uint32_t table_threads; - uint64_t prepared_rand; - uint32_t reqPerReq; - uint64_t insert_interval; - bool demo_mode; - bool aggr_func; - struct sockaddr_in serv_addr; - uint64_t totalChildTables; - uint64_t actualChildTables; - uint64_t 
autoCreatedChildTables; - uint64_t existedChildTables; - FILE * fpOfInsertResult; - BArray * databases; - BArray* streams; - char base64_buf[INPUT_BUF_LEN]; -#ifdef LINUX - sem_t cancelSem; -#endif - bool terminate; - bool in_prompt; -#ifdef WEBSOCKET - int32_t timeout; - char* dsn; - bool websocket; -#endif - bool supplementInsert; - int64_t startTimestamp; - int32_t partialColNum; - int32_t keep_trying; - uint32_t trying_interval; - int iface; - int rest_server_ver_major; - bool check_sql; - int suit; // see define SUIT_ -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - int16_t inputted_vgroups; -#endif - enum CONTINUE_IF_FAIL_MODE continueIfFail; - bool mistMode; - bool escape_character; -} SArguments; - -typedef struct SBenchConn { - TAOS* taos; - TAOS* ctaos; // check taos - TAOS_STMT* stmt; -#ifdef WEBSOCKET - WS_TAOS* taos_ws; - WS_STMT* stmt_ws; -#endif -} SBenchConn; - -#define MAX_BATCOLS 256 -typedef struct SThreadInfo_S { - SBenchConn *conn; - uint64_t *bind_ts; - uint64_t *bind_ts_array; - char *bindParams; - char *is_null; - uint32_t threadID; - uint64_t start_table_from; - uint64_t end_table_to; - uint64_t ntables; - uint64_t tables_created; - char * buffer; - uint64_t counter; - uint64_t st; - uint64_t et; - uint64_t samplePos; - uint64_t totalInsertRows; - uint64_t totalQueried; - int64_t totalDelay; - uint64_t querySeq; - TAOS_SUB *tsub; - char ** lines; - uint32_t line_buf_len; - int32_t sockfd; - SDataBase *dbInfo; - SSuperTable *stbInfo; - char **sml_tags; - tools_cJSON *json_array; - tools_cJSON *sml_json_tags; - char **sml_tags_json_array; - char **sml_json_value_array; - uint64_t start_time; - uint64_t max_sql_len; - FILE *fp; - char filePath[MAX_PATH_LEN]; - BArray* delayList; - uint64_t *query_delay_list; - double avg_delay; -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - SVGroup *vg; -#endif - - int posOfTblCreatingBatch; - int posOfTblCreatingInterval; - // new - uint16_t batCols[MAX_BATCOLS]; - uint16_t nBatCols; // valid count for array batCols - - // check sql result - char *csql; - int32_t clen; // csql current write position -} threadInfo; - -typedef struct SQueryThreadInfo_S { - int start_sql; - int end_sql; - int threadId; - BArray* query_delay_list; - int sockfd; - SBenchConn* conn; - int64_t total_delay; -} queryThreadInfo; - -typedef struct STSmaThreadInfo_S { - char* dbName; - char* stbName; - BArray* tsmas; -} tsmaThreadInfo; - -typedef void (*ToolsSignalHandler)(int signum, void *sigInfo, void *context); - -/* ************ Global variables ************ */ -extern char * g_aggreFuncDemo[]; -extern char * g_aggreFunc[]; -extern SArguments * g_arguments; -extern SQueryMetaInfo g_queryInfo; -extern STmqMetaInfo g_tmqInfo; -extern bool g_fail; -extern char configDir[]; -extern tools_cJSON * root; -extern uint64_t g_memoryUsage; - -#define min(a, b) (((a) < (b)) ? 
(a) : (b)) -#define BARRAY_GET_ELEM(array, index) \ - ((void*)((char*)((array)->pData) + (index) * (array)->elemSize)) -/* ************ Function declares ************ */ -/* benchCommandOpt.c */ -int32_t benchParseArgs(int32_t argc, char* argv[]); -void modifyArgument(); -void initArgument(); -void queryAggrFunc(); -void parseFieldDatatype(char *dataType, BArray *fields, bool isTag); -/* demoJsonOpt.c */ -int getInfoFromJsonFile(); -/* demoUtil.c */ -int compare(const void *a, const void *b); -void encodeAuthBase64(); -void replaceChildTblName(char *inSql, char *outSql, int tblIndex); -void setupForAnsiEscape(void); -void resetAfterAnsiEscape(void); -char * convertDatatypeToString(int type); -int convertStringToDatatype(char *type, int length); -unsigned int taosRandom(); -void tmfree(void *buf); -void tmfclose(FILE *fp); -void fetchResult(TAOS_RES *res, threadInfo *pThreadInfo); -void prompt(bool NonStopMode); -void ERROR_EXIT(const char *msg); -int getServerVersionRest(int16_t rest_port); -int postProceSql(char *sqlstr, char* dbName, int precision, int iface, - int protocol, uint16_t rest_port, bool tcp, - int sockfd, char* filePath); -int queryDbExecCall(SBenchConn *conn, char *command); -int queryDbExecRest(char *command, char* dbName, int precision, - int iface, int protocol, bool tcp, int sockfd); -SBenchConn* initBenchConn(); -void closeBenchConn(SBenchConn* conn); -int regexMatch(const char *s, const char *reg, int cflags); -int convertHostToServAddr(char *host, uint16_t port, - struct sockaddr_in *serv_addr); -int getAllChildNameOfSuperTable(TAOS *taos, char *dbName, char *stbName, - char ** childTblNameOfSuperTbl, - int64_t childTblCountOfSuperTbl); -void* benchCalloc(size_t nmemb, size_t size, bool record); -BArray* benchArrayInit(size_t size, size_t elemSize); -void* benchArrayPush(BArray* pArray, void* pData); -void* benchArrayDestroy(BArray* pArray); -void benchArrayClear(BArray* pArray); -void* benchArrayGet(const BArray* pArray, size_t index); -void* benchArrayAddBatch(BArray* pArray, void* pData, int32_t elems); - -#ifdef LINUX -int32_t bsem_wait(sem_t* sem); -void benchSetSignal(int32_t signum, ToolsSignalHandler sigfp); -#endif - -int convertTypeToLength(uint8_t type); -int64_t convertDatatypeToDefaultMax(uint8_t type); -int64_t convertDatatypeToDefaultMin(uint8_t type); - -// dynamic string -char* new_ds(size_t size); -void free_ds(char** ps); -int is_ds(const char* s); -uint64_t ds_custom(const char* s); -void ds_set_custom(char* s, uint64_t custom); -uint64_t ds_len(const char* s); -uint64_t ds_cap(const char* s); -int ds_last(char* s); -char* ds_end(char* s); -char* ds_grow(char**ps, size_t needsize); -char* ds_resize(char** ps, size_t cap); -char * ds_pack(char **ps); -char * ds_add_char(char **ps, char c); -char * ds_add_str(char **ps, const char* sub); -char * ds_add_strs(char **ps, int count, ...); -char * ds_ins_str(char **ps, size_t pos, const char *sub, size_t len); - -int insertTestProcess(); -void postFreeResource(); -int queryTestProcess(); -int subscribeTestProcess(); -int convertServAddr(int iface, bool tcp, int protocol); -int createSockFd(); -void destroySockFd(int sockfd); - -void printVersion(); -int32_t benchParseSingleOpt(int32_t key, char* arg); - -void printErrCmdCodeStr(char *cmd, int32_t code, TAOS_RES *res); -void printWarnCmdCodeStr(char *cmd, int32_t code, TAOS_RES *res); - -#ifndef LINUX -int32_t benchParseArgsNoArgp(int argc, char* argv[]); -#endif - -int32_t execInsert(threadInfo *pThreadInfo, uint32_t k); - -#endif // INC_BENCH_H_ +/* + * 
Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the MIT license as published by the Free Software + * Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef INC_BENCH_H_ +#define INC_BENCH_H_ + +#define _GNU_SOURCE +#define CURL_STATICLIB +#define ALLOW_FORBID_FUNC + +#ifdef LINUX + +#ifndef _ALPINE +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#elif DARWIN +#include +#include +#include +#include +#include +#include +#include +#else +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef WEBSOCKET +#include +#endif + +#ifdef WINDOWS +#define _CRT_RAND_S +#include +#include +#define SHUT_WR SD_SEND + +typedef unsigned __int32 uint32_t; + +#pragma comment(lib, "ws2_32.lib") +// Some old MinGW/CYGWIN distributions don't define this: +#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING +#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 +#endif // ENABLE_VIRTUAL_TERMINAL_PROCESSING +#else +#define SOCKET_ERROR -1 +#endif + + +#ifndef TSDB_DATA_TYPE_VARCHAR +#define TSDB_DATA_TYPE_VARCHAR 8 +#endif + +#ifndef TSDB_DATA_TYPE_VARBINARY +#define TSDB_DATA_TYPE_VARBINARY 16 +#endif + +#ifndef TSDB_DATA_TYPE_DECIMAL +#define TSDB_DATA_TYPE_DECIMAL 17 +#endif + +#ifndef TSDB_DATA_TYPE_MEDIUMBLOB +#define TSDB_DATA_TYPE_MEDIUMBLOB 19 +#endif + +#ifndef TSDB_DATA_TYPE_MAX +#define TSDB_DATA_TYPE_MAX 20 +#endif + +#define REQ_EXTRA_BUF_LEN 1024 +#define RESP_BUF_LEN 4096 +#define SHORT_1K_SQL_BUFF_LEN 1024 +#define URL_BUFF_LEN 1024 + +#define STR_INSERT_INTO "INSERT INTO " + +// 16*MAX_COLUMNS + (192+32)*2 + insert into +#define HEAD_BUFF_LEN (TSDB_MAX_COLUMNS * 24) + +#define FETCH_BUFFER_SIZE (100 * TSDB_MAX_ALLOWED_SQL_LEN) +#define COND_BUF_LEN (TSDB_MAX_ALLOWED_SQL_LEN - 30) + +#define OPT_ABORT 1 /* –abort */ +#define MAX_RECORDS_PER_REQ 65536 +#define DEFAULT_START_TIME 1500000000000 +#define TELNET_TCP_PORT 6046 +#define INT_BUFF_LEN 12 +#define BIGINT_BUFF_LEN 21 +#define SMALLINT_BUFF_LEN 8 +#define TINYINT_BUFF_LEN 6 +#define BOOL_BUFF_LEN 6 +#define FLOAT_BUFF_LEN 22 +#define DOUBLE_BUFF_LEN 42 +#define JSON_BUFF_LEN 20 +#define TIMESTAMP_BUFF_LEN 21 +#define PRINT_STAT_INTERVAL 30 * 1000 + +#define MAX_QUERY_SQL_COUNT 100 + +#define MAX_JSON_BUFF 6400000 + +#define INPUT_BUF_LEN 256 +#define EXTRA_SQL_LEN 256 +#define DATATYPE_BUFF_LEN (TINY_BUFF_LEN * 3) +#define SML_MAX_BATCH 65536 * 32 +#define DEFAULT_NTHREADS 8 + +#define DEFAULT_CHILDTABLES 10000 +#define DEFAULT_PORT 6030 +#define DEFAULT_REST_PORT 6041 +#define DEFAULT_DATABASE "test" +#define DEFAULT_TB_PREFIX "d" +#define DEFAULT_OUTPUT "./output.txt" +#define DEFAULT_BINWIDTH 64 +#define DEFAULT_REPLICA 1 +#define DEFAULT_CFGNAME_LEN 10 +#define DEFAULT_PREPARED_RAND 10000 +#define DEFAULT_REQ_PER_REQ 30000 +#define DEFAULT_INSERT_ROWS 10000 +#define DEFAULT_DISORDER_RANGE 1000 +#define DEFAULT_CREATE_BATCH 10 +#define DEFAULT_SUB_INTERVAL 10000 +#define DEFAULT_QUERY_INTERVAL 10000 +#define 
BARRAY_MIN_SIZE 8 +#define SML_LINE_SQL_SYNTAX_OFFSET 7 + +// tdengine define macro +#define TSDB_DEFAULT_DURATION_PER_FILE (10 * 1440) + +#define TS_COL_NAME "ts" +#define RD(max) ((max ==0)? 1 : (taosRandom() % (max))) +#define SML_JSON_TAOS_FORMAT 255 + + + +#define BENCH_FILE \ + "(**IMPORTANT**) Set JSON configuration file " \ + "(all options are going to read from this JSON file), " \ + "which is mutually exclusive with other commandline options. " \ + "You can find examples from official repository. " +#define BENCH_CFG_DIR "Configuration directory." +#define BENCH_HOST \ + "Specify FQDN to connect server, default is localhost." +#define BENCH_PORT \ + "The TCP/IP port number to use for the connection, default is 6030." +#define BENCH_MODE \ + "insert mode, default is taosc, options: taosc|rest|stmt|sml" +#define BENCH_USER \ + "The user name to use when connecting to the server, default is root." +#define BENCH_PASS \ + "The password to use when connecting to the server, default is taosdata." +#define BENCH_OUTPUT "The path of result output file, default is ./output.txt." +#define BENCH_THREAD "The number of thread when insert data, default is 8." +#define BENCH_INTERVAL \ + "Insert interval for interlace mode in milliseconds, default is 0." +#define BENCH_STEP "Timestamp step in milliseconds, default is 1." +#define BENCH_SUPPLEMENT \ + "Supplementally insert data without create " \ + "database and table, optional, default is off." +#define BENCH_START_TIMESTAMP \ + "Specify timestamp to insert data. Optional, " \ + "default is 1500000000000 (2017-07-14 10:40:00.000)." +#define BENCH_INTERLACE \ + "The number of interlace rows insert into tables, default is 0." +#define BENCH_BATCH \ + "Number of records in each insert request, default is 30000." +#define BENCH_TABLE "Number of child tables, default is 10000." +#define BENCH_ROWS "Number of records for each table, default is 10000." +#define BENCH_DATABASE "Name of database, default is test." +#define BENCH_COLS_NUM \ + "Number of INT data type columns in table, default is 0." +#define BENCH_PARTIAL_COL_NUM \ + "Specify first numbers of columns has data. " \ + "Rest of columns' data are NULL. Default is all columns have data" +#define BENCH_TAGS "Data type of tables' tags, default is INT,BINARY(16)." +#define BENCH_COLS "Data type of tables' cols, default is FLOAT,INT,FLOAT." +#define BENCH_WIDTH \ + "The default length of nchar and binary if not specified, default is 64." +#define BENCH_PREFIX "Prefix of child table name, default is d." +#define BENCH_ESCAPE \ + "Use escape character in stable and child table name, optional." +#define BENCH_CHINESE \ + "Nchar and binary are basic unicode chinese characters, optional." +#define BENCH_NORMAL "Only create normal table without super table, optional." +#define BENCH_RANDOM "Data source is randomly generated, optional." +#define BENCH_AGGR "Query aggregation function after insertion, optional." +#define BENCH_YES "Pass confirmation prompt to continue, optional." +#define BENCH_RANGE "Range of disordered timestamp, default is 1000." +#define BENCH_DISORDER \ + "Ratio of inserting data with disorder timestamp, default is 0." +#define BENCH_REPLICA \ + "The number of replica when create database, default is 1." +#define BENCH_DEBUG "Debug mode, optional." +#define BENCH_PERFORMANCE "Performance mode, optional." +#define BENCH_PREPARE "Random data source size, default is 10000." 
+#define BENCH_VGROUPS "Specify Vgroups number for creating database, " \ + "only valid with daemon version 3.0+" +#define BENCH_VERSION "Print program version." +#define BENCH_KEEPTRYING "Keep trying if failed to insert, default is no." +#define BENCH_TRYING_INTERVAL \ + "Specify interval between keep trying insert. " \ + "Valid value is a positive number. Only valid " \ + "when keep trying be enabled." + +#ifdef WEBSOCKET +#define BENCH_DSN "The dsn to connect the cloud service." +#define BENCH_TIMEOUT \ + "The timeout wait on websocket query in seconds, default is 10." +#endif + + +#define debugPrint(fmt, ...) \ + do { \ + if (g_arguments->debug_print) { \ + struct tm Tm, *ptm; \ + struct timeval timeSecs; \ + time_t curTime; \ + toolsGetTimeOfDay(&timeSecs); \ + curTime = timeSecs.tv_sec; \ + ptm = toolsLocalTime(&curTime, &Tm); \ + fprintf(stdout, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ + ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(stdout, "DEBG: "); \ + fprintf(stdout, "%s(%d) ", __FILE__, __LINE__); \ + fprintf(stdout, "" fmt, __VA_ARGS__); \ + } \ + } while (0) + +#define debugPrintWithLen(fmt, len, ...) \ + do { \ + if (g_arguments->debug_print) { \ + struct tm Tm, *ptm; \ + struct timeval timeSecs; \ + time_t curTime; \ + toolsGetTimeOfDay(&timeSecs); \ + curTime = timeSecs.tv_sec; \ + ptm = toolsLocalTime(&curTime, &Tm); \ + fnprintf(stdout, len, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ + ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(stdout, "DEBG: "); \ + fprintf(stdout, "%s(%d) ", __FILE__, __LINE__); \ + fprintf(stdout, "" fmt, __VA_ARGS__); \ + } \ + } while (0) + +#define debugPrintJsonNoTime(json) \ + do { \ + if (g_arguments->debug_print) { \ + char *out = tools_cJSON_PrintUnformatted(json); \ + fprintf(stdout, "JSON: %s\n", out); \ + free(out); \ + } \ + } while (0) + +#define debugPrintNoTimestamp(fmt, ...) \ + do { \ + if (g_arguments->debug_print) { \ + fprintf(stdout, "" fmt, __VA_ARGS__); \ + } \ + } while (0) + +#define infoPrintNoTimestamp(fmt, ...) \ + do { \ + fprintf(stdout, "" fmt, __VA_ARGS__); \ + } while (0) + +#define infoPrintNoTimestampToFile(fp, fmt, ...) \ + do { \ + fprintf(fp, "" fmt, __VA_ARGS__); \ + } while (0) + +#define infoPrint(fmt, ...) \ + do { \ + struct tm Tm, *ptm; \ + struct timeval timeSecs; \ + time_t curTime; \ + toolsGetTimeOfDay(&timeSecs); \ + curTime = timeSecs.tv_sec; \ + ptm = toolsLocalTime(&curTime, &Tm); \ + fprintf(stdout, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ + ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(stdout, "INFO: " fmt, __VA_ARGS__); \ + } while (0) + +#define infoPrintToFile(fp, fmt, ...) \ + do { \ + struct tm Tm, *ptm; \ + struct timeval timeSecs; \ + time_t curTime; \ + toolsGetTimeOfDay(&timeSecs); \ + curTime = timeSecs.tv_sec; \ + ptm = toolsLocalTime(&curTime, &Tm); \ + fprintf(fp, "[%02d/%02d %02d:%02d:%02d.%06d] ", ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(fp, "INFO: " fmt, __VA_ARGS__); \ + } while (0) + +#define perfPrint(fmt, ...) 
\ + do { \ + if (g_arguments->performance_print) { \ + struct tm Tm, *ptm; \ + struct timeval timeSecs; \ + time_t curTime; \ + toolsGetTimeOfDay(&timeSecs); \ + curTime = timeSecs.tv_sec; \ + ptm = toolsLocalTime(&curTime, &Tm); \ + fprintf(stderr, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ + ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(stderr, "PERF: " fmt, __VA_ARGS__); \ + if (g_arguments->fpOfInsertResult && !g_arguments->terminate) { \ + fprintf(g_arguments->fpOfInsertResult, \ + "[%02d/%02d %02d:%02d:%02d.%06d] ", \ + ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, \ + ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(g_arguments->fpOfInsertResult, "PERF: "); \ + fprintf(g_arguments->fpOfInsertResult, \ + "" fmt, __VA_ARGS__); \ + } \ + } \ + } while (0) + +#define errorPrint(fmt, ...) \ + do { \ + struct tm Tm, *ptm; \ + struct timeval timeSecs; \ + time_t curTime; \ + toolsGetTimeOfDay(&timeSecs); \ + curTime = timeSecs.tv_sec; \ + ptm = toolsLocalTime(&curTime, &Tm); \ + fprintf(stderr, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ + ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(stderr, "\033[31m"); \ + fprintf(stderr, "ERROR: "); \ + if (g_arguments->debug_print) { \ + fprintf(stderr, "%s(%d) ", __FILE__, __LINE__); \ + } \ + fprintf(stderr, "" fmt, __VA_ARGS__); \ + fprintf(stderr, "\033[0m"); \ + if (g_arguments->fpOfInsertResult && !g_arguments->terminate) { \ + fprintf(g_arguments->fpOfInsertResult, \ + "[%02d/%02d %02d:%02d:%02d.%06d] ", ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(g_arguments->fpOfInsertResult, "ERROR: "); \ + fprintf(g_arguments->fpOfInsertResult, "" fmt, __VA_ARGS__); \ + } \ + } while (0) + +#define warnPrint(fmt, ...) \ + do { \ + struct tm Tm, *ptm; \ + struct timeval timeSecs; \ + time_t curTime; \ + toolsGetTimeOfDay(&timeSecs); \ + curTime = timeSecs.tv_sec; \ + ptm = toolsLocalTime(&curTime, &Tm); \ + fprintf(stderr, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ + ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(stderr, "\033[33m"); \ + fprintf(stderr, "WARN: "); \ + if (g_arguments->debug_print) { \ + fprintf(stderr, "%s(%d) ", __FILE__, __LINE__); \ + } \ + fprintf(stderr, "" fmt, __VA_ARGS__); \ + fprintf(stderr, "\033[0m"); \ + if (g_arguments->fpOfInsertResult && !g_arguments->terminate) { \ + fprintf(g_arguments->fpOfInsertResult, \ + "[%02d/%02d %02d:%02d:%02d.%06d] ", ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(g_arguments->fpOfInsertResult, "WARN: "); \ + fprintf(g_arguments->fpOfInsertResult, "" fmt, __VA_ARGS__); \ + } \ + } while (0) + +#define succPrint(fmt, ...) 
\ + do { \ + struct tm Tm, *ptm; \ + struct timeval timeSecs; \ + time_t curTime; \ + toolsGetTimeOfDay(&timeSecs); \ + curTime = timeSecs.tv_sec; \ + ptm = toolsLocalTime(&curTime, &Tm); \ + fprintf(stderr, "[%02d/%02d %02d:%02d:%02d.%06d] ", \ + ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(stderr, "\033[32m"); \ + fprintf(stderr, "SUCC: "); \ + if (g_arguments->debug_print) { \ + fprintf(stderr, "%s(%d) ", __FILE__, __LINE__); \ + } \ + fprintf(stderr, "" fmt, __VA_ARGS__); \ + fprintf(stderr, "\033[0m"); \ + if (g_arguments->fpOfInsertResult && !g_arguments->terminate) { \ + fprintf(g_arguments->fpOfInsertResult, \ + "[%02d/%02d %02d:%02d:%02d.%06d] ", ptm->tm_mon + 1, \ + ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, \ + (int32_t)timeSecs.tv_usec); \ + fprintf(g_arguments->fpOfInsertResult, "SUCC: "); \ + fprintf(g_arguments->fpOfInsertResult, "" fmt, __VA_ARGS__); \ + } \ + } while (0) + +enum TEST_MODE { + INSERT_TEST, // 0 + QUERY_TEST, // 1 + SUBSCRIBE_TEST, // 2 +}; + +enum enumSYNC_MODE { SYNC_MODE, ASYNC_MODE, MODE_BUT }; + +enum enum_TAOS_INTERFACE { + TAOSC_IFACE, + REST_IFACE, + STMT_IFACE, + SML_IFACE, + SML_REST_IFACE, + INTERFACE_BUT +}; + +typedef enum enumQUERY_CLASS { + SPECIFIED_CLASS, + STABLE_CLASS, + CLASS_BUT +} QUERY_CLASS; + +enum _show_db_index { + TSDB_SHOW_DB_NAME_INDEX, + TSDB_SHOW_DB_CREATED_TIME_INDEX, + TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, + TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, + TSDB_SHOW_DB_DAYS_INDEX, + TSDB_SHOW_DB_KEEP_INDEX, + TSDB_SHOW_DB_CACHE_INDEX, + TSDB_SHOW_DB_BLOCKS_INDEX, + TSDB_SHOW_DB_MINROWS_INDEX, + TSDB_SHOW_DB_MAXROWS_INDEX, + TSDB_SHOW_DB_WALLEVEL_INDEX, + TSDB_SHOW_DB_FSYNC_INDEX, + TSDB_SHOW_DB_COMP_INDEX, + TSDB_SHOW_DB_CACHELAST_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_UPDATE_INDEX, + TSDB_SHOW_DB_STATUS_INDEX, + TSDB_MAX_SHOW_DB +}; + +// -----------------------------------------SHOW TABLES CONFIGURE +// ------------------------------------- + +enum _describe_table_index { + TSDB_DESCRIBE_METRIC_FIELD_INDEX, + TSDB_DESCRIBE_METRIC_TYPE_INDEX, + TSDB_DESCRIBE_METRIC_LENGTH_INDEX, + TSDB_DESCRIBE_METRIC_NOTE_INDEX, + TSDB_MAX_DESCRIBE_METRIC +}; + +typedef struct BArray { + size_t size; + uint64_t capacity; + uint64_t elemSize; + void* pData; +} BArray; + +typedef struct { + uint64_t magic; + uint64_t custom; + uint64_t len; + uint64_t cap; + char data[]; +} dstr; + +static const int DS_HEADER_SIZE = sizeof(uint64_t) * 4; +static const uint64_t MAGIC_NUMBER = 0xDCDC52545344DADA; + +static const int OFF_MAGIC = -4; +static const int OFF_CUSTOM = -3; +static const int OFF_LEN = -2; +static const int OFF_CAP = -1; + +typedef struct SStmtData { + void *data; + char *is_null; +} StmtData; + +typedef struct SChildField { + StmtData stmtData; +} ChildField; + +typedef struct SField { + uint8_t type; + char name[TSDB_COL_NAME_LEN + 1]; + uint32_t length; + bool none; + bool null; + StmtData stmtData; + int64_t max; + int64_t min; + tools_cJSON * values; + bool sma; +} Field; + +typedef struct STSMA { + char* name; + char* func; + char* interval; + char* sliding; + int start_when_inserted; + char* custom; + bool done; +} TSMA; + +// generate row data rule +#define RULE_OLD 0 // old generator method +#define RULE_MIX_RANDOM 1 // old data mix update delete ratio +#define RULE_MIX_ALL 2 // mix with all var data +#define RULE_MIX_TS_CALC 3 // ts calc other column +#define RULE_MIX_FIX_VALUE 4 // fixed value 
with give + +// define suit +#define SUIT_DATAPOS_MEM 1 +#define SUIT_DATAPOS_STT 2 +#define SUIT_DATAPOS_FILE 3 +#define SUIT_DATAPOS_MUL_FILE 4 +#define SUIT_DATAPOS_MIX 5 + +enum CONTINUE_IF_FAIL_MODE { + NO_IF_FAILED, // 0 + YES_IF_FAILED, // 1 + SMART_IF_FAILED, // 2 +}; + +typedef struct SChildTable_S { + char name[TSDB_TABLE_NAME_LEN]; + bool useOwnSample; + char *sampleDataBuf; + uint64_t insertRows; + BArray *childCols; +} SChildTable; + +typedef struct SSuperTable_S { + char *stbName; + bool random_data_source; // rand_gen or sample + bool use_metric; + char *childTblPrefix; + char *childTblSample; + bool childTblExists; + uint64_t childTblCount; + uint64_t batchTblCreatingNum; // 0: no batch, > 0: batch table number in + char *batchTblCreatingNumbers; // NULL: no numbers + BArray *batchTblCreatingNumbersArray; + char *batchTblCreatingIntervals; // NULL: no interval + BArray *batchTblCreatingIntervalsArray; + // one sql + bool autoTblCreating; + uint16_t iface; // 0: taosc, 1: rest, 2: stmt + uint16_t lineProtocol; + int64_t childTblLimit; + int64_t childTblOffset; + int64_t childTblFrom; + int64_t childTblTo; + enum CONTINUE_IF_FAIL_MODE continueIfFail; + + // int multiThreadWriteOneTbl; // 0: no, 1: yes + uint32_t interlaceRows; // + int disorderRatio; // 0: no disorder, >0: x% + int disorderRange; // ms, us or ns. according to database precision + + // ratio + uint8_t disRatio; // disorder ratio 0 ~ 100 % + uint8_t updRatio; // update ratio 0 ~ 100 % + uint8_t delRatio; // delete ratio 0 ~ 100 % + + // range + uint64_t disRange; // disorder range + uint64_t updRange; // update range + uint64_t delRange; // delete range + + // generate row value rule see pre RULE_ define + uint8_t genRowRule; + + // data position + uint8_t dataPos; // see define DATAPOS_ + + uint32_t fillIntervalUpd; // fill Upd interval rows cnt + uint32_t fillIntervalDis; // fill Dis interval rows cnt + + // binary prefix + char *binaryPrefex; + // nchar prefix + char *ncharPrefex; + + // random write future time + bool useNow; + bool writeFuture; + int32_t durMinute; // passed database->durMinute + int32_t checkInterval; // check correct interval + + int64_t max_sql_len; + uint64_t insert_interval; + uint64_t insertRows; + uint64_t timestamp_step; + int64_t startTimestamp; + int64_t specifiedColumns; + char sampleFile[MAX_FILE_NAME_LEN]; + char tagsFile[MAX_FILE_NAME_LEN]; + uint32_t partialColNum; + char *partialColNameBuf; + BArray *cols; + BArray *tags; + BArray *tsmas; + SChildTable **childTblArray; + char *colsOfCreateChildTable; + uint32_t lenOfTags; + uint32_t lenOfCols; + + char *sampleDataBuf; + bool useSampleTs; + char *tagDataBuf; + bool tcpTransfer; + bool non_stop; + char *comment; + int delay; + int file_factor; + char *rollup; + char *max_delay; + char *watermark; + int ttl; + int32_t keep_trying; + uint32_t trying_interval; +} SSuperTable; + +typedef struct SDbCfg_S { + char* name; + char* valuestring; + int valueint; +} SDbCfg; + +typedef struct SSTREAM_S { + char stream_name[TSDB_TABLE_NAME_LEN]; + char stream_stb[TSDB_TABLE_NAME_LEN]; + char stream_stb_field[TSDB_DEFAULT_PKT_SIZE]; + char stream_tag_field[TSDB_DEFAULT_PKT_SIZE]; + char subtable[TSDB_DEFAULT_PKT_SIZE]; + char trigger_mode[BIGINT_BUFF_LEN]; + char watermark[BIGINT_BUFF_LEN]; + char ignore_expired[BIGINT_BUFF_LEN]; + char ignore_update[BIGINT_BUFF_LEN]; + char fill_history[BIGINT_BUFF_LEN]; + char source_sql[TSDB_DEFAULT_PKT_SIZE]; + bool drop; +} SSTREAM; + +#ifdef TD_VER_COMPATIBLE_3_0_0_0 +typedef struct SVGroup_S { + 
int32_t vgId; + uint64_t tbCountPerVgId; + SChildTable **childTblArray; + uint64_t tbOffset; // internal use +} SVGroup; +#endif // TD_VER_COMPATIBLE_3_0_0_0 + // +typedef struct SDataBase_S { + char * dbName; + bool drop; // 0: use exists, 1: if exists, drop then new create + int precision; + int sml_precision; + int durMinute; // duration minutes + BArray *cfgs; + BArray *superTbls; +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + int32_t vgroups; + BArray *vgArray; +#endif // TD_VER_COMPATIBLE_3_0_0_0 + bool flush; +} SDataBase; + +typedef struct SSQL_S { + char *command; + char result[MAX_FILE_NAME_LEN]; + int64_t* delay_list; +} SSQL; + +typedef struct SpecifiedQueryInfo_S { + uint64_t queryInterval; // 0: unlimited > 0 loop/s + uint64_t queryTimes; + uint32_t concurrent; + uint32_t asyncMode; // 0: sync, 1: async + uint64_t subscribeInterval; // ms + uint64_t subscribeTimes; // ms + bool subscribeRestart; + int subscribeKeepProgress; + BArray* sqls; + int resubAfterConsume[MAX_QUERY_SQL_COUNT]; + int endAfterConsume[MAX_QUERY_SQL_COUNT]; + TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT]; + char topic[MAX_QUERY_SQL_COUNT][32]; + int consumed[MAX_QUERY_SQL_COUNT]; + TAOS_RES *res[MAX_QUERY_SQL_COUNT]; + uint64_t totalQueried; + bool mixed_query; +} SpecifiedQueryInfo; + +typedef struct SuperQueryInfo_S { + char stbName[TSDB_TABLE_NAME_LEN]; + uint64_t queryInterval; // 0: unlimited > 0 loop/s + uint64_t queryTimes; + uint32_t threadCnt; + uint32_t asyncMode; // 0: sync, 1: async + uint64_t subscribeInterval; // ms + uint64_t subscribeTimes; // ms + bool subscribeRestart; + int subscribeKeepProgress; + int64_t childTblCount; + int sqlCount; + char sql[MAX_QUERY_SQL_COUNT][TSDB_MAX_ALLOWED_SQL_LEN + 1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; + int resubAfterConsume; + int endAfterConsume; + TAOS_SUB *tsub[MAX_QUERY_SQL_COUNT]; + char ** childTblName; + uint64_t totalQueried; +} SuperQueryInfo; + +typedef struct SQueryMetaInfo_S { + SpecifiedQueryInfo specifiedQueryInfo; + SuperQueryInfo superQueryInfo; + uint64_t totalQueried; + uint64_t query_times; + uint64_t killQueryThreshold; + int32_t killQueryInterval; + uint64_t response_buffer; + bool reset_query_cache; + uint16_t iface; + char* dbName; +} SQueryMetaInfo; + + +typedef struct SConsumerInfo_S { + uint32_t concurrent; + uint32_t pollDelay; // ms + char* groupId; + char* clientId; + char* autoOffsetReset; + + char* createMode; + char* groupMode; + + char* enableManualCommit; + char* enableAutoCommit; + uint32_t autoCommitIntervalMs; // ms + char* enableHeartbeatBackground; + char* snapshotEnable; + char* msgWithTableName; + char* rowsFile; + int32_t expectRows; + + char topicName[MAX_QUERY_SQL_COUNT][256]; + char topicSql[MAX_QUERY_SQL_COUNT][256]; + int topicCount; +} SConsumerInfo; + +typedef struct STmqMetaInfo_S { + SConsumerInfo consumerInfo; + uint16_t iface; +} STmqMetaInfo; + +typedef struct SArguments_S { + uint8_t taosc_version; + char * metaFile; + int32_t test_mode; + char * host; + uint16_t port; + uint16_t telnet_tcp_port; + bool host_auto; + bool port_auto; + bool port_inputted; + bool cfg_inputted; + char * user; + char * password; + bool answer_yes; + bool debug_print; + bool performance_print; + bool chinese; + char * output_file; + uint32_t binwidth; + uint32_t intColumnCount; + uint32_t nthreads; + bool nthreads_auto; + uint32_t table_threads; + uint64_t prepared_rand; + uint32_t reqPerReq; + uint64_t insert_interval; + bool demo_mode; + bool aggr_func; + struct sockaddr_in serv_addr; + uint64_t totalChildTables; + 
uint64_t actualChildTables; + uint64_t autoCreatedChildTables; + uint64_t existedChildTables; + FILE * fpOfInsertResult; + BArray * databases; + BArray* streams; + char base64_buf[INPUT_BUF_LEN]; +#ifdef LINUX + sem_t cancelSem; +#endif + bool terminate; + bool in_prompt; +#ifdef WEBSOCKET + int32_t timeout; + char* dsn; + bool websocket; +#endif + bool supplementInsert; + int64_t startTimestamp; + int32_t partialColNum; + int32_t keep_trying; + uint32_t trying_interval; + int iface; + int rest_server_ver_major; + bool check_sql; + int suit; // see define SUIT_ +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + int16_t inputted_vgroups; +#endif + enum CONTINUE_IF_FAIL_MODE continueIfFail; + bool mistMode; + bool escape_character; +} SArguments; + +typedef struct SBenchConn { + TAOS* taos; + TAOS* ctaos; // check taos + TAOS_STMT* stmt; +#ifdef WEBSOCKET + WS_TAOS* taos_ws; + WS_STMT* stmt_ws; +#endif +} SBenchConn; + +#define MAX_BATCOLS 256 +typedef struct SThreadInfo_S { + SBenchConn *conn; + uint64_t *bind_ts; + uint64_t *bind_ts_array; + char *bindParams; + char *is_null; + uint32_t threadID; + uint64_t start_table_from; + uint64_t end_table_to; + uint64_t ntables; + uint64_t tables_created; + char * buffer; + uint64_t counter; + uint64_t st; + uint64_t et; + uint64_t samplePos; + uint64_t totalInsertRows; + uint64_t totalQueried; + int64_t totalDelay; + uint64_t querySeq; + TAOS_SUB *tsub; + char ** lines; + uint32_t line_buf_len; + int32_t sockfd; + SDataBase *dbInfo; + SSuperTable *stbInfo; + char **sml_tags; + tools_cJSON *json_array; + tools_cJSON *sml_json_tags; + char **sml_tags_json_array; + char **sml_json_value_array; + uint64_t start_time; + uint64_t max_sql_len; + FILE *fp; + char filePath[MAX_PATH_LEN]; + BArray* delayList; + uint64_t *query_delay_list; + double avg_delay; +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + SVGroup *vg; +#endif + + int posOfTblCreatingBatch; + int posOfTblCreatingInterval; + // new + uint16_t batCols[MAX_BATCOLS]; + uint16_t nBatCols; // valid count for array batCols + + // check sql result + char *csql; + int32_t clen; // csql current write position +} threadInfo; + +typedef struct SQueryThreadInfo_S { + int start_sql; + int end_sql; + int threadId; + BArray* query_delay_list; + int sockfd; + SBenchConn* conn; + int64_t total_delay; +} queryThreadInfo; + +typedef struct STSmaThreadInfo_S { + char* dbName; + char* stbName; + BArray* tsmas; +} tsmaThreadInfo; + +typedef void (*ToolsSignalHandler)(int signum, void *sigInfo, void *context); + +/* ************ Global variables ************ */ +extern char * g_aggreFuncDemo[]; +extern char * g_aggreFunc[]; +extern SArguments * g_arguments; +extern SQueryMetaInfo g_queryInfo; +extern STmqMetaInfo g_tmqInfo; +extern bool g_fail; +extern char configDir[]; +extern tools_cJSON * root; +extern uint64_t g_memoryUsage; + +#define min(a, b) (((a) < (b)) ? 
(a) : (b)) +#define BARRAY_GET_ELEM(array, index) \ + ((void*)((char*)((array)->pData) + (index) * (array)->elemSize)) +/* ************ Function declares ************ */ +/* benchCommandOpt.c */ +int32_t benchParseArgs(int32_t argc, char* argv[]); +void modifyArgument(); +void initArgument(); +void queryAggrFunc(); +void parseFieldDatatype(char *dataType, BArray *fields, bool isTag); +/* demoJsonOpt.c */ +int getInfoFromJsonFile(); +/* demoUtil.c */ +int compare(const void *a, const void *b); +void encodeAuthBase64(); +void replaceChildTblName(char *inSql, char *outSql, int tblIndex); +void setupForAnsiEscape(void); +void resetAfterAnsiEscape(void); +char * convertDatatypeToString(int type); +int convertStringToDatatype(char *type, int length); +unsigned int taosRandom(); +void tmfree(void *buf); +void tmfclose(FILE *fp); +void fetchResult(TAOS_RES *res, threadInfo *pThreadInfo); +void prompt(bool NonStopMode); +void ERROR_EXIT(const char *msg); +int getServerVersionRest(int16_t rest_port); +int postProceSql(char *sqlstr, char* dbName, int precision, int iface, + int protocol, uint16_t rest_port, bool tcp, + int sockfd, char* filePath); +int queryDbExecCall(SBenchConn *conn, char *command); +int queryDbExecRest(char *command, char* dbName, int precision, + int iface, int protocol, bool tcp, int sockfd); +SBenchConn* initBenchConn(); +void closeBenchConn(SBenchConn* conn); +int regexMatch(const char *s, const char *reg, int cflags); +int convertHostToServAddr(char *host, uint16_t port, + struct sockaddr_in *serv_addr); +int getAllChildNameOfSuperTable(TAOS *taos, char *dbName, char *stbName, + char ** childTblNameOfSuperTbl, + int64_t childTblCountOfSuperTbl); +void* benchCalloc(size_t nmemb, size_t size, bool record); +BArray* benchArrayInit(size_t size, size_t elemSize); +void* benchArrayPush(BArray* pArray, void* pData); +void* benchArrayDestroy(BArray* pArray); +void benchArrayClear(BArray* pArray); +void* benchArrayGet(const BArray* pArray, size_t index); +void* benchArrayAddBatch(BArray* pArray, void* pData, int32_t elems); + +#ifdef LINUX +int32_t bsem_wait(sem_t* sem); +void benchSetSignal(int32_t signum, ToolsSignalHandler sigfp); +#endif + +int convertTypeToLength(uint8_t type); +int64_t convertDatatypeToDefaultMax(uint8_t type); +int64_t convertDatatypeToDefaultMin(uint8_t type); + +// dynamic string +char* new_ds(size_t size); +void free_ds(char** ps); +int is_ds(const char* s); +uint64_t ds_custom(const char* s); +void ds_set_custom(char* s, uint64_t custom); +uint64_t ds_len(const char* s); +uint64_t ds_cap(const char* s); +int ds_last(char* s); +char* ds_end(char* s); +char* ds_grow(char**ps, size_t needsize); +char* ds_resize(char** ps, size_t cap); +char * ds_pack(char **ps); +char * ds_add_char(char **ps, char c); +char * ds_add_str(char **ps, const char* sub); +char * ds_add_strs(char **ps, int count, ...); +char * ds_ins_str(char **ps, size_t pos, const char *sub, size_t len); + +int insertTestProcess(); +void postFreeResource(); +int queryTestProcess(); +int subscribeTestProcess(); +int convertServAddr(int iface, bool tcp, int protocol); +int createSockFd(); +void destroySockFd(int sockfd); + +void printVersion(); +int32_t benchParseSingleOpt(int32_t key, char* arg); + +void printErrCmdCodeStr(char *cmd, int32_t code, TAOS_RES *res); +void printWarnCmdCodeStr(char *cmd, int32_t code, TAOS_RES *res); + +#ifndef LINUX +int32_t benchParseArgsNoArgp(int argc, char* argv[]); +#endif + +int32_t execInsert(threadInfo *pThreadInfo, uint32_t k); + +#endif // INC_BENCH_H_ diff 
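A brief usage sketch of the container helpers declared in this header may help when reading benchInsert.c below. It relies only on the signatures above and on the allocate-push-get pattern the tool itself uses (benchCalloc an element, benchArrayPush it, then read the stored copy back with benchArrayGet); the element values and print statements are illustrative, and the short dynamic-string demo assumes nothing beyond new_ds/ds_add_str/ds_add_strs/ds_len/free_ds as they are driven by syncWriteInterlace.

/*
 * Sketch only, not part of the patch: BArray and dynamic-string usage,
 * mirroring how benchInsert.c drives these helpers.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include "bench.h"

static void barrayDemo(void) {
    // first argument is the initial capacity, second the element size,
    // matching getVgroupsOfDb's benchArrayInit(vgroups, sizeof(SVGroup))
    BArray *batches = benchArrayInit(4, sizeof(int32_t));
    for (int32_t i = 0; i < 4; i++) {
        int32_t *v = benchCalloc(1, sizeof(int32_t), true);
        *v = (i + 1) * 10;
        // the pushed buffer is handed over to the array; callers in
        // benchInsert.c never free it after a push
        benchArrayPush(batches, v);
    }
    for (int i = 0; i < batches->size; i++) {
        int32_t *v = benchArrayGet(batches, i);
        printf("batch[%d] = %d\n", i, *v);
    }
    benchArrayDestroy(batches);
}

static void dsDemo(void) {
    char *sql = new_ds(0);                  // empty dynamic string
    ds_add_str(&sql, "INSERT INTO ");
    ds_add_strs(&sql, 3, "`d0`", " VALUES ", "(1500000000000,1) ");
    printf("%" PRIu64 " bytes: %s\n", ds_len(sql), sql);
    free_ds(&sql);
}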
--git a/src/benchInsert.c b/src/benchInsert.c index 7d8921b1..adf66b69 100644 --- a/src/benchInsert.c +++ b/src/benchInsert.c @@ -1,3592 +1,3595 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the MIT license as published by the Free Software - * Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - */ - -#include -#include -#include - -#define FREE_PIDS_INFOS_RETURN_MINUS_1() \ - do { \ - tmfree(pids); \ - tmfree(infos); \ - return -1; \ - } while (0) - -#define FREE_RESOURCE() \ - do { \ - if (pThreadInfo->conn) \ - closeBenchConn(pThreadInfo->conn); \ - benchArrayDestroy(pThreadInfo->delayList); \ - tmfree(pids); \ - tmfree(infos); \ - } while (0) \ - -static int getSuperTableFromServerRest( - SDataBase* database, SSuperTable* stbInfo, char *command) { - - return -1; - // TODO(me): finish full implementation -#if 0 - int sockfd = createSockFd(); - if (sockfd < 0) { - return -1; - } - - int code = postProceSql(command, - database->dbName, - database->precision, - REST_IFACE, - 0, - g_arguments->port, - false, - sockfd, - NULL); - - destroySockFd(sockfd); -#endif // 0 -} - -static int getSuperTableFromServerTaosc( - SDataBase* database, SSuperTable* stbInfo, char *command) { -#ifdef WEBSOCKET - if (g_arguments->websocket) { - return -1; - } -#endif - TAOS_RES * res; - TAOS_ROW row = NULL; - SBenchConn* conn = initBenchConn(); - if (NULL == conn) { - return -1; - } - - res = taos_query(conn->taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - printWarnCmdCodeStr(command, code, res); - infoPrint("stable %s does not exist, will create one\n", - stbInfo->stbName); - closeBenchConn(conn); - return -1; - } - infoPrint("find stable<%s>, will get meta data from server\n", - stbInfo->stbName); - benchArrayClear(stbInfo->tags); - benchArrayClear(stbInfo->cols); - int count = 0; - while ((row = taos_fetch_row(res)) != NULL) { - if (count == 0) { - count++; - continue; - } - int32_t *lengths = taos_fetch_lengths(res); - if (lengths == NULL) { - errorPrint("%s", "failed to execute taos_fetch_length\n"); - taos_free_result(res); - closeBenchConn(conn); - return -1; - } - if (strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "tag", - strlen("tag")) == 0) { - Field* tag = benchCalloc(1, sizeof(Field), true); - benchArrayPush(stbInfo->tags, tag); - tag = benchArrayGet(stbInfo->tags, stbInfo->tags->size - 1); - tag->type = convertStringToDatatype( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - lengths[TSDB_DESCRIBE_METRIC_TYPE_INDEX]); - tag->length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tag->min = convertDatatypeToDefaultMin(tag->type); - tag->max = convertDatatypeToDefaultMax(tag->type); - tstrncpy(tag->name, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - lengths[TSDB_DESCRIBE_METRIC_FIELD_INDEX] + 1); - } else { - Field * col = benchCalloc(1, sizeof(Field), true); - benchArrayPush(stbInfo->cols, col); - col = benchArrayGet(stbInfo->cols, stbInfo->cols->size - 1); - col->type = convertStringToDatatype( - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - lengths[TSDB_DESCRIBE_METRIC_TYPE_INDEX]); - col->length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - col->min = convertDatatypeToDefaultMin(col->type); - col->max = convertDatatypeToDefaultMax(col->type); - tstrncpy(col->name, - (char 
*)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - lengths[TSDB_DESCRIBE_METRIC_FIELD_INDEX] + 1); - } - } - taos_free_result(res); - closeBenchConn(conn); - return 0; -} - -static int getSuperTableFromServer(SDataBase* database, SSuperTable* stbInfo) { - int ret = 0; - - char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; - snprintf(command, SHORT_1K_SQL_BUFF_LEN, - "DESCRIBE `%s`.`%s`", database->dbName, - stbInfo->stbName); - - if (REST_IFACE == stbInfo->iface) { - ret = getSuperTableFromServerRest(database, stbInfo, command); - } else { - ret = getSuperTableFromServerTaosc(database, stbInfo, command); - } - - return ret; -} - -static int queryDbExec(SDataBase *database, - SSuperTable *stbInfo, char *command) { - int ret = 0; - if (REST_IFACE == stbInfo->iface) { - if (0 != convertServAddr(stbInfo->iface, false, 1)) { - errorPrint("%s", "Failed to convert server address\n"); - return -1; - } - int sockfd = createSockFd(); - if (sockfd < 0) { - ret = -1; - } else { - ret = queryDbExecRest(command, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - stbInfo->tcpTransfer, - sockfd); - destroySockFd(sockfd); - } - } else { - SBenchConn* conn = initBenchConn(); - if (NULL == conn) { - ret = -1; - } else { - ret = queryDbExecCall(conn, command); - int32_t trying = g_arguments->keep_trying; - while (ret && trying) { - infoPrint("will sleep %"PRIu32" milliseconds then re-create " - "supertable %s\n", - g_arguments->trying_interval, stbInfo->stbName); - toolsMsleep(g_arguments->trying_interval); - ret = queryDbExecCall(conn, command); - if (trying != -1) { - trying--; - } - } - if (0 != ret) { - errorPrint("create supertable %s failed!\n\n", - stbInfo->stbName); - ret = -1; - } - closeBenchConn(conn); - } - } - - return ret; -} - -#ifdef WEBSOCKET -static void dropSuperTable(SDataBase* database, SSuperTable* stbInfo) { - if (g_arguments->supplementInsert) { - return; - } - - char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; - snprintf(command, sizeof(command), - g_arguments->escape_character - ? 
"DROP TABLE `%s`.`%s`" - : "DROP TABLE %s.%s", - database->dbName, - stbInfo->stbName); - - infoPrint("drop stable: <%s>\n", command); - queryDbExec(database, stbInfo, command); - - return; -} -#endif // WEBSOCKET - -static int createSuperTable(SDataBase* database, SSuperTable* stbInfo) { - if (g_arguments->supplementInsert) { - return 0; - } - - uint32_t col_buffer_len = (TSDB_COL_NAME_LEN + 15) * stbInfo->cols->size; - char *colsBuf = benchCalloc(1, col_buffer_len, false); - char* command = benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, false); - int len = 0; - - for (int colIndex = 0; colIndex < stbInfo->cols->size; colIndex++) { - Field * col = benchArrayGet(stbInfo->cols, colIndex); - int n; - if (col->type == TSDB_DATA_TYPE_BINARY || - col->type == TSDB_DATA_TYPE_NCHAR) { - n = snprintf(colsBuf + len, col_buffer_len - len, - ",%s %s(%d)", col->name, - convertDatatypeToString(col->type), col->length); - } else { - n = snprintf(colsBuf + len, col_buffer_len - len, - ",%s %s", col->name, - convertDatatypeToString(col->type)); - } - if (n < 0 || n >= col_buffer_len - len) { - errorPrint("%s() LN%d, snprintf overflow on %d\n", - __func__, __LINE__, colIndex); - break; - } else { - len += n; - } - } - - // save for creating child table - stbInfo->colsOfCreateChildTable = - (char *)benchCalloc(len + TIMESTAMP_BUFF_LEN, 1, true); - - snprintf(stbInfo->colsOfCreateChildTable, len + TIMESTAMP_BUFF_LEN, - "(ts timestamp%s)", colsBuf); - - if (stbInfo->tags->size == 0) { - free(colsBuf); - free(command); - return 0; - } - - uint32_t tag_buffer_len = (TSDB_COL_NAME_LEN + 15) * stbInfo->tags->size; - char *tagsBuf = benchCalloc(1, tag_buffer_len, false); - int tagIndex; - len = 0; - - int n; - n = snprintf(tagsBuf + len, tag_buffer_len - len, "("); - if (n < 0 || n >= tag_buffer_len - len) { - errorPrint("%s() LN%d snprintf overflow\n", - __func__, __LINE__); - free(colsBuf); - free(command); - tmfree(tagsBuf); - return -1; - } else { - len += n; - } - for (tagIndex = 0; tagIndex < stbInfo->tags->size; tagIndex++) { - Field *tag = benchArrayGet(stbInfo->tags, tagIndex); - if (tag->type == TSDB_DATA_TYPE_BINARY || - tag->type == TSDB_DATA_TYPE_NCHAR) { - n = snprintf(tagsBuf + len, tag_buffer_len - len, - "%s %s(%d),", tag->name, - convertDatatypeToString(tag->type), tag->length); - } else if (tag->type == TSDB_DATA_TYPE_JSON) { - n = snprintf(tagsBuf + len, tag_buffer_len - len, - "%s json", tag->name); - if (n < 0 || n >= tag_buffer_len - len) { - errorPrint("%s() LN%d snprintf overflow on %d\n", - __func__, __LINE__, tagIndex); - break; - } else { - len += n; - } - goto skip; - } else { - n = snprintf(tagsBuf + len, tag_buffer_len - len, - "%s %s,", tag->name, - convertDatatypeToString(tag->type)); - } - - if (n < 0 || n >= tag_buffer_len - len) { - errorPrint("%s() LN%d snprintf overflow on %d\n", - __func__, __LINE__, tagIndex); - break; - } else { - len += n; - } - } - len -= 1; -skip: - snprintf(tagsBuf + len, tag_buffer_len - len, ")"); - - int length = snprintf( - command, TSDB_MAX_ALLOWED_SQL_LEN, - g_arguments->escape_character - ? 
"CREATE TABLE `%s`.`%s` (ts TIMESTAMP%s) TAGS %s" - : "CREATE TABLE %s.%s (ts TIMESTAMP%s) TAGS %s", - database->dbName, stbInfo->stbName, colsBuf, tagsBuf); - tmfree(colsBuf); - tmfree(tagsBuf); - if (stbInfo->comment != NULL) { - length += snprintf(command + length, TSDB_MAX_ALLOWED_SQL_LEN - length, - " COMMENT '%s'", stbInfo->comment); - } - if (stbInfo->delay >= 0) { - length += snprintf(command + length, - TSDB_MAX_ALLOWED_SQL_LEN - length, " DELAY %d", - stbInfo->delay); - } - if (stbInfo->file_factor >= 0) { - length += - snprintf(command + length, - TSDB_MAX_ALLOWED_SQL_LEN - length, " FILE_FACTOR %f", - (float)stbInfo->file_factor / 100); - } - if (stbInfo->rollup != NULL) { - length += snprintf(command + length, - TSDB_MAX_ALLOWED_SQL_LEN - length, - " ROLLUP(%s)", stbInfo->rollup); - } - - if (stbInfo->max_delay != NULL) { - length += snprintf(command + length, - TSDB_MAX_ALLOWED_SQL_LEN - length, - " MAX_DELAY %s", stbInfo->max_delay); - } - - if (stbInfo->watermark != NULL) { - length += snprintf(command + length, - TSDB_MAX_ALLOWED_SQL_LEN - length, - " WATERMARK %s", stbInfo->watermark); - } - - if (stbInfo->ttl != 0) { - length += snprintf(command + length, - TSDB_MAX_ALLOWED_SQL_LEN - length, - " TTL %d", stbInfo->ttl); - } - - bool first_sma = true; - for (int i = 0; i < stbInfo->cols->size; i++) { - Field * col = benchArrayGet(stbInfo->cols, i); - if (col->sma) { - if (first_sma) { - n = snprintf(command + length, - TSDB_MAX_ALLOWED_SQL_LEN - length, - " SMA(%s", col->name); - first_sma = false; - } else { - n = snprintf(command + length, - TSDB_MAX_ALLOWED_SQL_LEN - length, - ",%s", col->name); - } - - if (n < 0 || n > TSDB_MAX_ALLOWED_SQL_LEN - length) { - errorPrint("%s() LN%d snprintf overflow on %d iteral\n", - __func__, __LINE__, i); - break; - } else { - length += n; - } - } - } - if (!first_sma) { - snprintf(command + length, TSDB_MAX_ALLOWED_SQL_LEN - length, ")"); - } - infoPrint("create stable: <%s>\n", command); - - int ret = queryDbExec(database, stbInfo, command); - free(command); - return ret; -} - -#ifdef TD_VER_COMPATIBLE_3_0_0_0 -int32_t getVgroupsOfDb(SBenchConn *conn, SDataBase *database) { - int vgroups = 0; - char cmd[SHORT_1K_SQL_BUFF_LEN] = "\0"; - - snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, - g_arguments->escape_character - ? 
"USE `%s`" - : "USE %s", - database->dbName); - - int32_t code; - TAOS_RES *res = NULL; - - res = taos_query(conn->taos, cmd); - code = taos_errno(res); - if (code) { - printErrCmdCodeStr(cmd, code, res); - return -1; - } - taos_free_result(res); - - snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, "SHOW VGROUPS"); - res = taos_query(conn->taos, cmd); - code = taos_errno(res); - if (code) { - printErrCmdCodeStr(cmd, code, res); - return -1; - } - - TAOS_ROW row = NULL; - while ((row = taos_fetch_row(res)) != NULL) { - vgroups++; - } - debugPrint("%s() LN%d, vgroups: %d\n", __func__, __LINE__, vgroups); - taos_free_result(res); - - database->vgroups = vgroups; - database->vgArray = benchArrayInit(vgroups, sizeof(SVGroup)); - for (int32_t v = 0; (v < vgroups - && !g_arguments->terminate); v++) { - SVGroup *vg = benchCalloc(1, sizeof(SVGroup), true); - benchArrayPush(database->vgArray, vg); - } - - res = taos_query(conn->taos, cmd); - code = taos_errno(res); - if (code) { - printErrCmdCodeStr(cmd, code, res); - return -1; - } - - int32_t vgItem = 0; - while (((row = taos_fetch_row(res)) != NULL) - && !g_arguments->terminate) { - SVGroup *vg = benchArrayGet(database->vgArray, vgItem); - vg->vgId = *(int32_t*)row[0]; - vgItem++; - } - taos_free_result(res); - - return vgroups; -} -#endif // TD_VER_COMPATIBLE_3_0_0_0 - -int geneDbCreateCmd(SDataBase *database, char *command, int remainVnodes) { - int dataLen = 0; - int n; -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - if (g_arguments->nthreads_auto || (-1 != g_arguments->inputted_vgroups)) { - n = snprintf(command + dataLen, SHORT_1K_SQL_BUFF_LEN - dataLen, - g_arguments->escape_character - ? "CREATE DATABASE IF NOT EXISTS `%s` VGROUPS %d" - : "CREATE DATABASE IF NOT EXISTS %s VGROUPS %d", - database->dbName, - (-1 != g_arguments->inputted_vgroups)? - g_arguments->inputted_vgroups: - min(remainVnodes, toolsGetNumberOfCores())); - } else { - n = snprintf(command + dataLen, SHORT_1K_SQL_BUFF_LEN - dataLen, - g_arguments->escape_character - ? "CREATE DATABASE IF NOT EXISTS `%s`" - : "CREATE DATABASE IF NOT EXISTS %s", - database->dbName); - } -#else - n = snprintf(command + dataLen, SHORT_1K_SQL_BUFF_LEN - dataLen, - g_arguments->escape_character - ? 
"CREATE DATABASE IF NOT EXISTS `%s`" - : "CREATE DATABASE IF NOT EXISTS %s", database->dbName); -#endif // TD_VER_COMPATIBLE_3_0_0_0 - if (n < 0 || n >= SHORT_1K_SQL_BUFF_LEN - dataLen) { - errorPrint("%s() LN%d snprintf overflow\n", - __func__, __LINE__); - return -1; - } else { - dataLen += n; - } - - if (database->cfgs) { - for (int i = 0; i < database->cfgs->size; i++) { - SDbCfg* cfg = benchArrayGet(database->cfgs, i); - if (cfg->valuestring) { - n = snprintf(command + dataLen, - TSDB_MAX_ALLOWED_SQL_LEN - dataLen, - " %s %s", cfg->name, cfg->valuestring); - } else { - n = snprintf(command + dataLen, - TSDB_MAX_ALLOWED_SQL_LEN - dataLen, - " %s %d", cfg->name, cfg->valueint); - } - if (n < 0 || n >= TSDB_MAX_ALLOWED_SQL_LEN - dataLen) { - errorPrint("%s() LN%d snprintf overflow on %d\n", - __func__, __LINE__, i); - break; - } else { - dataLen += n; - } - } - } - - switch (database->precision) { - case TSDB_TIME_PRECISION_MILLI: - snprintf(command + dataLen, TSDB_MAX_ALLOWED_SQL_LEN - dataLen, - " PRECISION \'ms\';"); - break; - case TSDB_TIME_PRECISION_MICRO: - snprintf(command + dataLen, TSDB_MAX_ALLOWED_SQL_LEN - dataLen, - " PRECISION \'us\';"); - break; - case TSDB_TIME_PRECISION_NANO: - snprintf(command + dataLen, TSDB_MAX_ALLOWED_SQL_LEN - dataLen, - " PRECISION \'ns\';"); - break; - } - - return dataLen; -} - -int createDatabaseRest(SDataBase* database) { - int32_t code = 0; - char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; - - int sockfd = createSockFd(); - if (sockfd < 0) { - return -1; - } - - snprintf(command, SHORT_1K_SQL_BUFF_LEN, - g_arguments->escape_character - ? "DROP DATABASE IF EXISTS `%s`;" - : "DROP DATABASE IF EXISTS %s;", - database->dbName); - code = postProceSql(command, - database->dbName, - database->precision, - REST_IFACE, - 0, - g_arguments->port, - false, - sockfd, - NULL); - if (code != 0) { - errorPrint("Failed to drop database %s\n", database->dbName); - } else { - int remainVnodes = INT_MAX; - geneDbCreateCmd(database, command, remainVnodes); - code = postProceSql(command, - database->dbName, - database->precision, - REST_IFACE, - 0, - g_arguments->port, - false, - sockfd, - NULL); - int32_t trying = g_arguments->keep_trying; - while (code && trying) { - infoPrint("will sleep %"PRIu32" milliseconds then " - "re-create database %s\n", - g_arguments->trying_interval, database->dbName); - toolsMsleep(g_arguments->trying_interval); - code = postProceSql(command, - database->dbName, - database->precision, - REST_IFACE, - 0, - g_arguments->port, - false, - sockfd, - NULL); - if (trying != -1) { - trying--; - } - } - } - destroySockFd(sockfd); - return code; -} - -int32_t getRemainVnodes(SBenchConn *conn) { - int remainVnodes = 0; - char command[SHORT_1K_SQL_BUFF_LEN] = "SHOW DNODES"; - - TAOS_RES *res = taos_query(conn->taos, command); - int32_t code = taos_errno(res); - if (code) { - printErrCmdCodeStr(command, code, res); - closeBenchConn(conn); - return -1; - } - TAOS_ROW row = NULL; - while ((row = taos_fetch_row(res)) != NULL) { - remainVnodes += (*(int16_t*)(row[3]) - *(int16_t*)(row[2])); - } - debugPrint("%s() LN%d, remainVnodes: %d\n", - __func__, __LINE__, remainVnodes); - taos_free_result(res); - return remainVnodes; -} - -int createDatabaseTaosc(SDataBase* database) { - char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; - SBenchConn* conn = initBenchConn(); - if (NULL == conn) { - return -1; - } - if (g_arguments->taosc_version == 3) { - for (int i = 0; i < g_arguments->streams->size; i++) { - SSTREAM* stream = benchArrayGet(g_arguments->streams, i); - if 
(stream->drop) { - snprintf(command, SHORT_1K_SQL_BUFF_LEN, - "DROP STREAM IF EXISTS %s;", - stream->stream_name); - if (queryDbExecCall(conn, command)) { - closeBenchConn(conn); - return -1; - } - infoPrint("%s\n", command); - memset(command, 0, SHORT_1K_SQL_BUFF_LEN); - } - } - } - - snprintf(command, SHORT_1K_SQL_BUFF_LEN, - g_arguments->escape_character - ? "DROP DATABASE IF EXISTS `%s`;": - "DROP DATABASE IF EXISTS %s;", - database->dbName); - if (0 != queryDbExecCall(conn, command)) { -#ifdef WEBSOCKET - if (g_arguments->websocket) { - warnPrint("%s", "TDengine cloud normal users have no privilege " - "to drop database! DROP DATABASE failure is ignored!\n"); - } else { -#endif - closeBenchConn(conn); - return -1; -#ifdef WEBSOCKET - } -#endif - } - - int remainVnodes = INT_MAX; -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - if (g_arguments->nthreads_auto) { - remainVnodes = getRemainVnodes(conn); - if (0 >= remainVnodes) { - errorPrint("Remain vnodes %d, failed to create database\n", - remainVnodes); - return -1; - } - } -#endif - geneDbCreateCmd(database, command, remainVnodes); - - int32_t code = queryDbExecCall(conn, command); - int32_t trying = g_arguments->keep_trying; - while (code && trying) { - infoPrint("will sleep %"PRIu32" milliseconds then " - "re-create database %s\n", - g_arguments->trying_interval, database->dbName); - toolsMsleep(g_arguments->trying_interval); - code = queryDbExecCall(conn, command); - if (trying != -1) { - trying--; - } - } - - if (code) { -#ifdef WEBSOCKET - if (g_arguments->websocket) { - warnPrint("%s", "TDengine cloud normal users have no privilege " - "to create database! CREATE DATABASE " - "failure is ignored!\n"); - } else { -#endif - - closeBenchConn(conn); - errorPrint("\ncreate database %s failed!\n\n", - database->dbName); - return -1; -#ifdef WEBSOCKET - } -#endif - } - infoPrint("command to create database: <%s>\n", command); - -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - if (database->superTbls) { - if (g_arguments->nthreads_auto) { - int32_t vgroups = getVgroupsOfDb(conn, database); - if (vgroups <=0) { - closeBenchConn(conn); - errorPrint("Database %s's vgroups is %d\n", - database->dbName, vgroups); - return -1; - } - } - } -#endif // TD_VER_COMPATIBLE_3_0_0_0 - - closeBenchConn(conn); - return 0; -} - -int createDatabase(SDataBase* database) { - int ret = 0; - if (REST_IFACE == g_arguments->iface) { - ret = createDatabaseRest(database); - } else { - ret = createDatabaseTaosc(database); - } -#if 0 -#ifdef LINUX - infoPrint("%s() LN%d, ret: %d\n", __func__, __LINE__, ret); - sleep(10); - infoPrint("%s() LN%d, ret: %d\n", __func__, __LINE__, ret); -#elif defined(DARWIN) - sleep(2); -#else - Sleep(2); -#endif -#endif - - return ret; -} - -static int generateChildTblName(int len, char *buffer, SDataBase *database, - SSuperTable *stbInfo, uint64_t i, - char *ttl) { - if (0 == len) { - memset(buffer, 0, TSDB_MAX_ALLOWED_SQL_LEN); - len += snprintf(buffer + len, - TSDB_MAX_ALLOWED_SQL_LEN - len, "CREATE TABLE "); - } - - len += snprintf( - buffer + len, TSDB_MAX_ALLOWED_SQL_LEN - len, - g_arguments->escape_character - ? 
"`%s`.`%s%" PRIu64 "` USING `%s`.`%s` TAGS (%s) %s " - : "%s.%s%" PRIu64 " USING %s.%s TAGS (%s) %s ", - database->dbName, stbInfo->childTblPrefix, i, database->dbName, - stbInfo->stbName, - stbInfo->tagDataBuf + i * stbInfo->lenOfTags, ttl); - - return len; -} - -static int getBatchOfTblCreating(threadInfo *pThreadInfo, - SSuperTable *stbInfo) { - BArray *batchArray = stbInfo->batchTblCreatingNumbersArray; - if (batchArray) { - int *batch = benchArrayGet( - batchArray, pThreadInfo->posOfTblCreatingBatch); - pThreadInfo->posOfTblCreatingBatch++; - if (pThreadInfo->posOfTblCreatingBatch == batchArray->size) { - pThreadInfo->posOfTblCreatingBatch = 0; - } - return *batch; - } - return 0; -} - -static int getIntervalOfTblCreating(threadInfo *pThreadInfo, - SSuperTable *stbInfo) { - BArray *intervalArray = stbInfo->batchTblCreatingIntervalsArray; - if (intervalArray) { - int *interval = benchArrayGet( - intervalArray, pThreadInfo->posOfTblCreatingInterval); - pThreadInfo->posOfTblCreatingInterval++; - if (pThreadInfo->posOfTblCreatingInterval == intervalArray->size) { - pThreadInfo->posOfTblCreatingInterval = 0; - } - return *interval; - } - return 0; -} - -static void *createTable(void *sarg) { - if (g_arguments->supplementInsert) { - return NULL; - } - - threadInfo * pThreadInfo = (threadInfo *)sarg; - SDataBase * database = pThreadInfo->dbInfo; - SSuperTable *stbInfo = pThreadInfo->stbInfo; -#ifdef LINUX - prctl(PR_SET_NAME, "createTable"); -#endif - uint64_t lastPrintTime = toolsGetTimestampMs(); - pThreadInfo->buffer = benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, false); - int len = 0; - int batchNum = 0; - infoPrint( - "thread[%d] start creating table from %" PRIu64 " to %" PRIu64 - "\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, - pThreadInfo->end_table_to); - - char ttl[SMALL_BUFF_LEN] = ""; - if (stbInfo->ttl != 0) { - snprintf(ttl, SMALL_BUFF_LEN, "TTL %d", stbInfo->ttl); - } - - int smallBatchCount = 0; - for (uint64_t i = pThreadInfo->start_table_from + stbInfo->childTblFrom; - (i <= (pThreadInfo->end_table_to + stbInfo->childTblFrom) - && !g_arguments->terminate); i++) { - if (g_arguments->terminate) { - goto create_table_end; - } - if (!stbInfo->use_metric || stbInfo->tags->size == 0) { - if (stbInfo->childTblCount == 1) { - snprintf(pThreadInfo->buffer, TSDB_MAX_ALLOWED_SQL_LEN, - g_arguments->escape_character - ? "CREATE TABLE `%s`.`%s` %s;" - : "CREATE TABLE %s.%s %s;", - database->dbName, stbInfo->stbName, - stbInfo->colsOfCreateChildTable); - } else { - snprintf(pThreadInfo->buffer, TSDB_MAX_ALLOWED_SQL_LEN, - g_arguments->escape_character - ? 
"CREATE TABLE `%s`.`%s` %s;" - : "CREATE TABLE %s.%s %s;", - database->dbName, - stbInfo->childTblArray[i]->name, - stbInfo->colsOfCreateChildTable); - } - batchNum++; - } else { - if (0 == len) { - batchNum = 0; - } - len = generateChildTblName(len, pThreadInfo->buffer, - database, stbInfo, i, ttl); - - batchNum++; - smallBatchCount++; - - int smallBatch = getBatchOfTblCreating(pThreadInfo, stbInfo); - if ((!smallBatch || (smallBatchCount == smallBatch)) - && (batchNum < stbInfo->batchTblCreatingNum) - && ((TSDB_MAX_ALLOWED_SQL_LEN - len) >= - (stbInfo->lenOfTags + EXTRA_SQL_LEN))) { - continue; - } else { - smallBatchCount = 0; - } - } - - len = 0; - - int ret = 0; - debugPrint("thread[%d] creating table: %s\n", pThreadInfo->threadID, - pThreadInfo->buffer); - if (REST_IFACE == stbInfo->iface) { - ret = queryDbExecRest(pThreadInfo->buffer, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - stbInfo->tcpTransfer, - pThreadInfo->sockfd); - } else { - ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); - int32_t trying = g_arguments->keep_trying; - while (ret && trying) { - infoPrint("will sleep %"PRIu32" milliseconds then re-create " - "table %s\n", - g_arguments->trying_interval, pThreadInfo->buffer); - toolsMsleep(g_arguments->trying_interval); - ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); - if (trying != -1) { - trying--; - } - } - } - - if (0 != ret) { - g_fail = true; - goto create_table_end; - } - uint64_t intervalOfTblCreating = getIntervalOfTblCreating(pThreadInfo, - stbInfo); - if (intervalOfTblCreating) { - debugPrint("will sleep %"PRIu64" milliseconds " - "for table creating interval\n", intervalOfTblCreating); - toolsMsleep(intervalOfTblCreating); - } - - pThreadInfo->tables_created += batchNum; - batchNum = 0; - uint64_t currentPrintTime = toolsGetTimestampMs(); - if (currentPrintTime - lastPrintTime > PRINT_STAT_INTERVAL) { - infoPrint( - "thread[%d] already created %" PRId64 " tables\n", - pThreadInfo->threadID, pThreadInfo->tables_created); - lastPrintTime = currentPrintTime; - } - } - - if (0 != len) { - int ret = 0; - debugPrint("thread[%d] creating table: %s\n", pThreadInfo->threadID, - pThreadInfo->buffer); - if (REST_IFACE == stbInfo->iface) { - ret = queryDbExecRest(pThreadInfo->buffer, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - stbInfo->tcpTransfer, - pThreadInfo->sockfd); - } else { - ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); - } - if (0 != ret) { - g_fail = true; - goto create_table_end; - } - pThreadInfo->tables_created += batchNum; - debugPrint("thread[%d] already created %" PRId64 " tables\n", - pThreadInfo->threadID, pThreadInfo->tables_created); - } -create_table_end: - tmfree(pThreadInfo->buffer); - pThreadInfo->buffer = NULL; - return NULL; -} - -static int startMultiThreadCreateChildTable( - SDataBase* database, SSuperTable* stbInfo) { - int code = -1; - int threads = g_arguments->table_threads; - int64_t ntables; - if (stbInfo->childTblTo > 0) { - ntables = stbInfo->childTblTo - stbInfo->childTblFrom; - } else { - ntables = stbInfo->childTblCount; - } - pthread_t *pids = benchCalloc(1, threads * sizeof(pthread_t), false); - threadInfo *infos = benchCalloc(1, threads * sizeof(threadInfo), false); - uint64_t tableFrom = 0; - if (threads < 1) { - threads = 1; - } - - int64_t a = ntables / threads; - if (a < 1) { - threads = (int)ntables; - a = 1; - } - - if (ntables == 0) { - errorPrint("failed to create child table, 
childTblCount: %"PRId64"\n", - ntables); - goto over; - } - int64_t b = ntables % threads; - - int threadCnt = 0; - for (uint32_t i = 0; (i < threads && !g_arguments->terminate); i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = i; - pThreadInfo->stbInfo = stbInfo; - pThreadInfo->dbInfo = database; - if (REST_IFACE == stbInfo->iface) { - int sockfd = createSockFd(); - if (sockfd < 0) { - FREE_PIDS_INFOS_RETURN_MINUS_1(); - } - pThreadInfo->sockfd = sockfd; - } else { - pThreadInfo->conn = initBenchConn(); - if (NULL == pThreadInfo->conn) { - goto over; - } - } - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = i < b ? a + 1 : a; - pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->tables_created = 0; - pthread_create(pids + i, NULL, createTable, pThreadInfo); - threadCnt ++; - } - - for (int i = 0; i < threadCnt; i++) { - pthread_join(pids[i], NULL); - } - - if (g_arguments->terminate) toolsMsleep(100); - - for (int i = 0; i < threadCnt; i++) { - threadInfo *pThreadInfo = infos + i; - g_arguments->actualChildTables += pThreadInfo->tables_created; - - if ((REST_IFACE != stbInfo->iface) && pThreadInfo->conn) { - closeBenchConn(pThreadInfo->conn); - } - } - - if (g_fail) { - goto over; - } - code = 0; -over: - free(pids); - free(infos); - return code; -} - -static int createChildTables() { - int32_t code; - infoPrint("start creating %" PRId64 " table(s) with %d thread(s)\n", - g_arguments->totalChildTables, g_arguments->table_threads); - if (g_arguments->fpOfInsertResult) { - infoPrintToFile(g_arguments->fpOfInsertResult, - "start creating %" PRId64 " table(s) with %d thread(s)\n", - g_arguments->totalChildTables, g_arguments->table_threads); - } - double start = (double)toolsGetTimestampMs(); - - for (int i = 0; (i < g_arguments->databases->size - && !g_arguments->terminate); i++) { - SDataBase * database = benchArrayGet(g_arguments->databases, i); - if (database->superTbls) { - for (int j = 0; (j < database->superTbls->size - && !g_arguments->terminate); j++) { - SSuperTable * stbInfo = benchArrayGet(database->superTbls, j); - if (stbInfo->autoTblCreating || stbInfo->iface == SML_IFACE - || stbInfo->iface == SML_REST_IFACE) { - g_arguments->autoCreatedChildTables += - stbInfo->childTblCount; - continue; - } - if (stbInfo->childTblExists) { - g_arguments->existedChildTables += - stbInfo->childTblCount; - continue; - } - debugPrint("colsOfCreateChildTable: %s\n", - stbInfo->colsOfCreateChildTable); - - code = startMultiThreadCreateChildTable(database, stbInfo); - if (code && !g_arguments->terminate) { - return code; - } - } - } - } - - double end = (double)toolsGetTimestampMs(); - succPrint( - "Spent %.4f seconds to create %" PRId64 - " table(s) with %d thread(s), already exist %" PRId64 - " table(s), actual %" PRId64 " table(s) pre created, %" PRId64 - " table(s) will be auto created\n", - (end - start) / 1000.0, g_arguments->totalChildTables, - g_arguments->table_threads, g_arguments->existedChildTables, - g_arguments->actualChildTables, - g_arguments->autoCreatedChildTables); - return 0; -} - -static void freeChildTable(SChildTable *childTbl, int colsSize) { - if (childTbl->useOwnSample) { - if (childTbl->childCols) { - for (int col = 0; col < colsSize; col++) { - ChildField *childCol = - benchArrayGet(childTbl->childCols, col); - if (childCol) { - tmfree(childCol->stmtData.data); - tmfree(childCol->stmtData.is_null); - } - } - benchArrayDestroy(childTbl->childCols); - } 
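The child-table range split in startMultiThreadCreateChildTable above divides ntables over the creation threads so that the first ntables % threads threads each handle one extra table. A standalone sketch of just that arithmetic follows; the function name and printf output are illustrative, the formulas are copied from the code above.

/*
 * Worked example of the range split used in startMultiThreadCreateChildTable:
 * ntables = 10, threads = 4  ->  a = 2, b = 2
 *   thread 0: tables 0..2 (3 tables)   thread 1: tables 3..5 (3 tables)
 *   thread 2: tables 6..7 (2 tables)   thread 3: tables 8..9 (2 tables)
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void printTableSplit(int64_t ntables, int threads) {
    if (threads < 1) threads = 1;
    int64_t a = ntables / threads;
    if (a < 1) { threads = (int)ntables; a = 1; }
    int64_t b = ntables % threads;
    uint64_t tableFrom = 0;
    for (int i = 0; i < threads; i++) {
        uint64_t from = tableFrom;
        // the first b threads get a + 1 tables, the rest get a
        uint64_t to = (i < b) ? tableFrom + a : tableFrom + a - 1;
        printf("thread[%d] creates tables %" PRIu64 "..%" PRIu64 "\n",
               i, from, to);
        tableFrom = to + 1;
    }
}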
- tmfree(childTbl->sampleDataBuf); - } - tmfree(childTbl); -} - -void postFreeResource() { - if (!g_arguments->terminate) { - tmfclose(g_arguments->fpOfInsertResult); - } - - for (int i = 0; i < g_arguments->databases->size; i++) { - SDataBase * database = benchArrayGet(g_arguments->databases, i); - if (database->cfgs) { - for (int c = 0; c < database->cfgs->size; c++) { - SDbCfg *cfg = benchArrayGet(database->cfgs, c); - if ((NULL == root) && (0 == strcmp(cfg->name, "replica"))) { - tmfree(cfg->name); - cfg->name = NULL; - } - } - benchArrayDestroy(database->cfgs); - } - if (database->superTbls) { - for (uint64_t j = 0; j < database->superTbls->size; j++) { - SSuperTable * stbInfo = benchArrayGet(database->superTbls, j); - tmfree(stbInfo->colsOfCreateChildTable); - stbInfo->colsOfCreateChildTable = NULL; - tmfree(stbInfo->sampleDataBuf); - stbInfo->sampleDataBuf = NULL; - tmfree(stbInfo->tagDataBuf); - stbInfo->tagDataBuf = NULL; - tmfree(stbInfo->partialColNameBuf); - stbInfo->partialColNameBuf = NULL; - benchArrayDestroy(stbInfo->batchTblCreatingNumbersArray); - benchArrayDestroy(stbInfo->batchTblCreatingIntervalsArray); - for (int k = 0; k < stbInfo->tags->size; k++) { - Field * tag = benchArrayGet(stbInfo->tags, k); - tmfree(tag->stmtData.data); - tag->stmtData.data = NULL; - } - benchArrayDestroy(stbInfo->tags); - - for (int k = 0; k < stbInfo->cols->size; k++) { - Field * col = benchArrayGet(stbInfo->cols, k); - tmfree(col->stmtData.data); - col->stmtData.data = NULL; - tmfree(col->stmtData.is_null); - col->stmtData.is_null = NULL; - } - if (g_arguments->test_mode == INSERT_TEST) { - if (stbInfo->childTblArray) { - for (int64_t child = 0; child < stbInfo->childTblCount; - child++) { - SChildTable *childTbl = - stbInfo->childTblArray[child]; - if (childTbl) { - freeChildTable(childTbl, stbInfo->cols->size); - } - } - } - } - benchArrayDestroy(stbInfo->cols); - tmfree(stbInfo->childTblArray); - stbInfo->childTblArray = NULL; - benchArrayDestroy(stbInfo->tsmas); -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - if ((0 == stbInfo->interlaceRows) - && (g_arguments->nthreads_auto)) { - for (int32_t v = 0; v < database->vgroups; v++) { - SVGroup *vg = benchArrayGet(database->vgArray, v); - tmfree(vg->childTblArray); - vg->childTblArray = NULL; - } - } -#endif // TD_VER_COMPATIBLE_3_0_0_0 - } -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - if (database->vgArray) - benchArrayDestroy(database->vgArray); -#endif // TD_VER_COMPATIBLE_3_0_0_0 - benchArrayDestroy(database->superTbls); - } - } - benchArrayDestroy(g_arguments->databases); - benchArrayDestroy(g_arguments->streams); - tools_cJSON_Delete(root); -} - -int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) { - SDataBase * database = pThreadInfo->dbInfo; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - TAOS_RES * res = NULL; - int32_t code = 0; - uint16_t iface = stbInfo->iface; - - int32_t trying = (stbInfo->keep_trying)? - stbInfo->keep_trying:g_arguments->keep_trying; - int32_t trying_interval = stbInfo->trying_interval? 
- stbInfo->trying_interval:g_arguments->trying_interval; - int protocol = stbInfo->lineProtocol; - - switch (iface) { - case TAOSC_IFACE: - debugPrint("buffer: %s\n", pThreadInfo->buffer); - code = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); - while (code && trying && !g_arguments->terminate) { - infoPrint("will sleep %"PRIu32" milliseconds then re-insert\n", - trying_interval); - toolsMsleep(trying_interval); - code = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); - if (trying != -1) { - trying--; - } - } - break; - - case REST_IFACE: - debugPrint("buffer: %s\n", pThreadInfo->buffer); - code = postProceSql(pThreadInfo->buffer, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - g_arguments->port, - stbInfo->tcpTransfer, - pThreadInfo->sockfd, - pThreadInfo->filePath); - while (code && trying && !g_arguments->terminate) { - infoPrint("will sleep %"PRIu32" milliseconds then re-insert\n", - trying_interval); - toolsMsleep(trying_interval); - code = postProceSql(pThreadInfo->buffer, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - g_arguments->port, - stbInfo->tcpTransfer, - pThreadInfo->sockfd, - pThreadInfo->filePath); - if (trying != -1) { - trying--; - } - } - break; - - case STMT_IFACE: - code = taos_stmt_execute(pThreadInfo->conn->stmt); - if (code) { - errorPrint( - "failed to execute insert statement. reason: %s\n", - taos_stmt_errstr(pThreadInfo->conn->stmt)); - code = -1; - } - break; - - case SML_IFACE: - res = taos_schemaless_insert( - pThreadInfo->conn->taos, pThreadInfo->lines, - (TSDB_SML_JSON_PROTOCOL == protocol - || SML_JSON_TAOS_FORMAT == protocol) - ? 0 : k, - (SML_JSON_TAOS_FORMAT == protocol) - ? TSDB_SML_JSON_PROTOCOL : protocol, - (TSDB_SML_LINE_PROTOCOL == protocol) - ? database->sml_precision - : TSDB_SML_TIMESTAMP_NOT_CONFIGURED); - code = taos_errno(res); - trying = stbInfo->keep_trying; - while (code && trying && !g_arguments->terminate) { - taos_free_result(res); - infoPrint("will sleep %"PRIu32" milliseconds then re-insert\n", - trying_interval); - toolsMsleep(trying_interval); - res = taos_schemaless_insert( - pThreadInfo->conn->taos, pThreadInfo->lines, - (TSDB_SML_JSON_PROTOCOL == protocol - || SML_JSON_TAOS_FORMAT == protocol) - ? 0 : k, - (SML_JSON_TAOS_FORMAT == protocol) - ? TSDB_SML_JSON_PROTOCOL : protocol, - (TSDB_SML_LINE_PROTOCOL == protocol) - ? database->sml_precision - : TSDB_SML_TIMESTAMP_NOT_CONFIGURED); - code = taos_errno(res); - if (trying != -1) { - trying--; - } - } - - if (code != TSDB_CODE_SUCCESS && !g_arguments->terminate) { - debugPrint("Failed to execute " - "schemaless insert content: %s\n\n", - pThreadInfo->lines?(pThreadInfo->lines[0]? - pThreadInfo->lines[0]:""):""); - errorPrint( - "failed to execute schemaless insert. 
" - "code: 0x%08x reason: %s\n\n", - code, taos_errstr(res)); - } - taos_free_result(res); - break; - - case SML_REST_IFACE: { - if (TSDB_SML_JSON_PROTOCOL == protocol - || SML_JSON_TAOS_FORMAT == protocol) { - code = postProceSql(pThreadInfo->lines[0], database->dbName, - database->precision, stbInfo->iface, - protocol, g_arguments->port, - stbInfo->tcpTransfer, - pThreadInfo->sockfd, pThreadInfo->filePath); - } else { - int len = 0; - for (int i = 0; i < k; i++) { - if (strlen(pThreadInfo->lines[i]) != 0) { - int n; - if (TSDB_SML_TELNET_PROTOCOL == protocol - && stbInfo->tcpTransfer) { - n = snprintf(pThreadInfo->buffer + len, - TSDB_MAX_ALLOWED_SQL_LEN - len, - "put %s\n", pThreadInfo->lines[i]); - } else { - n = snprintf(pThreadInfo->buffer + len, - TSDB_MAX_ALLOWED_SQL_LEN - len, - "%s\n", - pThreadInfo->lines[i]); - } - if (n < 0 || n >= TSDB_MAX_ALLOWED_SQL_LEN - len) { - errorPrint("%s() LN%d snprintf overflow on %d\n", - __func__, __LINE__, i); - break; - } else { - len += n; - } - } else { - break; - } - } - if (g_arguments->terminate) { - break; - } - code = postProceSql(pThreadInfo->buffer, database->dbName, - database->precision, - stbInfo->iface, protocol, - g_arguments->port, - stbInfo->tcpTransfer, - pThreadInfo->sockfd, pThreadInfo->filePath); - } - break; - } - } - return code; -} - -static int smartContinueIfFail(threadInfo *pThreadInfo, - SChildTable *childTbl, - int64_t i, - char *ttl) { - SDataBase * database = pThreadInfo->dbInfo; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - char *buffer = - benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, false); - snprintf( - buffer, TSDB_MAX_ALLOWED_SQL_LEN, - g_arguments->escape_character ? - "CREATE TABLE `%s`.`%s` USING `%s`.`%s` TAGS (%s) %s " - : "CREATE TABLE %s.%s USING %s.%s TAGS (%s) %s ", - database->dbName, childTbl->name, database->dbName, - stbInfo->stbName, - stbInfo->tagDataBuf + i * stbInfo->lenOfTags, ttl); - debugPrint("creating table: %s\n", buffer); - int ret; - if (REST_IFACE == stbInfo->iface) { - ret = queryDbExecRest(buffer, - database->dbName, - database->precision, - stbInfo->iface, - stbInfo->lineProtocol, - stbInfo->tcpTransfer, - pThreadInfo->sockfd); - } else { - ret = queryDbExecCall(pThreadInfo->conn, buffer); - int32_t trying = g_arguments->keep_trying; - while (ret && trying) { - infoPrint("will sleep %"PRIu32" milliseconds then " - "re-create table %s\n", - g_arguments->trying_interval, buffer); - toolsMsleep(g_arguments->trying_interval); - ret = queryDbExecCall(pThreadInfo->conn, buffer); - if (trying != -1) { - trying--; - } - } - } - tmfree(buffer); - - return ret; -} - -static void cleanupAndPrint(threadInfo *pThreadInfo, char *mode) { - if (pThreadInfo) { - if (pThreadInfo->json_array) { - tools_cJSON_Delete(pThreadInfo->json_array); - pThreadInfo->json_array = NULL; - } - if (0 == pThreadInfo->totalDelay) { - pThreadInfo->totalDelay = 1; - } - succPrint( - "thread[%d] %s mode, completed total inserted rows: %" PRIu64 - ", %.2f records/second\n", - pThreadInfo->threadID, - mode, - pThreadInfo->totalInsertRows, - (double)(pThreadInfo->totalInsertRows / - ((double)pThreadInfo->totalDelay / 1E6))); - } -} - -static int64_t getDisorderTs(SSuperTable *stbInfo, int *disorderRange) { - int64_t disorderTs = 0; - int64_t startTimestamp = stbInfo->startTimestamp; - if (stbInfo->disorderRatio > 0) { - int rand_num = taosRandom() % 100; - if (rand_num < stbInfo->disorderRatio) { - (*disorderRange)--; - if (0 == *disorderRange) { - *disorderRange = stbInfo->disorderRange; - } - disorderTs = startTimestamp 
- *disorderRange; - debugPrint("rand_num: %d, < disorderRatio: %d, " - "disorderTs: %"PRId64"\n", - rand_num, stbInfo->disorderRatio, - disorderTs); - } - } - return disorderTs; -} - -static void *syncWriteInterlace(void *sarg) { - threadInfo * pThreadInfo = (threadInfo *)sarg; - SDataBase * database = pThreadInfo->dbInfo; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - infoPrint( - "thread[%d] start interlace inserting into table from " - "%" PRIu64 " to %" PRIu64 "\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, - pThreadInfo->end_table_to); - - int64_t insertRows = stbInfo->insertRows; - int32_t interlaceRows = stbInfo->interlaceRows; - int64_t pos = 0; - uint32_t batchPerTblTimes = g_arguments->reqPerReq / interlaceRows; - uint64_t lastPrintTime = toolsGetTimestampMs(); - uint64_t lastTotalInsertRows = 0; - int64_t startTs = toolsGetTimestampUs(); - int64_t endTs; - uint64_t tableSeq = pThreadInfo->start_table_from; - int disorderRange = stbInfo->disorderRange; - - while (insertRows > 0) { - int64_t tmp_total_insert_rows = 0; - uint32_t generated = 0; - if (insertRows <= interlaceRows) { - interlaceRows = insertRows; - } - for (int i = 0; i < batchPerTblTimes; i++) { - if (g_arguments->terminate) { - goto free_of_interlace; - } - int64_t timestamp = pThreadInfo->start_time; - SChildTable *childTbl = stbInfo->childTblArray[tableSeq]; - char * tableName = - stbInfo->childTblArray[tableSeq]->name; - char *sampleDataBuf = childTbl->useOwnSample? - childTbl->sampleDataBuf: - stbInfo->sampleDataBuf; - char ttl[SMALL_BUFF_LEN] = ""; - if (stbInfo->ttl != 0) { - snprintf(ttl, SMALL_BUFF_LEN, "TTL %d", stbInfo->ttl); - } - switch (stbInfo->iface) { - case REST_IFACE: - case TAOSC_IFACE: { - char escapedTbName[TSDB_TABLE_NAME_LEN+2] = "\0"; - if (g_arguments->escape_character) { - snprintf(escapedTbName, TSDB_TABLE_NAME_LEN+2, "`%s`", - tableName); - } else { - snprintf(escapedTbName, TSDB_TABLE_NAME_LEN+2, "%s", - tableName); - } - if (i == 0) { - ds_add_str(&pThreadInfo->buffer, STR_INSERT_INTO); - } - if (stbInfo->partialColNum == stbInfo->cols->size) { - if (stbInfo->autoTblCreating) { - ds_add_strs(&pThreadInfo->buffer, 8, - escapedTbName, - " USING `", - stbInfo->stbName, - "` TAGS (", - stbInfo->tagDataBuf - + stbInfo->lenOfTags * tableSeq, - ") ", ttl, " VALUES "); - } else { - ds_add_strs(&pThreadInfo->buffer, 2, - escapedTbName, " VALUES "); - } - } else { - if (stbInfo->autoTblCreating) { - ds_add_strs(&pThreadInfo->buffer, 10, - escapedTbName, - " (", - stbInfo->partialColNameBuf, - ") USING `", - stbInfo->stbName, - "` TAGS (", - stbInfo->tagDataBuf - + stbInfo->lenOfTags * tableSeq, - ") ", ttl, " VALUES "); - } else { - ds_add_strs(&pThreadInfo->buffer, 4, - escapedTbName, - "(", - stbInfo->partialColNameBuf, - ") VALUES "); - } - } - - for (int64_t j = 0; j < interlaceRows; j++) { - int64_t disorderTs = getDisorderTs(stbInfo, - &disorderRange); - char time_string[BIGINT_BUFF_LEN]; - snprintf(time_string, BIGINT_BUFF_LEN, "%"PRId64"", - disorderTs?disorderTs:timestamp); - ds_add_strs(&pThreadInfo->buffer, 5, - "(", - time_string, - ",", - sampleDataBuf + pos * stbInfo->lenOfCols, - ") "); - if (ds_len(pThreadInfo->buffer) - > stbInfo->max_sql_len) { - errorPrint("sql buffer length (%"PRIu64") " - "is larger than max sql length " - "(%"PRId64")\n", - ds_len(pThreadInfo->buffer), - stbInfo->max_sql_len); - goto free_of_interlace; - } - generated++; - pos++; - if (pos >= g_arguments->prepared_rand) { - pos = 0; - } - timestamp += stbInfo->timestamp_step; - } - break; - } 
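To make the TAOSC/REST branch of syncWriteInterlace above easier to read: with interlaceRows = 2, no TTL, partialColNum equal to the column count and autoTblCreating disabled, each pass over the batch appends one table header plus interlaceRows "(timestamp,sample-row) " groups per child table, so a single request looks roughly like the line below (table names, timestamps and column values are illustrative):

    INSERT INTO `d0` VALUES (1500000000000,10.3,219,0.31) (1500000000001,10.5,220,0.33) `d1` VALUES (1500000000000,9.8,217,0.29) (1500000000001,9.9,218,0.30)

If the dynamic string grows past stbInfo->max_sql_len while these groups are appended, the thread reports the overflow and jumps to its cleanup label, which is why reqPerReq / interlaceRows bounds how many tables one request may touch.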
- case STMT_IFACE: { - char escapedTbName[TSDB_TABLE_NAME_LEN+2] = "\0"; - if (g_arguments->escape_character) { - snprintf(escapedTbName, TSDB_TABLE_NAME_LEN+2, - "`%s`", tableName); - } else { - snprintf(escapedTbName, TSDB_TABLE_NAME_LEN, "%s", - tableName); - } - if (taos_stmt_set_tbname(pThreadInfo->conn->stmt, - escapedTbName)) { - errorPrint( - "taos_stmt_set_tbname(%s) failed, reason: %s\n", - tableName, - taos_stmt_errstr(pThreadInfo->conn->stmt)); - g_fail = true; - goto free_of_interlace; - } - generated = - bindParamBatch(pThreadInfo, interlaceRows, - timestamp, childTbl); - break; - } - case SML_REST_IFACE: - case SML_IFACE: { - int protocol = stbInfo->lineProtocol; - for (int64_t j = 0; j < interlaceRows; j++) { - int64_t disorderTs = getDisorderTs(stbInfo, - &disorderRange); - if (TSDB_SML_JSON_PROTOCOL == protocol) { - tools_cJSON *tag = tools_cJSON_Duplicate( - tools_cJSON_GetArrayItem( - pThreadInfo->sml_json_tags, - (int)tableSeq - - pThreadInfo->start_table_from), - true); - generateSmlJsonCols( - pThreadInfo->json_array, tag, stbInfo, - database->sml_precision, - disorderTs?disorderTs:timestamp); - } else if (SML_JSON_TAOS_FORMAT == protocol) { - tools_cJSON *tag = tools_cJSON_Duplicate( - tools_cJSON_GetArrayItem( - pThreadInfo->sml_json_tags, - (int)tableSeq - - pThreadInfo->start_table_from), - true); - generateSmlTaosJsonCols( - pThreadInfo->json_array, tag, stbInfo, - database->sml_precision, - disorderTs?disorderTs:timestamp); - } else if (TSDB_SML_LINE_PROTOCOL == protocol) { - snprintf( - pThreadInfo->lines[generated], - stbInfo->lenOfCols + stbInfo->lenOfTags, - "%s %s %" PRId64 "", - pThreadInfo - ->sml_tags[(int)tableSeq - - pThreadInfo->start_table_from], - sampleDataBuf + pos * stbInfo->lenOfCols, - disorderTs?disorderTs:timestamp); - } else { - snprintf( - pThreadInfo->lines[generated], - stbInfo->lenOfCols + stbInfo->lenOfTags, - "%s %" PRId64 " %s %s", stbInfo->stbName, - disorderTs?disorderTs:timestamp, - sampleDataBuf + pos * stbInfo->lenOfCols, - pThreadInfo - ->sml_tags[(int)tableSeq - - pThreadInfo->start_table_from]); - } - generated++; - timestamp += stbInfo->timestamp_step; - } - if (TSDB_SML_JSON_PROTOCOL == protocol - || SML_JSON_TAOS_FORMAT == protocol) { - pThreadInfo->lines[0] = - tools_cJSON_PrintUnformatted( - pThreadInfo->json_array); - } - break; - } - } - tableSeq++; - tmp_total_insert_rows += interlaceRows; - if (tableSeq > pThreadInfo->end_table_to) { - tableSeq = pThreadInfo->start_table_from; - pThreadInfo->start_time += - interlaceRows * stbInfo->timestamp_step; - if (!stbInfo->non_stop) { - insertRows -= interlaceRows; - } - if (stbInfo->insert_interval > 0) { - debugPrint("%s() LN%d, insert_interval: %"PRIu64"\n", - __func__, __LINE__, stbInfo->insert_interval); - perfPrint("sleep %" PRIu64 " ms\n", - stbInfo->insert_interval); - toolsMsleep((int32_t)stbInfo->insert_interval); - } - break; - } - } - - startTs = toolsGetTimestampUs(); - if (execInsert(pThreadInfo, generated)) { - g_fail = true; - goto free_of_interlace; - } - endTs = toolsGetTimestampUs(); - - pThreadInfo->totalInsertRows += tmp_total_insert_rows; - - if (g_arguments->terminate) { - goto free_of_interlace; - } - - int protocol = stbInfo->lineProtocol; - switch (stbInfo->iface) { - case TAOSC_IFACE: - case REST_IFACE: - debugPrint("pThreadInfo->buffer: %s\n", - pThreadInfo->buffer); - free_ds(&pThreadInfo->buffer); - pThreadInfo->buffer = new_ds(0); - break; - case SML_REST_IFACE: - memset(pThreadInfo->buffer, 0, - g_arguments->reqPerReq * (pThreadInfo->max_sql_len 
+ 1)); - case SML_IFACE: - if (TSDB_SML_JSON_PROTOCOL == protocol - || SML_JSON_TAOS_FORMAT == protocol) { - debugPrint("pThreadInfo->lines[0]: %s\n", - pThreadInfo->lines[0]); - if (pThreadInfo->json_array && !g_arguments->terminate) { - tools_cJSON_Delete(pThreadInfo->json_array); - pThreadInfo->json_array = NULL; - } - pThreadInfo->json_array = tools_cJSON_CreateArray(); - if (pThreadInfo->lines && pThreadInfo->lines[0]) { - tmfree(pThreadInfo->lines[0]); - pThreadInfo->lines[0] = NULL; - } - } else { - for (int j = 0; j < generated; j++) { - if (pThreadInfo && pThreadInfo->lines - && !g_arguments->terminate) { - debugPrint("pThreadInfo->lines[%d]: %s\n", j, - pThreadInfo->lines[j]); - memset(pThreadInfo->lines[j], 0, - pThreadInfo->max_sql_len); - } - } - } - break; - case STMT_IFACE: - break; - } - - int64_t delay = endTs - startTs; - if (delay <=0) { - debugPrint("thread[%d]: startTS: %"PRId64", endTS: %"PRId64"\n", - pThreadInfo->threadID, startTs, endTs); - } else { - perfPrint("insert execution time is %10.2f ms\n", - delay / 1E6); - - int64_t * pdelay = benchCalloc(1, sizeof(int64_t), false); - *pdelay = delay; - if (benchArrayPush(pThreadInfo->delayList, pdelay) == NULL) { - tmfree(pdelay); - } - pThreadInfo->totalDelay += delay; - } - - int64_t currentPrintTime = toolsGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - infoPrint( - "thread[%d] has currently inserted rows: %" PRIu64 - ", peroid insert rate: %.3f rows/s \n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - (double)(pThreadInfo->totalInsertRows - lastTotalInsertRows) * 1000.0/(currentPrintTime - lastPrintTime)); - lastPrintTime = currentPrintTime; - lastTotalInsertRows = pThreadInfo->totalInsertRows; - } - } -free_of_interlace: - cleanupAndPrint(pThreadInfo, "interlace"); - return NULL; -} - -static int32_t prepareProgressDataStmt( - threadInfo *pThreadInfo, - SChildTable *childTbl, - int64_t *timestamp, uint64_t i, char *ttl) { - SSuperTable *stbInfo = pThreadInfo->stbInfo; - char escapedTbName[TSDB_TABLE_NAME_LEN + 2] = "\0"; - if (g_arguments->escape_character) { - snprintf(escapedTbName, TSDB_TABLE_NAME_LEN + 2, - "`%s`", childTbl->name); - } else { - snprintf(escapedTbName, TSDB_TABLE_NAME_LEN, "%s", - childTbl->name); - } - if (taos_stmt_set_tbname(pThreadInfo->conn->stmt, - escapedTbName)) { - errorPrint( - "taos_stmt_set_tbname(%s) failed," - "reason: %s\n", escapedTbName, - taos_stmt_errstr(pThreadInfo->conn->stmt)); - return -1; - } - int32_t generated = bindParamBatch( - pThreadInfo, - (g_arguments->reqPerReq > (stbInfo->insertRows - i)) - ? 
(stbInfo->insertRows - i) - : g_arguments->reqPerReq, - *timestamp, childTbl); - *timestamp += generated * stbInfo->timestamp_step; - return generated; -} - -static void makeTimestampDisorder( - int64_t *timestamp, SSuperTable *stbInfo) { - int64_t startTimestamp = stbInfo->startTimestamp; - int disorderRange = stbInfo->disorderRange; - int rand_num = taosRandom() % 100; - if (rand_num < stbInfo->disorderRatio) { - disorderRange--; - if (0 == disorderRange) { - disorderRange = stbInfo->disorderRange; - } - *timestamp = startTimestamp - disorderRange; - debugPrint("rand_num: %d, < disorderRatio: %d" - ", ts: %"PRId64"\n", - rand_num, - stbInfo->disorderRatio, - *timestamp); - } -} - -static int32_t prepareProgressDataSmlJsonText( - threadInfo *pThreadInfo, - uint64_t tableSeq, - int64_t *timestamp, uint64_t i, char *ttl) { - // prepareProgressDataSmlJsonText - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int32_t generated = 0; - - int len = 0; - - char *line = pThreadInfo->lines[0]; - uint32_t line_buf_len = pThreadInfo->line_buf_len; - - strncat(line + len, "[", 2); - len += 1; - - int32_t pos = 0; - for (int j = 0; (j < g_arguments->reqPerReq) - && !g_arguments->terminate; j++) { - strncat(line + len, "{", 2); - len += 1; - int n; - n = snprintf(line + len, line_buf_len - len, - "\"timestamp\":%"PRId64",", *timestamp); - if (n < 0 || n >= line_buf_len - len) { - errorPrint("%s() LN%d snprintf overflow on %d\n", - __func__, __LINE__, j); - return -1; - } else { - len += n; - } - - n = snprintf(line + len, line_buf_len - len, "%s", - pThreadInfo->sml_json_value_array[tableSeq]); - if (n < 0 || n >= line_buf_len - len) { - errorPrint("%s() LN%d snprintf overflow on %d\n", - __func__, __LINE__, j); - return -1; - } else { - len += n; - } - n = snprintf(line + len, line_buf_len - len, "\"tags\":%s,", - pThreadInfo->sml_tags_json_array[tableSeq]); - if (n < 0 || n >= line_buf_len - len) { - errorPrint("%s() LN%d snprintf overflow on %d\n", - __func__, __LINE__, j); - return -1; - } else { - len += n; - } - n = snprintf(line + len, line_buf_len - len, - "\"metric\":\"%s\"}", stbInfo->stbName); - if (n < 0 || n >= line_buf_len - len) { - errorPrint("%s() LN%d snprintf overflow on %d\n", - __func__, __LINE__, j); - return -1; - } else { - len += n; - } - - pos++; - if (pos >= g_arguments->prepared_rand) { - pos = 0; - } - *timestamp += stbInfo->timestamp_step; - if (stbInfo->disorderRatio > 0) { - makeTimestampDisorder(timestamp, stbInfo); - } - generated++; - if (i + generated >= stbInfo->insertRows) { - break; - } - if ((j+1) < g_arguments->reqPerReq) { - strncat(line + len, ",", 2); - len += 1; - } - } - strncat(line + len, "]", 2); - - debugPrint("%s() LN%d, lines[0]: %s\n", - __func__, __LINE__, pThreadInfo->lines[0]); - return generated; -} - -static int32_t prepareProgressDataSmlJson( - threadInfo *pThreadInfo, - uint64_t tableSeq, - int64_t *timestamp, uint64_t i, char *ttl) { - // prepareProgressDataSmlJson - SDataBase * database = pThreadInfo->dbInfo; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int32_t generated = 0; - - int32_t pos = 0; - int protocol = stbInfo->lineProtocol; - for (int j = 0; (j < g_arguments->reqPerReq) - && !g_arguments->terminate; j++) { - tools_cJSON *tag = tools_cJSON_Duplicate( - tools_cJSON_GetArrayItem( - pThreadInfo->sml_json_tags, - (int)tableSeq - - pThreadInfo->start_table_from), - true); - debugPrintJsonNoTime(tag); - if (TSDB_SML_JSON_PROTOCOL == protocol) { - generateSmlJsonCols( - pThreadInfo->json_array, tag, stbInfo, - database->sml_precision, 
*timestamp); - } else { - generateSmlTaosJsonCols( - pThreadInfo->json_array, tag, stbInfo, - database->sml_precision, *timestamp); - } - pos++; - if (pos >= g_arguments->prepared_rand) { - pos = 0; - } - *timestamp += stbInfo->timestamp_step; - if (stbInfo->disorderRatio > 0) { - makeTimestampDisorder(timestamp, stbInfo); - } - generated++; - if (i + generated >= stbInfo->insertRows) { - break; - } - } - - tmfree(pThreadInfo->lines[0]); - pThreadInfo->lines[0] = NULL; - pThreadInfo->lines[0] = - tools_cJSON_PrintUnformatted( - pThreadInfo->json_array); - debugPrint("pThreadInfo->lines[0]: %s\n", - pThreadInfo->lines[0]); - - return generated; -} - -static int32_t prepareProgressDataSmlLineOrTelnet( - threadInfo *pThreadInfo, uint64_t tableSeq, char *sampleDataBuf, - int64_t *timestamp, uint64_t i, char *ttl, int protocol) { - // prepareProgressDataSmlLine - SSuperTable *stbInfo = pThreadInfo->stbInfo; - int32_t generated = 0; - - int32_t pos = 0; - for (int j = 0; (j < g_arguments->reqPerReq) - && !g_arguments->terminate; j++) { - if (TSDB_SML_LINE_PROTOCOL == protocol) { - snprintf( - pThreadInfo->lines[j], - stbInfo->lenOfCols + stbInfo->lenOfTags, - "%s %s %" PRId64 "", - pThreadInfo->sml_tags[tableSeq - - pThreadInfo->start_table_from], - sampleDataBuf + pos * stbInfo->lenOfCols, - *timestamp); - } else { - snprintf( - pThreadInfo->lines[j], - stbInfo->lenOfCols + stbInfo->lenOfTags, - "%s %" PRId64 " %s %s", stbInfo->stbName, - *timestamp, - sampleDataBuf - + pos * stbInfo->lenOfCols, - pThreadInfo->sml_tags[tableSeq - -pThreadInfo->start_table_from]); - } - pos++; - if (pos >= g_arguments->prepared_rand) { - pos = 0; - } - *timestamp += stbInfo->timestamp_step; - if (stbInfo->disorderRatio > 0) { - makeTimestampDisorder(timestamp, stbInfo); - } - generated++; - if (i + generated >= stbInfo->insertRows) { - break; - } - } - return generated; -} - -static int32_t prepareProgressDataSml( - threadInfo *pThreadInfo, - SChildTable *childTbl, - uint64_t tableSeq, - int64_t *timestamp, uint64_t i, char *ttl) { - // prepareProgressDataSml - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - char *sampleDataBuf; - if (childTbl->useOwnSample) { - sampleDataBuf = childTbl->sampleDataBuf; - } else { - sampleDataBuf = stbInfo->sampleDataBuf; - } - int protocol = stbInfo->lineProtocol; - int32_t generated = -1; - switch (protocol) { - case TSDB_SML_LINE_PROTOCOL: - case TSDB_SML_TELNET_PROTOCOL: - generated = prepareProgressDataSmlLineOrTelnet( - pThreadInfo, - tableSeq, - sampleDataBuf, - timestamp, i, ttl, protocol); - break; - case TSDB_SML_JSON_PROTOCOL: - generated = prepareProgressDataSmlJsonText( - pThreadInfo, - tableSeq - pThreadInfo->start_table_from, - timestamp, i, ttl); - break; - case SML_JSON_TAOS_FORMAT: - generated = prepareProgressDataSmlJson( - pThreadInfo, - tableSeq, - timestamp, i, ttl); - break; - default: - errorPrint("%s() LN%d: unknown protcolor: %d\n", - __func__, __LINE__, protocol); - break; - } - - return generated; -} - -static int32_t prepareProgressDataSql( - threadInfo *pThreadInfo, - SChildTable *childTbl, uint64_t tableSeq, - char *sampleDataBuf, - int64_t *timestamp, uint64_t i, char *ttl, - int32_t *pos, uint64_t *len) { - // prepareProgressDataSql - int32_t generated = 0; - SDataBase *database = pThreadInfo->dbInfo; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - char * pstr = pThreadInfo->buffer; - int disorderRange = stbInfo->disorderRange; - - if (stbInfo->partialColNum == stbInfo->cols->size) { - if (stbInfo->autoTblCreating) { - *len = - snprintf(pstr, 
TSDB_MAX_ALLOWED_SQL_LEN, - g_arguments->escape_character - ? "%s `%s`.`%s` USING `%s`.`%s` TAGS (%s) %s VALUES " - : "%s %s.%s USING %s.%s TAGS (%s) %s VALUES ", - STR_INSERT_INTO, database->dbName, - childTbl->name, database->dbName, - stbInfo->stbName, - stbInfo->tagDataBuf + - stbInfo->lenOfTags * tableSeq, ttl); - } else { - *len = snprintf(pstr, TSDB_MAX_ALLOWED_SQL_LEN, - g_arguments->escape_character - ? "%s `%s`.`%s` VALUES " - : "%s %s.%s VALUES ", - STR_INSERT_INTO, - database->dbName, childTbl->name); - } - } else { - if (stbInfo->autoTblCreating) { - *len = snprintf( - pstr, TSDB_MAX_ALLOWED_SQL_LEN, - g_arguments->escape_character - ? "%s `%s`.`%s` (%s) USING `%s`.`%s` TAGS (%s) %s VALUES " - : "%s %s.%s (%s) USING %s.%s TAGS (%s) %s VALUES ", - STR_INSERT_INTO, database->dbName, - childTbl->name, - stbInfo->partialColNameBuf, - database->dbName, stbInfo->stbName, - stbInfo->tagDataBuf + - stbInfo->lenOfTags * tableSeq, ttl); - } else { - *len = snprintf(pstr, TSDB_MAX_ALLOWED_SQL_LEN, - g_arguments->escape_character - ? "%s `%s`.`%s` (%s) VALUES " - : "%s %s.%s (%s) VALUES ", - STR_INSERT_INTO, database->dbName, - childTbl->name, - stbInfo->partialColNameBuf); - } - } - - char *ownSampleDataBuf; - if (childTbl->useOwnSample) { - debugPrint("%s is using own sample data\n", - childTbl->name); - ownSampleDataBuf = childTbl->sampleDataBuf; - } else { - ownSampleDataBuf = stbInfo->sampleDataBuf; - } - for (int j = 0; j < g_arguments->reqPerReq; j++) { - if (stbInfo->useSampleTs - && (!stbInfo->random_data_source)) { - *len += - snprintf(pstr + *len, - TSDB_MAX_ALLOWED_SQL_LEN - *len, "(%s)", - sampleDataBuf + - *pos * stbInfo->lenOfCols); - } else { - int64_t disorderTs = getDisorderTs(stbInfo, &disorderRange); - *len += snprintf(pstr + *len, - TSDB_MAX_ALLOWED_SQL_LEN - *len, - "(%" PRId64 ",%s)", - disorderTs?disorderTs:*timestamp, - ownSampleDataBuf + - *pos * stbInfo->lenOfCols); - } - *pos += 1; - if (*pos >= g_arguments->prepared_rand) { - *pos = 0; - } - *timestamp += stbInfo->timestamp_step; - generated++; - if (*len > (TSDB_MAX_ALLOWED_SQL_LEN - - stbInfo->lenOfCols)) { - break; - } - if (i + generated >= stbInfo->insertRows) { - break; - } - } - - return generated; -} - -void *syncWriteProgressive(void *sarg) { - threadInfo * pThreadInfo = (threadInfo *)sarg; - SDataBase * database = pThreadInfo->dbInfo; - SSuperTable *stbInfo = pThreadInfo->stbInfo; - - // special deal flow for TAOSC_IFACE - if (insertDataMix(pThreadInfo, database, stbInfo)) { - // request be dealt by this function , so return - return NULL; - } - -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - if (g_arguments->nthreads_auto) { - if (0 == pThreadInfo->vg->tbCountPerVgId) { - return NULL; - } - } else { - infoPrint( - "thread[%d] start progressive inserting into table from " - "%" PRIu64 " to %" PRIu64 "\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, - pThreadInfo->end_table_to + 1); - } -#else - infoPrint( - "thread[%d] start progressive inserting into table from " - "%" PRIu64 " to %" PRIu64 "\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, - pThreadInfo->end_table_to + 1); -#endif - uint64_t lastPrintTime = toolsGetTimestampMs(); - uint64_t lastTotalInsertRows = 0; - int64_t startTs = toolsGetTimestampUs(); - int64_t endTs; - - for (uint64_t tableSeq = pThreadInfo->start_table_from; - tableSeq <= pThreadInfo->end_table_to; tableSeq++) { - char *sampleDataBuf; - SChildTable *childTbl; -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - if (g_arguments->nthreads_auto) { - childTbl = 
pThreadInfo->vg->childTblArray[tableSeq]; - } else { - childTbl = stbInfo->childTblArray[ - stbInfo->childTblExists? - tableSeq: - stbInfo->childTblFrom + tableSeq]; - } -#else - childTbl = stbInfo->childTblArray[ - stbInfo->childTblExists? - tableSeq: - stbInfo->childTblFrom + tableSeq]; -#endif - if (childTbl->useOwnSample) { - sampleDataBuf = childTbl->sampleDataBuf; - } else { - sampleDataBuf = stbInfo->sampleDataBuf; - } - - int64_t timestamp = pThreadInfo->start_time; - uint64_t len = 0; - int32_t pos = 0; - if (stbInfo->iface == STMT_IFACE && stbInfo->autoTblCreating) { - taos_stmt_close(pThreadInfo->conn->stmt); - pThreadInfo->conn->stmt = taos_stmt_init(pThreadInfo->conn->taos); - if (NULL == pThreadInfo->conn->stmt) { - errorPrint("taos_stmt_init() failed, reason: %s\n", - taos_errstr(NULL)); - g_fail = true; - goto free_of_progressive; - } - - if (prepareStmt(stbInfo, pThreadInfo->conn->stmt, tableSeq)) { - g_fail = true; - goto free_of_progressive; - } - } - - char ttl[SMALL_BUFF_LEN] = ""; - if (stbInfo->ttl != 0) { - snprintf(ttl, SMALL_BUFF_LEN, "TTL %d", stbInfo->ttl); - } - for (uint64_t i = 0; i < stbInfo->insertRows;) { - if (g_arguments->terminate) { - goto free_of_progressive; - } - int32_t generated = 0; - switch (stbInfo->iface) { - case TAOSC_IFACE: - case REST_IFACE: - generated = prepareProgressDataSql( - pThreadInfo, - childTbl, - tableSeq, - sampleDataBuf, - ×tamp, i, ttl, &pos, &len); - break; - case STMT_IFACE: { - generated = prepareProgressDataStmt( - pThreadInfo, - childTbl, ×tamp, i, ttl); - break; - } - case SML_REST_IFACE: - case SML_IFACE: - generated = prepareProgressDataSml( - pThreadInfo, - childTbl, - tableSeq, ×tamp, i, ttl); - break; - default: - break; - } - if (generated < 0) { - g_fail = true; - goto free_of_progressive; - } - if (!stbInfo->non_stop) { - i += generated; - } - // only measure insert - startTs = toolsGetTimestampUs(); - int code = execInsert(pThreadInfo, generated); - if (code) { - if (NO_IF_FAILED == stbInfo->continueIfFail) { - warnPrint("The super table parameter " - "continueIfFail: %d, STOP insertion!\n", - stbInfo->continueIfFail); - g_fail = true; - goto free_of_progressive; - } else if (YES_IF_FAILED == stbInfo->continueIfFail) { - infoPrint("The super table parameter " - "continueIfFail: %d, " - "will continue to insert ..\n", - stbInfo->continueIfFail); - } else if (SMART_IF_FAILED == stbInfo->continueIfFail) { - warnPrint("The super table parameter " - "continueIfFail: %d, will create table " - "then insert ..\n", - stbInfo->continueIfFail); - int ret = smartContinueIfFail( - pThreadInfo, - childTbl, i, ttl); - if (0 != ret) { - g_fail = true; - goto free_of_progressive; - } - - code = execInsert(pThreadInfo, generated); - if (code) { - g_fail = true; - goto free_of_progressive; - } - } else { - warnPrint("Unknown super table parameter " - "continueIfFail: %d\n", - stbInfo->continueIfFail); - g_fail = true; - goto free_of_progressive; - } - } - endTs = toolsGetTimestampUs()+1; - - if (stbInfo->insert_interval > 0) { - debugPrint("%s() LN%d, insert_interval: %"PRIu64"\n", - __func__, __LINE__, stbInfo->insert_interval); - perfPrint("sleep %" PRIu64 " ms\n", - stbInfo->insert_interval); - toolsMsleep((int32_t)stbInfo->insert_interval); - } - - pThreadInfo->totalInsertRows += generated; - - if (g_arguments->terminate) { - goto free_of_progressive; - } - int protocol = stbInfo->lineProtocol; - switch (stbInfo->iface) { - case REST_IFACE: - case TAOSC_IFACE: - memset(pThreadInfo->buffer, 0, pThreadInfo->max_sql_len); - 
break; - case SML_REST_IFACE: - memset(pThreadInfo->buffer, 0, - g_arguments->reqPerReq * - (pThreadInfo->max_sql_len + 1)); - case SML_IFACE: - if (TSDB_SML_JSON_PROTOCOL == protocol) { - memset(pThreadInfo->lines[0], 0, - pThreadInfo->line_buf_len); - } else if (SML_JSON_TAOS_FORMAT == protocol) { - if (pThreadInfo->lines && pThreadInfo->lines[0]) { - tmfree(pThreadInfo->lines[0]); - pThreadInfo->lines[0] = NULL; - } - if (pThreadInfo->json_array) { - tools_cJSON_Delete(pThreadInfo->json_array); - pThreadInfo->json_array = NULL; - } - pThreadInfo->json_array = tools_cJSON_CreateArray(); - } else { - for (int j = 0; j < generated; j++) { - debugPrint("pThreadInfo->lines[%d]: %s\n", - j, pThreadInfo->lines[j]); - memset(pThreadInfo->lines[j], 0, - pThreadInfo->max_sql_len); - } - } - break; - case STMT_IFACE: - break; - } - - int64_t delay = endTs - startTs; - if (delay <= 0) { - debugPrint("thread[%d]: startTs: %"PRId64", endTs: %"PRId64"\n", - pThreadInfo->threadID, startTs, endTs); - } else { - perfPrint("insert execution time is %.6f s\n", - delay / 1E6); - - int64_t * pDelay = benchCalloc(1, sizeof(int64_t), false); - *pDelay = delay; - if (benchArrayPush(pThreadInfo->delayList, pDelay) == NULL) { - tmfree(pDelay); - } - pThreadInfo->totalDelay += delay; - } - - int64_t currentPrintTime = toolsGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30 * 1000) { - infoPrint( - "thread[%d] has currently inserted rows: " - "%" PRId64 ", peroid insert rate: %.3f rows/s \n", - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - (double)(pThreadInfo->totalInsertRows - lastTotalInsertRows) * 1000.0/(currentPrintTime - lastPrintTime)); - lastPrintTime = currentPrintTime; - lastTotalInsertRows = pThreadInfo->totalInsertRows; - } - if (i >= stbInfo->insertRows) { - break; - } - } // insertRows - } // tableSeq -free_of_progressive: - cleanupAndPrint(pThreadInfo, "progressive"); - return NULL; -} - -static int initStmtDataValue(SSuperTable *stbInfo, SChildTable *childTbl) { - int32_t columnCount = stbInfo->cols->size; - - char *sampleDataBuf; - if (childTbl) { - sampleDataBuf = childTbl->sampleDataBuf; - } else { - sampleDataBuf = stbInfo->sampleDataBuf; - } - int64_t lenOfOneRow = stbInfo->lenOfCols; - - if (stbInfo->useSampleTs) { - columnCount += 1; // for skipping first column - } - for (int i=0; i < g_arguments->prepared_rand; i++) { - int cursor = 0; - - for (int c = 0; c < columnCount; c++) { - char *restStr = sampleDataBuf - + lenOfOneRow * i + cursor; - int lengthOfRest = strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index++) { - if (restStr[index] == ',') { - break; - } - } - - cursor += index + 1; // skip ',' too - if ((0 == c) && stbInfo->useSampleTs) { - continue; - } - - char *tmpStr = calloc(1, index + 1); - if (NULL == tmpStr) { - errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, index + 1); - return -1; - } - Field *col = benchArrayGet(stbInfo->cols, - (stbInfo->useSampleTs?c-1:c)); - char dataType = col->type; - - StmtData *stmtData; - if (childTbl) { - ChildField *childCol = - benchArrayGet(childTbl->childCols, - (stbInfo->useSampleTs?c-1:c)); - stmtData = &childCol->stmtData; - } else { - stmtData = &col->stmtData; - } - - strncpy(tmpStr, restStr, index); - - if (0 == strcmp(tmpStr, "NULL")) { - *(stmtData->is_null + i) = true; - } else { - switch (dataType) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - *((int32_t*)stmtData->data + i) = atoi(tmpStr); - break; - case TSDB_DATA_TYPE_FLOAT: - 
*((float*)stmtData->data +i) = (float)atof(tmpStr); - break; - case TSDB_DATA_TYPE_DOUBLE: - *((double*)stmtData->data + i) = atof(tmpStr); - break; - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - *((int8_t*)stmtData->data + i) = (int8_t)atoi(tmpStr); - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - *((int16_t*)stmtData->data + i) = (int16_t)atoi(tmpStr); - break; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - *((int64_t*)stmtData->data + i) = (int64_t)atol(tmpStr); - break; - case TSDB_DATA_TYPE_BOOL: - *((int8_t*)stmtData->data + i) = (int8_t)atoi(tmpStr); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - *((int64_t*)stmtData->data + i) = (int64_t)atol(tmpStr); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - { - size_t tmpLen = strlen(tmpStr); - debugPrint("%s() LN%d, index: %d, " - "tmpStr len: %"PRIu64", col->length: %d\n", - __func__, __LINE__, - i, (uint64_t)tmpLen, col->length); - if (tmpLen-2 > col->length) { - errorPrint("data length %"PRIu64" " - "is larger than column length %d\n", - (uint64_t)tmpLen, col->length); - } - if (tmpLen > 2) { - strncpy((char *)stmtData->data - + i * col->length, - tmpStr+1, - min(col->length, tmpLen - 2)); - } else { - strncpy((char *)stmtData->data - + i*col->length, - "", 1); - } - } - break; - default: - break; - } - } - free(tmpStr); - } - } - return 0; -} - -static void initStmtData(char dataType, void **data, uint32_t length) { - char *tmpP = NULL; - - switch (dataType) { - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - tmpP = calloc(1, sizeof(int) * g_arguments->prepared_rand); - assert(tmpP); - tmfree(*data); - *data = (void*)tmpP; - break; - - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - tmpP = calloc(1, sizeof(int8_t) * g_arguments->prepared_rand); - assert(tmpP); - tmfree(*data); - *data = (void*)tmpP; - break; - - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - tmpP = calloc(1, sizeof(int16_t) * g_arguments->prepared_rand); - assert(tmpP); - tmfree(*data); - *data = (void*)tmpP; - break; - - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - tmpP = calloc(1, sizeof(int64_t) * g_arguments->prepared_rand); - assert(tmpP); - tmfree(*data); - *data = (void*)tmpP; - break; - - case TSDB_DATA_TYPE_BOOL: - tmpP = calloc(1, sizeof(int8_t) * g_arguments->prepared_rand); - assert(tmpP); - tmfree(*data); - *data = (void*)tmpP; - break; - - case TSDB_DATA_TYPE_FLOAT: - tmpP = calloc(1, sizeof(float) * g_arguments->prepared_rand); - assert(tmpP); - tmfree(*data); - *data = (void*)tmpP; - break; - - case TSDB_DATA_TYPE_DOUBLE: - tmpP = calloc(1, sizeof(double) * g_arguments->prepared_rand); - assert(tmpP); - tmfree(*data); - *data = (void*)tmpP; - break; - - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - tmpP = calloc(1, g_arguments->prepared_rand * length); - assert(tmpP); - tmfree(*data); - *data = (void*)tmpP; - break; - - case TSDB_DATA_TYPE_TIMESTAMP: - tmpP = calloc(1, sizeof(int64_t) * g_arguments->prepared_rand); - assert(tmpP); - tmfree(*data); - *data = (void*)tmpP; - break; - - default: - errorPrint("Unknown data type: %s\n", - convertDatatypeToString(dataType)); - exit(EXIT_FAILURE); - } -} - -static int parseBufferToStmtBatchChildTbl(SSuperTable *stbInfo, - SChildTable* childTbl) { - int32_t columnCount = stbInfo->cols->size; - - for (int c = 0; c < columnCount; c++) { - Field *col = benchArrayGet(stbInfo->cols, c); - ChildField *childCol = benchArrayGet(childTbl->childCols, c); - char dataType = 
col->type; - - char *is_null = benchCalloc( - 1, sizeof(char) *g_arguments->prepared_rand, false); - - tmfree(childCol->stmtData.is_null); - childCol->stmtData.is_null = is_null; - - initStmtData(dataType, &(childCol->stmtData.data), col->length); - } - - return initStmtDataValue(stbInfo, childTbl); -} - -static int parseBufferToStmtBatch(SSuperTable* stbInfo) { - int32_t columnCount = stbInfo->cols->size; - - for (int c = 0; c < columnCount; c++) { - Field *col = benchArrayGet(stbInfo->cols, c); - char dataType = col->type; - - char *is_null = benchCalloc( - 1, sizeof(char) *g_arguments->prepared_rand, false); - tmfree(col->stmtData.is_null); - col->stmtData.is_null = is_null; - - initStmtData(dataType, &(col->stmtData.data), col->length); - } - - return initStmtDataValue(stbInfo, NULL); -} - -static int64_t fillChildTblNameByCount(SSuperTable *stbInfo) { - for (int64_t i = 0; i < stbInfo->childTblCount; i++) { - snprintf(stbInfo->childTblArray[i]->name, - TSDB_TABLE_NAME_LEN, - "%s%" PRIu64 "", - stbInfo->childTblPrefix, i); - debugPrint("%s(): %s\n", __func__, - stbInfo->childTblArray[i]->name); - } - - return stbInfo->childTblCount; -} - -static int64_t fillChildTblNameByFromTo(SDataBase *database, - SSuperTable* stbInfo) { - for (int64_t i = stbInfo->childTblFrom; i < stbInfo->childTblTo; i++) { - snprintf(stbInfo->childTblArray[i-stbInfo->childTblFrom]->name, - TSDB_TABLE_NAME_LEN, - "%s%" PRIu64 "", - stbInfo->childTblPrefix, i); - } - - return (stbInfo->childTblTo-stbInfo->childTblFrom); -} - -static int64_t fillChildTblNameByLimitOffset(SDataBase *database, - SSuperTable* stbInfo) { - SBenchConn* conn = initBenchConn(); - if (NULL == conn) { - return -1; - } - char cmd[SHORT_1K_SQL_BUFF_LEN] = "\0"; - if (g_arguments->taosc_version == 3) { - snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, - "SELECT DISTINCT(TBNAME) FROM %s.`%s` LIMIT %" PRId64 - " OFFSET %" PRIu64 "", - database->dbName, stbInfo->stbName, stbInfo->childTblLimit, - stbInfo->childTblOffset); - } else { - snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, - "SELECT TBNAME FROM %s.`%s` LIMIT %" PRId64 - " OFFSET %" PRIu64 "", - database->dbName, stbInfo->stbName, stbInfo->childTblLimit, - stbInfo->childTblOffset); - } - debugPrint("cmd: %s\n", cmd); - TAOS_RES *res = taos_query(conn->taos, cmd); - int32_t code = taos_errno(res); - int64_t count = 0; - if (code) { - printErrCmdCodeStr(cmd, code, res); - closeBenchConn(conn); - return -1; - } - TAOS_ROW row = NULL; - while ((row = taos_fetch_row(res)) != NULL) { - int *lengths = taos_fetch_lengths(res); - strncpy(stbInfo->childTblArray[count]->name, row[0], lengths[0]); - stbInfo->childTblArray[count]->name[lengths[0] + 1] = '\0'; - debugPrint("stbInfo->childTblArray[%" PRId64 "]->name: %s\n", - count, stbInfo->childTblArray[count]->name); - count++; - } - taos_free_result(res); - closeBenchConn(conn); - return count; -} - -static void preProcessArgument(SSuperTable *stbInfo) { - if (stbInfo->interlaceRows > g_arguments->reqPerReq) { - infoPrint( - "interlaceRows(%d) is larger than record per request(%u), which " - "will be set to %u\n", - stbInfo->interlaceRows, g_arguments->reqPerReq, - g_arguments->reqPerReq); - stbInfo->interlaceRows = g_arguments->reqPerReq; - } - - if (stbInfo->interlaceRows > stbInfo->insertRows) { - infoPrint( - "interlaceRows larger than insertRows %d > %" PRId64 "\n", - stbInfo->interlaceRows, stbInfo->insertRows); - infoPrint("%s", "interlaceRows will be set to 0\n"); - stbInfo->interlaceRows = 0; - } - - if (stbInfo->interlaceRows == 0 - && g_arguments->reqPerReq > 
stbInfo->insertRows) { - infoPrint("record per request (%u) is larger than " - "insert rows (%"PRIu64")" - " in progressive mode, which will be set to %"PRIu64"\n", - g_arguments->reqPerReq, stbInfo->insertRows, - stbInfo->insertRows); - g_arguments->reqPerReq = stbInfo->insertRows; - } - - if (stbInfo->interlaceRows > 0 && stbInfo->iface == STMT_IFACE - && stbInfo->autoTblCreating) { - infoPrint("%s", - "not support autocreate table with interlace row in stmt " - "insertion, will change to progressive mode\n"); - stbInfo->interlaceRows = 0; - } -} - -static int printTotalDelay(SDataBase *database, - int64_t totalDelay, - BArray *total_delay_list, - int threads, - int64_t totalInsertRows, - int64_t start, int64_t end) { - succPrint("Spent %.6f seconds to insert rows: %" PRIu64 - " with %d thread(s) into %s %.2f records/second\n", - (end - start)/1E6, totalInsertRows, threads, - database->dbName, - (double)(totalInsertRows / ((end - start)/1E6))); - if (!total_delay_list->size) { - return -1; - } - - succPrint("insert delay, " - "min: %.4fms, " - "avg: %.4fms, " - "p90: %.4fms, " - "p95: %.4fms, " - "p99: %.4fms, " - "max: %.4fms\n", - *(int64_t *)(benchArrayGet(total_delay_list, 0))/1E3, - (double)totalDelay/total_delay_list->size/1E3, - *(int64_t *)(benchArrayGet(total_delay_list, - (int32_t)(total_delay_list->size - * 0.9)))/1E3, - *(int64_t *)(benchArrayGet(total_delay_list, - (int32_t)(total_delay_list->size - * 0.95)))/1E3, - *(int64_t *)(benchArrayGet(total_delay_list, - (int32_t)(total_delay_list->size - * 0.99)))/1E3, - *(int64_t *)(benchArrayGet(total_delay_list, - (int32_t)(total_delay_list->size - - 1)))/1E3); - return 0; -} - -static int64_t fillChildTblNameImp(SDataBase *database, SSuperTable *stbInfo) { - int64_t ntables; - if (stbInfo->childTblLimit) { - ntables = fillChildTblNameByLimitOffset(database, stbInfo); - } else if (stbInfo->childTblFrom || stbInfo->childTblTo) { - ntables = fillChildTblNameByFromTo(database, stbInfo); - } else { - ntables = fillChildTblNameByCount(stbInfo); - } - return ntables; -} - -static int64_t fillChildTblName(SDataBase *database, SSuperTable *stbInfo) { - int64_t ntables = stbInfo->childTblCount; - stbInfo->childTblArray = benchCalloc(stbInfo->childTblCount, - sizeof(SChildTable*), true); - for (int64_t child = 0; child < stbInfo->childTblCount; child++) { - stbInfo->childTblArray[child] = - benchCalloc(1, sizeof(SChildTable), true); - } - - if (stbInfo->childTblCount == 1 && stbInfo->tags->size == 0) { - // Normal table - snprintf(stbInfo->childTblArray[0]->name, TSDB_TABLE_NAME_LEN, - "%s", stbInfo->stbName); - } else if ((stbInfo->iface != SML_IFACE - && stbInfo->iface != SML_REST_IFACE) - && stbInfo->childTblExists) { - ntables = fillChildTblNameImp(database, stbInfo); - } else { - ntables = fillChildTblNameByCount(stbInfo); - } - - return ntables; -} - -static int startMultiThreadInsertData(SDataBase* database, - SSuperTable* stbInfo) { - if ((stbInfo->iface == SML_IFACE || stbInfo->iface == SML_REST_IFACE) - && !stbInfo->use_metric) { - errorPrint("%s", "schemaless cannot work without stable\n"); - return -1; - } - - preProcessArgument(stbInfo); - - int64_t ntables; - if (stbInfo->childTblTo > 0) { - ntables = stbInfo->childTblTo - stbInfo->childTblFrom; - } else if (stbInfo->childTblLimit > 0 && stbInfo->childTblExists) { - ntables = stbInfo->childTblLimit; - } else { - ntables = stbInfo->childTblCount; - } - if (ntables == 0) { - return 0; - } - - uint64_t tableFrom = 0; - int32_t threads = g_arguments->nthreads; - int64_t a = 0, b 
= 0; - -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - if ((0 == stbInfo->interlaceRows) - && (g_arguments->nthreads_auto)) { - SBenchConn* conn = initBenchConn(); - if (NULL == conn) { - return -1; - } - - for (int64_t i = 0; i < stbInfo->childTblCount; i++) { - int vgId; - int ret = taos_get_table_vgId( - conn->taos, database->dbName, - stbInfo->childTblArray[i]->name, &vgId); - if (ret < 0) { - errorPrint("Failed to get %s db's %s table's vgId\n", - database->dbName, - stbInfo->childTblArray[i]->name); - closeBenchConn(conn); - return -1; - } - debugPrint("Db %s\'s table\'s %s vgId is: %d\n", - database->dbName, - stbInfo->childTblArray[i]->name, vgId); - for (int32_t v = 0; v < database->vgroups; v++) { - SVGroup *vg = benchArrayGet(database->vgArray, v); - if (vgId == vg->vgId) { - vg->tbCountPerVgId++; - } - } - } - - threads = 0; - for (int v = 0; v < database->vgroups; v++) { - SVGroup *vg = benchArrayGet(database->vgArray, v); - infoPrint("Total %"PRId64" tables on bb %s's vgroup %d (id: %d)\n", - vg->tbCountPerVgId, database->dbName, v, vg->vgId); - if (vg->tbCountPerVgId) { - threads++; - } else { - continue; - } - vg->childTblArray = benchCalloc( - vg->tbCountPerVgId, sizeof(SChildTable*), true); - vg->tbOffset = 0; - } - for (int64_t i = 0; i < stbInfo->childTblCount; i++) { - int vgId; - int ret = taos_get_table_vgId( - conn->taos, database->dbName, - stbInfo->childTblArray[i]->name, &vgId); - if (ret < 0) { - errorPrint("Failed to get %s db's %s table's vgId\n", - database->dbName, - stbInfo->childTblArray[i]->name); - - closeBenchConn(conn); - return -1; - } - debugPrint("Db %s\'s table\'s %s vgId is: %d\n", - database->dbName, - stbInfo->childTblArray[i]->name, vgId); - for (int32_t v = 0; v < database->vgroups; v++) { - SVGroup *vg = benchArrayGet(database->vgArray, v); - if (vgId == vg->vgId) { - vg->childTblArray[vg->tbOffset] = - stbInfo->childTblArray[i]; - vg->tbOffset++; - } - } - } - closeBenchConn(conn); - } else { - a = ntables / threads; - if (a < 1) { - threads = (int32_t)ntables; - a = 1; - } - b = 0; - if (threads != 0) { - b = ntables % threads; - } - } - - int32_t vgFrom = 0; -#else - a = ntables / threads; - if (a < 1) { - threads = (int32_t)ntables; - a = 1; - } - b = 0; - if (threads != 0) { - b = ntables % threads; - } -#endif // TD_VER_COMPATIBLE_3_0_0_0 - pthread_t *pids = benchCalloc(1, threads * sizeof(pthread_t), true); - threadInfo *infos = benchCalloc(1, threads * sizeof(threadInfo), true); - - for (int32_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = i; - pThreadInfo->dbInfo = database; - pThreadInfo->stbInfo = stbInfo; - pThreadInfo->start_time = stbInfo->startTimestamp; - pThreadInfo->totalInsertRows = 0; - pThreadInfo->samplePos = 0; -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - if ((0 == stbInfo->interlaceRows) - && (g_arguments->nthreads_auto)) { - int32_t j; - for (j = vgFrom; i < database->vgroups; j++) { - SVGroup *vg = benchArrayGet(database->vgArray, j); - if (0 == vg->tbCountPerVgId) { - continue; - } - pThreadInfo->vg = vg; - pThreadInfo->start_table_from = 0; - pThreadInfo->ntables = vg->tbCountPerVgId; - pThreadInfo->end_table_to = vg->tbCountPerVgId-1; - break; - } - vgFrom = j + 1; - } else { - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = i < b ? a + 1 : a; - pThreadInfo->end_table_to = (i < b)?(tableFrom+a):(tableFrom+a-1); - tableFrom = pThreadInfo->end_table_to + 1; - } -#else - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = i < b ? 
a + 1 : a; - pThreadInfo->end_table_to = (i < b)?(tableFrom+a):(tableFrom+a-1); - tableFrom = pThreadInfo->end_table_to + 1; -#endif // TD_VER_COMPATIBLE_3_0_0_0 - pThreadInfo->delayList = benchArrayInit(1, sizeof(int64_t)); - switch (stbInfo->iface) { - case REST_IFACE: { - if (stbInfo->interlaceRows > 0) { - pThreadInfo->buffer = new_ds(0); - } else { - pThreadInfo->buffer = - benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, true); - } - int sockfd = createSockFd(); - if (sockfd < 0) { - FREE_PIDS_INFOS_RETURN_MINUS_1(); - } - pThreadInfo->sockfd = sockfd; - break; - } - case STMT_IFACE: { - pThreadInfo->conn = initBenchConn(); - if (NULL == pThreadInfo->conn) { - FREE_PIDS_INFOS_RETURN_MINUS_1(); - } - pThreadInfo->conn->stmt = - taos_stmt_init(pThreadInfo->conn->taos); - if (NULL == pThreadInfo->conn->stmt) { - errorPrint("taos_stmt_init() failed, reason: %s\n", - taos_errstr(NULL)); - FREE_RESOURCE(); - return -1; - } - if (taos_select_db(pThreadInfo->conn->taos, database->dbName)) { - errorPrint("taos select database(%s) failed\n", - database->dbName); - FREE_RESOURCE(); - return -1; - } - if (!stbInfo->autoTblCreating) { - if (prepareStmt(stbInfo, pThreadInfo->conn->stmt, 0)) { - FREE_RESOURCE(); - return -1; - } - } - - pThreadInfo->bind_ts = benchCalloc(1, sizeof(int64_t), true); - pThreadInfo->bind_ts_array = - benchCalloc(1, sizeof(int64_t)*g_arguments->reqPerReq, - true); - pThreadInfo->bindParams = benchCalloc( - 1, sizeof(TAOS_MULTI_BIND)*(stbInfo->cols->size + 1), - true); - pThreadInfo->is_null = benchCalloc(1, g_arguments->reqPerReq, - true); - parseBufferToStmtBatch(stbInfo); - for (int64_t child = 0; - child < stbInfo->childTblCount; child++) { - SChildTable *childTbl = stbInfo->childTblArray[child]; - if (childTbl->useOwnSample) { - parseBufferToStmtBatchChildTbl(stbInfo, childTbl); - } - } - - break; - } - case SML_REST_IFACE: { - int sockfd = createSockFd(); - if (sockfd < 0) { - free(pids); - free(infos); - return -1; - } - pThreadInfo->sockfd = sockfd; - } - case SML_IFACE: { - pThreadInfo->conn = initBenchConn(); - if (pThreadInfo->conn == NULL) { - errorPrint("%s() init connection failed\n", __func__); - FREE_RESOURCE(); - return -1; - } - if (taos_select_db(pThreadInfo->conn->taos, database->dbName)) { - errorPrint("taos select database(%s) failed\n", - database->dbName); - FREE_RESOURCE(); - return -1; - } - pThreadInfo->max_sql_len = - stbInfo->lenOfCols + stbInfo->lenOfTags; - if (stbInfo->iface == SML_REST_IFACE) { - pThreadInfo->buffer = - benchCalloc(1, g_arguments->reqPerReq * - (1 + pThreadInfo->max_sql_len), true); - } - int protocol = stbInfo->lineProtocol; - if (TSDB_SML_JSON_PROTOCOL != protocol - && SML_JSON_TAOS_FORMAT != protocol) { - pThreadInfo->sml_tags = - (char **)benchCalloc(pThreadInfo->ntables, - sizeof(char *), true); - for (int t = 0; t < pThreadInfo->ntables; t++) { - pThreadInfo->sml_tags[t] = - benchCalloc(1, stbInfo->lenOfTags, true); - } - - for (int t = 0; t < pThreadInfo->ntables; t++) { - if (generateRandData( - stbInfo, pThreadInfo->sml_tags[t], - stbInfo->lenOfTags, - stbInfo->lenOfCols + stbInfo->lenOfTags, - stbInfo->tags, 1, true, NULL)) { - return -1; - } - debugPrint("pThreadInfo->sml_tags[%d]: %s\n", t, - pThreadInfo->sml_tags[t]); - } - pThreadInfo->lines = - benchCalloc(g_arguments->reqPerReq, - sizeof(char *), true); - - for (int j = 0; (j < g_arguments->reqPerReq - && !g_arguments->terminate); j++) { - pThreadInfo->lines[j] = - benchCalloc(1, pThreadInfo->max_sql_len, true); - } - } else { - pThreadInfo->json_array = 
tools_cJSON_CreateArray(); - pThreadInfo->sml_json_tags = tools_cJSON_CreateArray(); - pThreadInfo->sml_tags_json_array = (char **)benchCalloc( - pThreadInfo->ntables, sizeof(char *), true); - for (int t = 0; t < pThreadInfo->ntables; t++) { - if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { - generateSmlJsonTags( - pThreadInfo->sml_json_tags, - pThreadInfo->sml_tags_json_array, - stbInfo, - pThreadInfo->start_table_from, t); - } else { - generateSmlTaosJsonTags( - pThreadInfo->sml_json_tags, stbInfo, - pThreadInfo->start_table_from, t); - } - } - pThreadInfo->lines = (char **)benchCalloc( - 1, sizeof(char *), true); - if ((0 == stbInfo->interlaceRows) - && (TSDB_SML_JSON_PROTOCOL == protocol)) { - pThreadInfo->line_buf_len = - g_arguments->reqPerReq * - accumulateRowLen(pThreadInfo->stbInfo->tags, - pThreadInfo->stbInfo->iface); - debugPrint("%s() LN%d, line_buf_len=%d\n", - __func__, __LINE__, pThreadInfo->line_buf_len); - pThreadInfo->lines[0] = benchCalloc( - 1, pThreadInfo->line_buf_len, true); - pThreadInfo->sml_json_value_array = - (char **)benchCalloc( - pThreadInfo->ntables, sizeof(char *), true); - for (int t = 0; t < pThreadInfo->ntables; t++) { - generateSmlJsonValues( - pThreadInfo->sml_json_value_array, stbInfo, t); - } - } - } - break; - } - case TAOSC_IFACE: { - pThreadInfo->conn = initBenchConn(); - if (pThreadInfo->conn == NULL) { - errorPrint("%s() failed to connect\n", __func__); - FREE_RESOURCE(); - return -1; - } - char* command = benchCalloc(1, SHORT_1K_SQL_BUFF_LEN, false); - snprintf(command, SHORT_1K_SQL_BUFF_LEN, - g_arguments->escape_character - ? "USE `%s`" - : "USE %s", - database->dbName); - if (queryDbExecCall(pThreadInfo->conn, command)) { - errorPrint("taos select database(%s) failed\n", - database->dbName); - FREE_RESOURCE(); - tmfree(command); - return -1; - } - tmfree(command); - command = NULL; - - if (stbInfo->interlaceRows > 0) { - pThreadInfo->buffer = new_ds(0); - } else { - pThreadInfo->buffer = - benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, true); - if (g_arguments->check_sql) { - pThreadInfo->csql = - benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, true); - memset(pThreadInfo->csql, 0, TSDB_MAX_ALLOWED_SQL_LEN); - } - } - - break; - } - default: - break; - } - } - - infoPrint("Estimate memory usage: %.2fMB\n", - (double)g_memoryUsage / 1048576); - prompt(0); - - // create threads - int threadCnt = 0; - for (int i = 0; (i < threads && !g_arguments->terminate); i++) { - threadInfo *pThreadInfo = infos + i; - if (stbInfo->interlaceRows > 0) { - pthread_create(pids + i, NULL, - syncWriteInterlace, pThreadInfo); - } else { - pthread_create(pids + i, NULL, - syncWriteProgressive, pThreadInfo); - } - threadCnt ++; - } - - int64_t start = toolsGetTimestampUs(); - - // wait threads - for (int i = 0; i < threadCnt; i++) { - infoPrint(" pthread_join %d ...\n", i); - pthread_join(pids[i], NULL); - } - - int64_t end = toolsGetTimestampUs()+1; - - if (g_arguments->terminate) toolsMsleep(100); - - BArray * total_delay_list = benchArrayInit(1, sizeof(int64_t)); - int64_t totalDelay = 0; - uint64_t totalInsertRows = 0; - - // free threads resource - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - // free check sql - if (pThreadInfo->csql) { - tmfree(pThreadInfo->csql); - pThreadInfo->csql = NULL; - } - - int protocol = stbInfo->lineProtocol; - switch (stbInfo->iface) { - case REST_IFACE: - if (g_arguments->terminate) - toolsMsleep(100); - destroySockFd(pThreadInfo->sockfd); - if (stbInfo->interlaceRows > 0) { - 
free_ds(&pThreadInfo->buffer); - } else { - tmfree(pThreadInfo->buffer); - pThreadInfo->buffer = NULL; - } - break; - case SML_REST_IFACE: - if (g_arguments->terminate) - toolsMsleep(100); - tmfree(pThreadInfo->buffer); - // on-purpose no break here - case SML_IFACE: - if (TSDB_SML_JSON_PROTOCOL != protocol - && SML_JSON_TAOS_FORMAT != protocol) { - for (int t = 0; t < pThreadInfo->ntables; t++) { - tmfree(pThreadInfo->sml_tags[t]); - } - for (int j = 0; j < g_arguments->reqPerReq; j++) { - tmfree(pThreadInfo->lines[j]); - } - tmfree(pThreadInfo->sml_tags); - pThreadInfo->sml_tags = NULL; - } else { - for (int t = 0; t < pThreadInfo->ntables; t++) { - tmfree(pThreadInfo->sml_tags_json_array[t]); - } - tmfree(pThreadInfo->sml_tags_json_array); - pThreadInfo->sml_tags_json_array = NULL; - if (pThreadInfo->sml_json_tags) { - tools_cJSON_Delete(pThreadInfo->sml_json_tags); - pThreadInfo->sml_json_tags = NULL; - } - if (pThreadInfo->json_array) { - tools_cJSON_Delete(pThreadInfo->json_array); - pThreadInfo->json_array = NULL; - } - } - if (pThreadInfo->lines) { - if ((0 == stbInfo->interlaceRows) - && (TSDB_SML_JSON_PROTOCOL == protocol)) { - tmfree(pThreadInfo->lines[0]); - for (int t = 0; t < pThreadInfo->ntables; t++) { - tmfree(pThreadInfo->sml_json_value_array[t]); - } - tmfree(pThreadInfo->sml_json_value_array); - } - tmfree(pThreadInfo->lines); - pThreadInfo->lines = NULL; - } - break; - - case STMT_IFACE: - taos_stmt_close(pThreadInfo->conn->stmt); - tmfree(pThreadInfo->bind_ts); - tmfree(pThreadInfo->bind_ts_array); - tmfree(pThreadInfo->bindParams); - tmfree(pThreadInfo->is_null); - break; - - case TAOSC_IFACE: - if (stbInfo->interlaceRows > 0) { - free_ds(&pThreadInfo->buffer); - } else { - tmfree(pThreadInfo->buffer); - pThreadInfo->buffer = NULL; - } - break; - - default: - break; - } - totalInsertRows += pThreadInfo->totalInsertRows; - totalDelay += pThreadInfo->totalDelay; - benchArrayAddBatch(total_delay_list, pThreadInfo->delayList->pData, - pThreadInfo->delayList->size); - tmfree(pThreadInfo->delayList); - pThreadInfo->delayList = NULL; - // free conn - if (pThreadInfo->conn) { - closeBenchConn(pThreadInfo->conn); - pThreadInfo->conn = NULL; - } - } - - // calculate result - qsort(total_delay_list->pData, total_delay_list->size, - total_delay_list->elemSize, compare); - - if (g_arguments->terminate) toolsMsleep(100); - - free(pids); - free(infos); - - int ret = printTotalDelay(database, totalDelay, - total_delay_list, threads, - totalInsertRows, start, end); - benchArrayDestroy(total_delay_list); - if (g_fail || ret) { - return -1; - } - return 0; -} - -static int getStbInsertedRows(char* dbName, char* stbName, TAOS* taos) { - int rows = 0; - char command[SHORT_1K_SQL_BUFF_LEN]; - snprintf(command, SHORT_1K_SQL_BUFF_LEN, "SELECT COUNT(*) FROM %s.%s", - dbName, stbName); - TAOS_RES* res = taos_query(taos, command); - int code = taos_errno(res); - if (code != 0) { - printErrCmdCodeStr(command, code, res); - return -1; - } - TAOS_ROW row = taos_fetch_row(res); - if (row == NULL) { - rows = 0; - } else { - rows = (int)*(int64_t*)row[0]; - } - taos_free_result(res); - return rows; -} - -static void create_tsma(TSMA* tsma, SBenchConn* conn, char* stbName) { - char command[SHORT_1K_SQL_BUFF_LEN]; - int len = snprintf(command, SHORT_1K_SQL_BUFF_LEN, - "CREATE sma INDEX %s ON %s function(%s) " - "INTERVAL (%s) SLIDING (%s)", - tsma->name, stbName, tsma->func, - tsma->interval, tsma->sliding); - if (tsma->custom) { - snprintf(command + len, SHORT_1K_SQL_BUFF_LEN - len, - " %s", 
tsma->custom); - } - int code = queryDbExecCall(conn, command); - if (code == 0) { - infoPrint("successfully create tsma with command <%s>\n", command); - } -} - -static void* create_tsmas(void* args) { - tsmaThreadInfo* pThreadInfo = (tsmaThreadInfo*) args; - int inserted_rows = 0; - SBenchConn* conn = initBenchConn(); - if (NULL == conn) { - return NULL; - } - int finished = 0; - if (taos_select_db(conn->taos, pThreadInfo->dbName)) { - errorPrint("failed to use database (%s)\n", pThreadInfo->dbName); - closeBenchConn(conn); - return NULL; - } - while (finished < pThreadInfo->tsmas->size && inserted_rows >= 0) { - inserted_rows = (int)getStbInsertedRows( - pThreadInfo->dbName, pThreadInfo->stbName, conn->taos); - for (int i = 0; i < pThreadInfo->tsmas->size; i++) { - TSMA* tsma = benchArrayGet(pThreadInfo->tsmas, i); - if (!tsma->done && inserted_rows >= tsma->start_when_inserted) { - create_tsma(tsma, conn, pThreadInfo->stbName); - tsma->done = true; - finished++; - break; - } - } - toolsMsleep(10); - } - benchArrayDestroy(pThreadInfo->tsmas); - closeBenchConn(conn); - return NULL; -} - -static int32_t createStream(SSTREAM* stream) { - int32_t code = -1; - char * command = benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, false); - snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, "DROP STREAM IF EXISTS %s", - stream->stream_name); - infoPrint("%s\n", command); - SBenchConn* conn = initBenchConn(); - if (NULL == conn) { - goto END_STREAM; - } - - code = queryDbExecCall(conn, command); - int32_t trying = g_arguments->keep_trying; - while (code && trying) { - infoPrint("will sleep %"PRIu32" milliseconds then re-drop stream %s\n", - g_arguments->trying_interval, stream->stream_name); - toolsMsleep(g_arguments->trying_interval); - code = queryDbExecCall(conn, command); - if (trying != -1) { - trying--; - } - } - - if (code) { - closeBenchConn(conn); - goto END_STREAM; - } - - memset(command, 0, TSDB_MAX_ALLOWED_SQL_LEN); - int pos = snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, - "CREATE STREAM IF NOT EXISTS %s ", stream->stream_name); - if (stream->trigger_mode[0] != '\0') { - pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, - "TRIGGER %s ", stream->trigger_mode); - } - if (stream->watermark[0] != '\0') { - pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, - "WATERMARK %s ", stream->watermark); - } - if (stream->ignore_update[0] != '\0') { - pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, - "IGNORE UPDATE %s ", stream->ignore_update); - } - if (stream->ignore_expired[0] != '\0') { - pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, - "IGNORE EXPIRED %s ", stream->ignore_expired); - } - if (stream->fill_history[0] != '\0') { - pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, - "FILL_HISTORY %s ", stream->fill_history); - } - pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, - "INTO %s ", stream->stream_stb); - if (stream->stream_stb_field[0] != '\0') { - pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, - "%s ", stream->stream_stb_field); - } - if (stream->stream_tag_field[0] != '\0') { - pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, - "TAGS%s ", stream->stream_tag_field); - } - if (stream->subtable[0] != '\0') { - pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, - "SUBTABLE%s ", stream->subtable); - } - snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, - "as %s", stream->source_sql); - infoPrint("%s\n", command); - - code = queryDbExecCall(conn, command); - trying = 
g_arguments->keep_trying; - while (code && trying) { - infoPrint("will sleep %"PRIu32" milliseconds " - "then re-create stream %s\n", - g_arguments->trying_interval, stream->stream_name); - toolsMsleep(g_arguments->trying_interval); - code = queryDbExecCall(conn, command); - if (trying != -1) { - trying--; - } - } - - closeBenchConn(conn); -END_STREAM: - tmfree(command); - return code; -} - -int insertTestProcess() { - prompt(0); - - encodeAuthBase64(); - for (int i = 0; i < g_arguments->databases->size; i++) { - if (REST_IFACE == g_arguments->iface) { - if (0 != convertServAddr(g_arguments->iface, - false, - 1)) { - return -1; - } - } - SDataBase * database = benchArrayGet(g_arguments->databases, i); - - if (database->drop && !(g_arguments->supplementInsert)) { - if (database->superTbls) { - SSuperTable * stbInfo = benchArrayGet(database->superTbls, 0); - if (stbInfo && (REST_IFACE == stbInfo->iface)) { - if (0 != convertServAddr(stbInfo->iface, - stbInfo->tcpTransfer, - stbInfo->lineProtocol)) { - return -1; - } - } - } - if (createDatabase(database)) { - errorPrint("failed to create database (%s)\n", - database->dbName); - return -1; - } - succPrint("created database (%s)\n", database->dbName); - } - } - for (int i = 0; i < g_arguments->databases->size; i++) { - SDataBase * database = benchArrayGet(g_arguments->databases, i); - if (database->superTbls) { - for (int j = 0; j < database->superTbls->size; j++) { - SSuperTable * stbInfo = benchArrayGet(database->superTbls, j); - if (stbInfo->iface != SML_IFACE - && stbInfo->iface != SML_REST_IFACE - && !stbInfo->childTblExists) { -#ifdef WEBSOCKET - if (g_arguments->websocket) { - dropSuperTable(database, stbInfo); - } -#endif - if (getSuperTableFromServer(database, stbInfo) != 0) { - if (createSuperTable(database, stbInfo)) { - return -1; - } - } - } - fillChildTblName(database, stbInfo); - if (0 != prepareSampleData(database, stbInfo)) { - return -1; - } - } - } - } - - if (g_arguments->taosc_version == 3) { - for (int i = 0; i < g_arguments->databases->size; i++) { - SDataBase* database = benchArrayGet(g_arguments->databases, i); - if (database->superTbls) { - for (int j = 0; (j < database->superTbls->size - && !g_arguments->terminate); j++) { - SSuperTable* stbInfo = - benchArrayGet(database->superTbls, j); - if (stbInfo->tsmas == NULL) { - continue; - } - if (stbInfo->tsmas->size > 0) { - tsmaThreadInfo* pThreadInfo = - benchCalloc(1, sizeof(tsmaThreadInfo), true); - pthread_t tsmas_pid = {0}; - pThreadInfo->dbName = database->dbName; - pThreadInfo->stbName = stbInfo->stbName; - pThreadInfo->tsmas = stbInfo->tsmas; - pthread_create(&tsmas_pid, NULL, - create_tsmas, pThreadInfo); - } - } - } - } - } - - if (createChildTables()) return -1; - - if (g_arguments->taosc_version == 3) { - for (int j = 0; j < g_arguments->streams->size; j++) { - SSTREAM * stream = benchArrayGet(g_arguments->streams, j); - if (stream->drop) { - if (createStream(stream)) { - return -1; - } - } - } - } - - // create sub threads for inserting data - for (int i = 0; i < g_arguments->databases->size; i++) { - SDataBase * database = benchArrayGet(g_arguments->databases, i); - if (database->superTbls) { - for (uint64_t j = 0; j < database->superTbls->size; j++) { - SSuperTable * stbInfo = benchArrayGet(database->superTbls, j); - if (stbInfo->insertRows == 0) { - continue; - } - prompt(stbInfo->non_stop); - if (startMultiThreadInsertData(database, stbInfo)) { - return -1; - } - } - } - } - return 0; -} +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the MIT license as published by the Free Software + * Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + */ + +#include +#include +#include + +#define FREE_PIDS_INFOS_RETURN_MINUS_1() \ + do { \ + tmfree(pids); \ + tmfree(infos); \ + return -1; \ + } while (0) + +#define FREE_RESOURCE() \ + do { \ + if (pThreadInfo->conn) \ + closeBenchConn(pThreadInfo->conn); \ + benchArrayDestroy(pThreadInfo->delayList); \ + tmfree(pids); \ + tmfree(infos); \ + } while (0) \ + +static int getSuperTableFromServerRest( + SDataBase* database, SSuperTable* stbInfo, char *command) { + + return -1; + // TODO(me): finish full implementation +#if 0 + int sockfd = createSockFd(); + if (sockfd < 0) { + return -1; + } + + int code = postProceSql(command, + database->dbName, + database->precision, + REST_IFACE, + 0, + g_arguments->port, + false, + sockfd, + NULL); + + destroySockFd(sockfd); +#endif // 0 +} + +static int getSuperTableFromServerTaosc( + SDataBase* database, SSuperTable* stbInfo, char *command) { +#ifdef WEBSOCKET + if (g_arguments->websocket) { + return -1; + } +#endif + TAOS_RES * res; + TAOS_ROW row = NULL; + SBenchConn* conn = initBenchConn(); + if (NULL == conn) { + return -1; + } + + res = taos_query(conn->taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + printWarnCmdCodeStr(command, code, res); + infoPrint("stable %s does not exist, will create one\n", + stbInfo->stbName); + closeBenchConn(conn); + return -1; + } + infoPrint("find stable<%s>, will get meta data from server\n", + stbInfo->stbName); + benchArrayClear(stbInfo->tags); + benchArrayClear(stbInfo->cols); + int count = 0; + while ((row = taos_fetch_row(res)) != NULL) { + if (count == 0) { + count++; + continue; + } + int32_t *lengths = taos_fetch_lengths(res); + if (lengths == NULL) { + errorPrint("%s", "failed to execute taos_fetch_length\n"); + taos_free_result(res); + closeBenchConn(conn); + return -1; + } + if (strncasecmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "tag", + strlen("tag")) == 0) { + Field* tag = benchCalloc(1, sizeof(Field), true); + benchArrayPush(stbInfo->tags, tag); + tag = benchArrayGet(stbInfo->tags, stbInfo->tags->size - 1); + tag->type = convertStringToDatatype( + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + lengths[TSDB_DESCRIBE_METRIC_TYPE_INDEX]); + tag->length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + tag->min = convertDatatypeToDefaultMin(tag->type); + tag->max = convertDatatypeToDefaultMax(tag->type); + tstrncpy(tag->name, + (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + lengths[TSDB_DESCRIBE_METRIC_FIELD_INDEX] + 1); + } else { + Field * col = benchCalloc(1, sizeof(Field), true); + benchArrayPush(stbInfo->cols, col); + col = benchArrayGet(stbInfo->cols, stbInfo->cols->size - 1); + col->type = convertStringToDatatype( + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + lengths[TSDB_DESCRIBE_METRIC_TYPE_INDEX]); + col->length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + col->min = convertDatatypeToDefaultMin(col->type); + col->max = convertDatatypeToDefaultMax(col->type); + tstrncpy(col->name, + (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + lengths[TSDB_DESCRIBE_METRIC_FIELD_INDEX] + 1); + } + } + taos_free_result(res); + closeBenchConn(conn); + return 0; +} + +static int 
getSuperTableFromServer(SDataBase* database, SSuperTable* stbInfo) { + int ret = 0; + + char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; + snprintf(command, SHORT_1K_SQL_BUFF_LEN, + "DESCRIBE `%s`.`%s`", database->dbName, + stbInfo->stbName); + + if (REST_IFACE == stbInfo->iface) { + ret = getSuperTableFromServerRest(database, stbInfo, command); + } else { + ret = getSuperTableFromServerTaosc(database, stbInfo, command); + } + + return ret; +} + +static int queryDbExec(SDataBase *database, + SSuperTable *stbInfo, char *command) { + int ret = 0; + if (REST_IFACE == stbInfo->iface) { + if (0 != convertServAddr(stbInfo->iface, false, 1)) { + errorPrint("%s", "Failed to convert server address\n"); + return -1; + } + int sockfd = createSockFd(); + if (sockfd < 0) { + ret = -1; + } else { + ret = queryDbExecRest(command, + database->dbName, + database->precision, + stbInfo->iface, + stbInfo->lineProtocol, + stbInfo->tcpTransfer, + sockfd); + destroySockFd(sockfd); + } + } else { + SBenchConn* conn = initBenchConn(); + if (NULL == conn) { + ret = -1; + } else { + ret = queryDbExecCall(conn, command); + int32_t trying = g_arguments->keep_trying; + while (ret && trying) { + infoPrint("will sleep %"PRIu32" milliseconds then re-create " + "supertable %s\n", + g_arguments->trying_interval, stbInfo->stbName); + toolsMsleep(g_arguments->trying_interval); + ret = queryDbExecCall(conn, command); + if (trying != -1) { + trying--; + } + } + if (0 != ret) { + errorPrint("create supertable %s failed!\n\n", + stbInfo->stbName); + ret = -1; + } + closeBenchConn(conn); + } + } + + return ret; +} + +#ifdef WEBSOCKET +static void dropSuperTable(SDataBase* database, SSuperTable* stbInfo) { + if (g_arguments->supplementInsert) { + return; + } + + char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; + snprintf(command, sizeof(command), + g_arguments->escape_character + ? 
"DROP TABLE `%s`.`%s`" + : "DROP TABLE %s.%s", + database->dbName, + stbInfo->stbName); + + infoPrint("drop stable: <%s>\n", command); + queryDbExec(database, stbInfo, command); + + return; +} +#endif // WEBSOCKET + +static int createSuperTable(SDataBase* database, SSuperTable* stbInfo) { + if (g_arguments->supplementInsert) { + return 0; + } + + uint32_t col_buffer_len = (TSDB_COL_NAME_LEN + 15) * stbInfo->cols->size; + char *colsBuf = benchCalloc(1, col_buffer_len, false); + char* command = benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, false); + int len = 0; + + for (int colIndex = 0; colIndex < stbInfo->cols->size; colIndex++) { + Field * col = benchArrayGet(stbInfo->cols, colIndex); + int n; + if (col->type == TSDB_DATA_TYPE_BINARY || + col->type == TSDB_DATA_TYPE_NCHAR) { + n = snprintf(colsBuf + len, col_buffer_len - len, + ",%s %s(%d)", col->name, + convertDatatypeToString(col->type), col->length); + } else { + n = snprintf(colsBuf + len, col_buffer_len - len, + ",%s %s", col->name, + convertDatatypeToString(col->type)); + } + if (n < 0 || n >= col_buffer_len - len) { + errorPrint("%s() LN%d, snprintf overflow on %d\n", + __func__, __LINE__, colIndex); + break; + } else { + len += n; + } + } + + // save for creating child table + stbInfo->colsOfCreateChildTable = + (char *)benchCalloc(len + TIMESTAMP_BUFF_LEN, 1, true); + + snprintf(stbInfo->colsOfCreateChildTable, len + TIMESTAMP_BUFF_LEN, + "(ts timestamp%s)", colsBuf); + + if (stbInfo->tags->size == 0) { + free(colsBuf); + free(command); + return 0; + } + + uint32_t tag_buffer_len = (TSDB_COL_NAME_LEN + 15) * stbInfo->tags->size; + char *tagsBuf = benchCalloc(1, tag_buffer_len, false); + int tagIndex; + len = 0; + + int n; + n = snprintf(tagsBuf + len, tag_buffer_len - len, "("); + if (n < 0 || n >= tag_buffer_len - len) { + errorPrint("%s() LN%d snprintf overflow\n", + __func__, __LINE__); + free(colsBuf); + free(command); + tmfree(tagsBuf); + return -1; + } else { + len += n; + } + for (tagIndex = 0; tagIndex < stbInfo->tags->size; tagIndex++) { + Field *tag = benchArrayGet(stbInfo->tags, tagIndex); + if (tag->type == TSDB_DATA_TYPE_BINARY || + tag->type == TSDB_DATA_TYPE_NCHAR) { + n = snprintf(tagsBuf + len, tag_buffer_len - len, + "%s %s(%d),", tag->name, + convertDatatypeToString(tag->type), tag->length); + } else if (tag->type == TSDB_DATA_TYPE_JSON) { + n = snprintf(tagsBuf + len, tag_buffer_len - len, + "%s json", tag->name); + if (n < 0 || n >= tag_buffer_len - len) { + errorPrint("%s() LN%d snprintf overflow on %d\n", + __func__, __LINE__, tagIndex); + break; + } else { + len += n; + } + goto skip; + } else { + n = snprintf(tagsBuf + len, tag_buffer_len - len, + "%s %s,", tag->name, + convertDatatypeToString(tag->type)); + } + + if (n < 0 || n >= tag_buffer_len - len) { + errorPrint("%s() LN%d snprintf overflow on %d\n", + __func__, __LINE__, tagIndex); + break; + } else { + len += n; + } + } + len -= 1; +skip: + snprintf(tagsBuf + len, tag_buffer_len - len, ")"); + + int length = snprintf( + command, TSDB_MAX_ALLOWED_SQL_LEN, + g_arguments->escape_character + ? 
"CREATE TABLE `%s`.`%s` (ts TIMESTAMP%s) TAGS %s" + : "CREATE TABLE %s.%s (ts TIMESTAMP%s) TAGS %s", + database->dbName, stbInfo->stbName, colsBuf, tagsBuf); + tmfree(colsBuf); + tmfree(tagsBuf); + if (stbInfo->comment != NULL) { + length += snprintf(command + length, TSDB_MAX_ALLOWED_SQL_LEN - length, + " COMMENT '%s'", stbInfo->comment); + } + if (stbInfo->delay >= 0) { + length += snprintf(command + length, + TSDB_MAX_ALLOWED_SQL_LEN - length, " DELAY %d", + stbInfo->delay); + } + if (stbInfo->file_factor >= 0) { + length += + snprintf(command + length, + TSDB_MAX_ALLOWED_SQL_LEN - length, " FILE_FACTOR %f", + (float)stbInfo->file_factor / 100); + } + if (stbInfo->rollup != NULL) { + length += snprintf(command + length, + TSDB_MAX_ALLOWED_SQL_LEN - length, + " ROLLUP(%s)", stbInfo->rollup); + } + + if (stbInfo->max_delay != NULL) { + length += snprintf(command + length, + TSDB_MAX_ALLOWED_SQL_LEN - length, + " MAX_DELAY %s", stbInfo->max_delay); + } + + if (stbInfo->watermark != NULL) { + length += snprintf(command + length, + TSDB_MAX_ALLOWED_SQL_LEN - length, + " WATERMARK %s", stbInfo->watermark); + } + + // not support ttl in super table + /* + if (stbInfo->ttl != 0) { + length += snprintf(command + length, + TSDB_MAX_ALLOWED_SQL_LEN - length, + " TTL %d", stbInfo->ttl); + } + */ + + bool first_sma = true; + for (int i = 0; i < stbInfo->cols->size; i++) { + Field * col = benchArrayGet(stbInfo->cols, i); + if (col->sma) { + if (first_sma) { + n = snprintf(command + length, + TSDB_MAX_ALLOWED_SQL_LEN - length, + " SMA(%s", col->name); + first_sma = false; + } else { + n = snprintf(command + length, + TSDB_MAX_ALLOWED_SQL_LEN - length, + ",%s", col->name); + } + + if (n < 0 || n > TSDB_MAX_ALLOWED_SQL_LEN - length) { + errorPrint("%s() LN%d snprintf overflow on %d iteral\n", + __func__, __LINE__, i); + break; + } else { + length += n; + } + } + } + if (!first_sma) { + snprintf(command + length, TSDB_MAX_ALLOWED_SQL_LEN - length, ")"); + } + infoPrint("create stable: <%s>\n", command); + + int ret = queryDbExec(database, stbInfo, command); + free(command); + return ret; +} + +#ifdef TD_VER_COMPATIBLE_3_0_0_0 +int32_t getVgroupsOfDb(SBenchConn *conn, SDataBase *database) { + int vgroups = 0; + char cmd[SHORT_1K_SQL_BUFF_LEN] = "\0"; + + snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, + g_arguments->escape_character + ? 
"USE `%s`" + : "USE %s", + database->dbName); + + int32_t code; + TAOS_RES *res = NULL; + + res = taos_query(conn->taos, cmd); + code = taos_errno(res); + if (code) { + printErrCmdCodeStr(cmd, code, res); + return -1; + } + taos_free_result(res); + + snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, "SHOW VGROUPS"); + res = taos_query(conn->taos, cmd); + code = taos_errno(res); + if (code) { + printErrCmdCodeStr(cmd, code, res); + return -1; + } + + TAOS_ROW row = NULL; + while ((row = taos_fetch_row(res)) != NULL) { + vgroups++; + } + debugPrint("%s() LN%d, vgroups: %d\n", __func__, __LINE__, vgroups); + taos_free_result(res); + + database->vgroups = vgroups; + database->vgArray = benchArrayInit(vgroups, sizeof(SVGroup)); + for (int32_t v = 0; (v < vgroups + && !g_arguments->terminate); v++) { + SVGroup *vg = benchCalloc(1, sizeof(SVGroup), true); + benchArrayPush(database->vgArray, vg); + } + + res = taos_query(conn->taos, cmd); + code = taos_errno(res); + if (code) { + printErrCmdCodeStr(cmd, code, res); + return -1; + } + + int32_t vgItem = 0; + while (((row = taos_fetch_row(res)) != NULL) + && !g_arguments->terminate) { + SVGroup *vg = benchArrayGet(database->vgArray, vgItem); + vg->vgId = *(int32_t*)row[0]; + vgItem++; + } + taos_free_result(res); + + return vgroups; +} +#endif // TD_VER_COMPATIBLE_3_0_0_0 + +int geneDbCreateCmd(SDataBase *database, char *command, int remainVnodes) { + int dataLen = 0; + int n; +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + if (g_arguments->nthreads_auto || (-1 != g_arguments->inputted_vgroups)) { + n = snprintf(command + dataLen, SHORT_1K_SQL_BUFF_LEN - dataLen, + g_arguments->escape_character + ? "CREATE DATABASE IF NOT EXISTS `%s` VGROUPS %d" + : "CREATE DATABASE IF NOT EXISTS %s VGROUPS %d", + database->dbName, + (-1 != g_arguments->inputted_vgroups)? + g_arguments->inputted_vgroups: + min(remainVnodes, toolsGetNumberOfCores())); + } else { + n = snprintf(command + dataLen, SHORT_1K_SQL_BUFF_LEN - dataLen, + g_arguments->escape_character + ? "CREATE DATABASE IF NOT EXISTS `%s`" + : "CREATE DATABASE IF NOT EXISTS %s", + database->dbName); + } +#else + n = snprintf(command + dataLen, SHORT_1K_SQL_BUFF_LEN - dataLen, + g_arguments->escape_character + ? 
"CREATE DATABASE IF NOT EXISTS `%s`" + : "CREATE DATABASE IF NOT EXISTS %s", database->dbName); +#endif // TD_VER_COMPATIBLE_3_0_0_0 + if (n < 0 || n >= SHORT_1K_SQL_BUFF_LEN - dataLen) { + errorPrint("%s() LN%d snprintf overflow\n", + __func__, __LINE__); + return -1; + } else { + dataLen += n; + } + + if (database->cfgs) { + for (int i = 0; i < database->cfgs->size; i++) { + SDbCfg* cfg = benchArrayGet(database->cfgs, i); + if (cfg->valuestring) { + n = snprintf(command + dataLen, + TSDB_MAX_ALLOWED_SQL_LEN - dataLen, + " %s %s", cfg->name, cfg->valuestring); + } else { + n = snprintf(command + dataLen, + TSDB_MAX_ALLOWED_SQL_LEN - dataLen, + " %s %d", cfg->name, cfg->valueint); + } + if (n < 0 || n >= TSDB_MAX_ALLOWED_SQL_LEN - dataLen) { + errorPrint("%s() LN%d snprintf overflow on %d\n", + __func__, __LINE__, i); + break; + } else { + dataLen += n; + } + } + } + + switch (database->precision) { + case TSDB_TIME_PRECISION_MILLI: + snprintf(command + dataLen, TSDB_MAX_ALLOWED_SQL_LEN - dataLen, + " PRECISION \'ms\';"); + break; + case TSDB_TIME_PRECISION_MICRO: + snprintf(command + dataLen, TSDB_MAX_ALLOWED_SQL_LEN - dataLen, + " PRECISION \'us\';"); + break; + case TSDB_TIME_PRECISION_NANO: + snprintf(command + dataLen, TSDB_MAX_ALLOWED_SQL_LEN - dataLen, + " PRECISION \'ns\';"); + break; + } + + return dataLen; +} + +int createDatabaseRest(SDataBase* database) { + int32_t code = 0; + char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; + + int sockfd = createSockFd(); + if (sockfd < 0) { + return -1; + } + + snprintf(command, SHORT_1K_SQL_BUFF_LEN, + g_arguments->escape_character + ? "DROP DATABASE IF EXISTS `%s`;" + : "DROP DATABASE IF EXISTS %s;", + database->dbName); + code = postProceSql(command, + database->dbName, + database->precision, + REST_IFACE, + 0, + g_arguments->port, + false, + sockfd, + NULL); + if (code != 0) { + errorPrint("Failed to drop database %s\n", database->dbName); + } else { + int remainVnodes = INT_MAX; + geneDbCreateCmd(database, command, remainVnodes); + code = postProceSql(command, + database->dbName, + database->precision, + REST_IFACE, + 0, + g_arguments->port, + false, + sockfd, + NULL); + int32_t trying = g_arguments->keep_trying; + while (code && trying) { + infoPrint("will sleep %"PRIu32" milliseconds then " + "re-create database %s\n", + g_arguments->trying_interval, database->dbName); + toolsMsleep(g_arguments->trying_interval); + code = postProceSql(command, + database->dbName, + database->precision, + REST_IFACE, + 0, + g_arguments->port, + false, + sockfd, + NULL); + if (trying != -1) { + trying--; + } + } + } + destroySockFd(sockfd); + return code; +} + +int32_t getRemainVnodes(SBenchConn *conn) { + int remainVnodes = 0; + char command[SHORT_1K_SQL_BUFF_LEN] = "SHOW DNODES"; + + TAOS_RES *res = taos_query(conn->taos, command); + int32_t code = taos_errno(res); + if (code) { + printErrCmdCodeStr(command, code, res); + closeBenchConn(conn); + return -1; + } + TAOS_ROW row = NULL; + while ((row = taos_fetch_row(res)) != NULL) { + remainVnodes += (*(int16_t*)(row[3]) - *(int16_t*)(row[2])); + } + debugPrint("%s() LN%d, remainVnodes: %d\n", + __func__, __LINE__, remainVnodes); + taos_free_result(res); + return remainVnodes; +} + +int createDatabaseTaosc(SDataBase* database) { + char command[SHORT_1K_SQL_BUFF_LEN] = "\0"; + SBenchConn* conn = initBenchConn(); + if (NULL == conn) { + return -1; + } + if (g_arguments->taosc_version == 3) { + for (int i = 0; i < g_arguments->streams->size; i++) { + SSTREAM* stream = benchArrayGet(g_arguments->streams, i); + if 
(stream->drop) { + snprintf(command, SHORT_1K_SQL_BUFF_LEN, + "DROP STREAM IF EXISTS %s;", + stream->stream_name); + if (queryDbExecCall(conn, command)) { + closeBenchConn(conn); + return -1; + } + infoPrint("%s\n", command); + memset(command, 0, SHORT_1K_SQL_BUFF_LEN); + } + } + } + + snprintf(command, SHORT_1K_SQL_BUFF_LEN, + g_arguments->escape_character + ? "DROP DATABASE IF EXISTS `%s`;": + "DROP DATABASE IF EXISTS %s;", + database->dbName); + if (0 != queryDbExecCall(conn, command)) { +#ifdef WEBSOCKET + if (g_arguments->websocket) { + warnPrint("%s", "TDengine cloud normal users have no privilege " + "to drop database! DROP DATABASE failure is ignored!\n"); + } else { +#endif + closeBenchConn(conn); + return -1; +#ifdef WEBSOCKET + } +#endif + } + + int remainVnodes = INT_MAX; +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + if (g_arguments->nthreads_auto) { + remainVnodes = getRemainVnodes(conn); + if (0 >= remainVnodes) { + errorPrint("Remain vnodes %d, failed to create database\n", + remainVnodes); + return -1; + } + } +#endif + geneDbCreateCmd(database, command, remainVnodes); + + int32_t code = queryDbExecCall(conn, command); + int32_t trying = g_arguments->keep_trying; + while (code && trying) { + infoPrint("will sleep %"PRIu32" milliseconds then " + "re-create database %s\n", + g_arguments->trying_interval, database->dbName); + toolsMsleep(g_arguments->trying_interval); + code = queryDbExecCall(conn, command); + if (trying != -1) { + trying--; + } + } + + if (code) { +#ifdef WEBSOCKET + if (g_arguments->websocket) { + warnPrint("%s", "TDengine cloud normal users have no privilege " + "to create database! CREATE DATABASE " + "failure is ignored!\n"); + } else { +#endif + + closeBenchConn(conn); + errorPrint("\ncreate database %s failed!\n\n", + database->dbName); + return -1; +#ifdef WEBSOCKET + } +#endif + } + infoPrint("command to create database: <%s>\n", command); + +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + if (database->superTbls) { + if (g_arguments->nthreads_auto) { + int32_t vgroups = getVgroupsOfDb(conn, database); + if (vgroups <=0) { + closeBenchConn(conn); + errorPrint("Database %s's vgroups is %d\n", + database->dbName, vgroups); + return -1; + } + } + } +#endif // TD_VER_COMPATIBLE_3_0_0_0 + + closeBenchConn(conn); + return 0; +} + +int createDatabase(SDataBase* database) { + int ret = 0; + if (REST_IFACE == g_arguments->iface) { + ret = createDatabaseRest(database); + } else { + ret = createDatabaseTaosc(database); + } +#if 0 +#ifdef LINUX + infoPrint("%s() LN%d, ret: %d\n", __func__, __LINE__, ret); + sleep(10); + infoPrint("%s() LN%d, ret: %d\n", __func__, __LINE__, ret); +#elif defined(DARWIN) + sleep(2); +#else + Sleep(2); +#endif +#endif + + return ret; +} + +static int generateChildTblName(int len, char *buffer, SDataBase *database, + SSuperTable *stbInfo, uint64_t i, + char *ttl) { + if (0 == len) { + memset(buffer, 0, TSDB_MAX_ALLOWED_SQL_LEN); + len += snprintf(buffer + len, + TSDB_MAX_ALLOWED_SQL_LEN - len, "CREATE TABLE "); + } + + len += snprintf( + buffer + len, TSDB_MAX_ALLOWED_SQL_LEN - len, + g_arguments->escape_character + ? 
"`%s`.`%s%" PRIu64 "` USING `%s`.`%s` TAGS (%s) %s " + : "%s.%s%" PRIu64 " USING %s.%s TAGS (%s) %s ", + database->dbName, stbInfo->childTblPrefix, i, database->dbName, + stbInfo->stbName, + stbInfo->tagDataBuf + i * stbInfo->lenOfTags, ttl); + + return len; +} + +static int getBatchOfTblCreating(threadInfo *pThreadInfo, + SSuperTable *stbInfo) { + BArray *batchArray = stbInfo->batchTblCreatingNumbersArray; + if (batchArray) { + int *batch = benchArrayGet( + batchArray, pThreadInfo->posOfTblCreatingBatch); + pThreadInfo->posOfTblCreatingBatch++; + if (pThreadInfo->posOfTblCreatingBatch == batchArray->size) { + pThreadInfo->posOfTblCreatingBatch = 0; + } + return *batch; + } + return 0; +} + +static int getIntervalOfTblCreating(threadInfo *pThreadInfo, + SSuperTable *stbInfo) { + BArray *intervalArray = stbInfo->batchTblCreatingIntervalsArray; + if (intervalArray) { + int *interval = benchArrayGet( + intervalArray, pThreadInfo->posOfTblCreatingInterval); + pThreadInfo->posOfTblCreatingInterval++; + if (pThreadInfo->posOfTblCreatingInterval == intervalArray->size) { + pThreadInfo->posOfTblCreatingInterval = 0; + } + return *interval; + } + return 0; +} + +static void *createTable(void *sarg) { + if (g_arguments->supplementInsert) { + return NULL; + } + + threadInfo * pThreadInfo = (threadInfo *)sarg; + SDataBase * database = pThreadInfo->dbInfo; + SSuperTable *stbInfo = pThreadInfo->stbInfo; +#ifdef LINUX + prctl(PR_SET_NAME, "createTable"); +#endif + uint64_t lastPrintTime = toolsGetTimestampMs(); + pThreadInfo->buffer = benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, false); + int len = 0; + int batchNum = 0; + infoPrint( + "thread[%d] start creating table from %" PRIu64 " to %" PRIu64 + "\n", + pThreadInfo->threadID, pThreadInfo->start_table_from, + pThreadInfo->end_table_to); + + char ttl[SMALL_BUFF_LEN] = ""; + if (stbInfo->ttl != 0) { + snprintf(ttl, SMALL_BUFF_LEN, "TTL %d", stbInfo->ttl); + } + + int smallBatchCount = 0; + for (uint64_t i = pThreadInfo->start_table_from + stbInfo->childTblFrom; + (i <= (pThreadInfo->end_table_to + stbInfo->childTblFrom) + && !g_arguments->terminate); i++) { + if (g_arguments->terminate) { + goto create_table_end; + } + if (!stbInfo->use_metric || stbInfo->tags->size == 0) { + if (stbInfo->childTblCount == 1) { + snprintf(pThreadInfo->buffer, TSDB_MAX_ALLOWED_SQL_LEN, + g_arguments->escape_character + ? "CREATE TABLE `%s`.`%s` %s;" + : "CREATE TABLE %s.%s %s;", + database->dbName, stbInfo->stbName, + stbInfo->colsOfCreateChildTable); + } else { + snprintf(pThreadInfo->buffer, TSDB_MAX_ALLOWED_SQL_LEN, + g_arguments->escape_character + ? 
"CREATE TABLE `%s`.`%s` %s;" + : "CREATE TABLE %s.%s %s;", + database->dbName, + stbInfo->childTblArray[i]->name, + stbInfo->colsOfCreateChildTable); + } + batchNum++; + } else { + if (0 == len) { + batchNum = 0; + } + len = generateChildTblName(len, pThreadInfo->buffer, + database, stbInfo, i, ttl); + + batchNum++; + smallBatchCount++; + + int smallBatch = getBatchOfTblCreating(pThreadInfo, stbInfo); + if ((!smallBatch || (smallBatchCount == smallBatch)) + && (batchNum < stbInfo->batchTblCreatingNum) + && ((TSDB_MAX_ALLOWED_SQL_LEN - len) >= + (stbInfo->lenOfTags + EXTRA_SQL_LEN))) { + continue; + } else { + smallBatchCount = 0; + } + } + + len = 0; + + int ret = 0; + debugPrint("thread[%d] creating table: %s\n", pThreadInfo->threadID, + pThreadInfo->buffer); + if (REST_IFACE == stbInfo->iface) { + ret = queryDbExecRest(pThreadInfo->buffer, + database->dbName, + database->precision, + stbInfo->iface, + stbInfo->lineProtocol, + stbInfo->tcpTransfer, + pThreadInfo->sockfd); + } else { + ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); + int32_t trying = g_arguments->keep_trying; + while (ret && trying) { + infoPrint("will sleep %"PRIu32" milliseconds then re-create " + "table %s\n", + g_arguments->trying_interval, pThreadInfo->buffer); + toolsMsleep(g_arguments->trying_interval); + ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); + if (trying != -1) { + trying--; + } + } + } + + if (0 != ret) { + g_fail = true; + goto create_table_end; + } + uint64_t intervalOfTblCreating = getIntervalOfTblCreating(pThreadInfo, + stbInfo); + if (intervalOfTblCreating) { + debugPrint("will sleep %"PRIu64" milliseconds " + "for table creating interval\n", intervalOfTblCreating); + toolsMsleep(intervalOfTblCreating); + } + + pThreadInfo->tables_created += batchNum; + batchNum = 0; + uint64_t currentPrintTime = toolsGetTimestampMs(); + if (currentPrintTime - lastPrintTime > PRINT_STAT_INTERVAL) { + infoPrint( + "thread[%d] already created %" PRId64 " tables\n", + pThreadInfo->threadID, pThreadInfo->tables_created); + lastPrintTime = currentPrintTime; + } + } + + if (0 != len) { + int ret = 0; + debugPrint("thread[%d] creating table: %s\n", pThreadInfo->threadID, + pThreadInfo->buffer); + if (REST_IFACE == stbInfo->iface) { + ret = queryDbExecRest(pThreadInfo->buffer, + database->dbName, + database->precision, + stbInfo->iface, + stbInfo->lineProtocol, + stbInfo->tcpTransfer, + pThreadInfo->sockfd); + } else { + ret = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); + } + if (0 != ret) { + g_fail = true; + goto create_table_end; + } + pThreadInfo->tables_created += batchNum; + debugPrint("thread[%d] already created %" PRId64 " tables\n", + pThreadInfo->threadID, pThreadInfo->tables_created); + } +create_table_end: + tmfree(pThreadInfo->buffer); + pThreadInfo->buffer = NULL; + return NULL; +} + +static int startMultiThreadCreateChildTable( + SDataBase* database, SSuperTable* stbInfo) { + int code = -1; + int threads = g_arguments->table_threads; + int64_t ntables; + if (stbInfo->childTblTo > 0) { + ntables = stbInfo->childTblTo - stbInfo->childTblFrom; + } else { + ntables = stbInfo->childTblCount; + } + pthread_t *pids = benchCalloc(1, threads * sizeof(pthread_t), false); + threadInfo *infos = benchCalloc(1, threads * sizeof(threadInfo), false); + uint64_t tableFrom = 0; + if (threads < 1) { + threads = 1; + } + + int64_t a = ntables / threads; + if (a < 1) { + threads = (int)ntables; + a = 1; + } + + if (ntables == 0) { + errorPrint("failed to create child table, 
childTblCount: %"PRId64"\n", + ntables); + goto over; + } + int64_t b = ntables % threads; + + int threadCnt = 0; + for (uint32_t i = 0; (i < threads && !g_arguments->terminate); i++) { + threadInfo *pThreadInfo = infos + i; + pThreadInfo->threadID = i; + pThreadInfo->stbInfo = stbInfo; + pThreadInfo->dbInfo = database; + if (REST_IFACE == stbInfo->iface) { + int sockfd = createSockFd(); + if (sockfd < 0) { + FREE_PIDS_INFOS_RETURN_MINUS_1(); + } + pThreadInfo->sockfd = sockfd; + } else { + pThreadInfo->conn = initBenchConn(); + if (NULL == pThreadInfo->conn) { + goto over; + } + } + pThreadInfo->start_table_from = tableFrom; + pThreadInfo->ntables = i < b ? a + 1 : a; + pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; + pThreadInfo->tables_created = 0; + pthread_create(pids + i, NULL, createTable, pThreadInfo); + threadCnt ++; + } + + for (int i = 0; i < threadCnt; i++) { + pthread_join(pids[i], NULL); + } + + if (g_arguments->terminate) toolsMsleep(100); + + for (int i = 0; i < threadCnt; i++) { + threadInfo *pThreadInfo = infos + i; + g_arguments->actualChildTables += pThreadInfo->tables_created; + + if ((REST_IFACE != stbInfo->iface) && pThreadInfo->conn) { + closeBenchConn(pThreadInfo->conn); + } + } + + if (g_fail) { + goto over; + } + code = 0; +over: + free(pids); + free(infos); + return code; +} + +static int createChildTables() { + int32_t code; + infoPrint("start creating %" PRId64 " table(s) with %d thread(s)\n", + g_arguments->totalChildTables, g_arguments->table_threads); + if (g_arguments->fpOfInsertResult) { + infoPrintToFile(g_arguments->fpOfInsertResult, + "start creating %" PRId64 " table(s) with %d thread(s)\n", + g_arguments->totalChildTables, g_arguments->table_threads); + } + double start = (double)toolsGetTimestampMs(); + + for (int i = 0; (i < g_arguments->databases->size + && !g_arguments->terminate); i++) { + SDataBase * database = benchArrayGet(g_arguments->databases, i); + if (database->superTbls) { + for (int j = 0; (j < database->superTbls->size + && !g_arguments->terminate); j++) { + SSuperTable * stbInfo = benchArrayGet(database->superTbls, j); + if (stbInfo->autoTblCreating || stbInfo->iface == SML_IFACE + || stbInfo->iface == SML_REST_IFACE) { + g_arguments->autoCreatedChildTables += + stbInfo->childTblCount; + continue; + } + if (stbInfo->childTblExists) { + g_arguments->existedChildTables += + stbInfo->childTblCount; + continue; + } + debugPrint("colsOfCreateChildTable: %s\n", + stbInfo->colsOfCreateChildTable); + + code = startMultiThreadCreateChildTable(database, stbInfo); + if (code && !g_arguments->terminate) { + return code; + } + } + } + } + + double end = (double)toolsGetTimestampMs(); + succPrint( + "Spent %.4f seconds to create %" PRId64 + " table(s) with %d thread(s), already exist %" PRId64 + " table(s), actual %" PRId64 " table(s) pre created, %" PRId64 + " table(s) will be auto created\n", + (end - start) / 1000.0, g_arguments->totalChildTables, + g_arguments->table_threads, g_arguments->existedChildTables, + g_arguments->actualChildTables, + g_arguments->autoCreatedChildTables); + return 0; +} + +static void freeChildTable(SChildTable *childTbl, int colsSize) { + if (childTbl->useOwnSample) { + if (childTbl->childCols) { + for (int col = 0; col < colsSize; col++) { + ChildField *childCol = + benchArrayGet(childTbl->childCols, col); + if (childCol) { + tmfree(childCol->stmtData.data); + tmfree(childCol->stmtData.is_null); + } + } + benchArrayDestroy(childTbl->childCols); + } 
+ tmfree(childTbl->sampleDataBuf); + } + tmfree(childTbl); +} + +void postFreeResource() { + if (!g_arguments->terminate) { + tmfclose(g_arguments->fpOfInsertResult); + } + + for (int i = 0; i < g_arguments->databases->size; i++) { + SDataBase * database = benchArrayGet(g_arguments->databases, i); + if (database->cfgs) { + for (int c = 0; c < database->cfgs->size; c++) { + SDbCfg *cfg = benchArrayGet(database->cfgs, c); + if ((NULL == root) && (0 == strcmp(cfg->name, "replica"))) { + tmfree(cfg->name); + cfg->name = NULL; + } + } + benchArrayDestroy(database->cfgs); + } + if (database->superTbls) { + for (uint64_t j = 0; j < database->superTbls->size; j++) { + SSuperTable * stbInfo = benchArrayGet(database->superTbls, j); + tmfree(stbInfo->colsOfCreateChildTable); + stbInfo->colsOfCreateChildTable = NULL; + tmfree(stbInfo->sampleDataBuf); + stbInfo->sampleDataBuf = NULL; + tmfree(stbInfo->tagDataBuf); + stbInfo->tagDataBuf = NULL; + tmfree(stbInfo->partialColNameBuf); + stbInfo->partialColNameBuf = NULL; + benchArrayDestroy(stbInfo->batchTblCreatingNumbersArray); + benchArrayDestroy(stbInfo->batchTblCreatingIntervalsArray); + for (int k = 0; k < stbInfo->tags->size; k++) { + Field * tag = benchArrayGet(stbInfo->tags, k); + tmfree(tag->stmtData.data); + tag->stmtData.data = NULL; + } + benchArrayDestroy(stbInfo->tags); + + for (int k = 0; k < stbInfo->cols->size; k++) { + Field * col = benchArrayGet(stbInfo->cols, k); + tmfree(col->stmtData.data); + col->stmtData.data = NULL; + tmfree(col->stmtData.is_null); + col->stmtData.is_null = NULL; + } + if (g_arguments->test_mode == INSERT_TEST) { + if (stbInfo->childTblArray) { + for (int64_t child = 0; child < stbInfo->childTblCount; + child++) { + SChildTable *childTbl = + stbInfo->childTblArray[child]; + if (childTbl) { + freeChildTable(childTbl, stbInfo->cols->size); + } + } + } + } + benchArrayDestroy(stbInfo->cols); + tmfree(stbInfo->childTblArray); + stbInfo->childTblArray = NULL; + benchArrayDestroy(stbInfo->tsmas); +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + if ((0 == stbInfo->interlaceRows) + && (g_arguments->nthreads_auto)) { + for (int32_t v = 0; v < database->vgroups; v++) { + SVGroup *vg = benchArrayGet(database->vgArray, v); + tmfree(vg->childTblArray); + vg->childTblArray = NULL; + } + } +#endif // TD_VER_COMPATIBLE_3_0_0_0 + } +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + if (database->vgArray) + benchArrayDestroy(database->vgArray); +#endif // TD_VER_COMPATIBLE_3_0_0_0 + benchArrayDestroy(database->superTbls); + } + } + benchArrayDestroy(g_arguments->databases); + benchArrayDestroy(g_arguments->streams); + tools_cJSON_Delete(root); +} + +int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) { + SDataBase * database = pThreadInfo->dbInfo; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_RES * res = NULL; + int32_t code = 0; + uint16_t iface = stbInfo->iface; + + int32_t trying = (stbInfo->keep_trying)? + stbInfo->keep_trying:g_arguments->keep_trying; + int32_t trying_interval = stbInfo->trying_interval? 
+ stbInfo->trying_interval:g_arguments->trying_interval; + int protocol = stbInfo->lineProtocol; + + switch (iface) { + case TAOSC_IFACE: + debugPrint("buffer: %s\n", pThreadInfo->buffer); + code = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); + while (code && trying && !g_arguments->terminate) { + infoPrint("will sleep %"PRIu32" milliseconds then re-insert\n", + trying_interval); + toolsMsleep(trying_interval); + code = queryDbExecCall(pThreadInfo->conn, pThreadInfo->buffer); + if (trying != -1) { + trying--; + } + } + break; + + case REST_IFACE: + debugPrint("buffer: %s\n", pThreadInfo->buffer); + code = postProceSql(pThreadInfo->buffer, + database->dbName, + database->precision, + stbInfo->iface, + stbInfo->lineProtocol, + g_arguments->port, + stbInfo->tcpTransfer, + pThreadInfo->sockfd, + pThreadInfo->filePath); + while (code && trying && !g_arguments->terminate) { + infoPrint("will sleep %"PRIu32" milliseconds then re-insert\n", + trying_interval); + toolsMsleep(trying_interval); + code = postProceSql(pThreadInfo->buffer, + database->dbName, + database->precision, + stbInfo->iface, + stbInfo->lineProtocol, + g_arguments->port, + stbInfo->tcpTransfer, + pThreadInfo->sockfd, + pThreadInfo->filePath); + if (trying != -1) { + trying--; + } + } + break; + + case STMT_IFACE: + code = taos_stmt_execute(pThreadInfo->conn->stmt); + if (code) { + errorPrint( + "failed to execute insert statement. reason: %s\n", + taos_stmt_errstr(pThreadInfo->conn->stmt)); + code = -1; + } + break; + + case SML_IFACE: + res = taos_schemaless_insert( + pThreadInfo->conn->taos, pThreadInfo->lines, + (TSDB_SML_JSON_PROTOCOL == protocol + || SML_JSON_TAOS_FORMAT == protocol) + ? 0 : k, + (SML_JSON_TAOS_FORMAT == protocol) + ? TSDB_SML_JSON_PROTOCOL : protocol, + (TSDB_SML_LINE_PROTOCOL == protocol) + ? database->sml_precision + : TSDB_SML_TIMESTAMP_NOT_CONFIGURED); + code = taos_errno(res); + trying = stbInfo->keep_trying; + while (code && trying && !g_arguments->terminate) { + taos_free_result(res); + infoPrint("will sleep %"PRIu32" milliseconds then re-insert\n", + trying_interval); + toolsMsleep(trying_interval); + res = taos_schemaless_insert( + pThreadInfo->conn->taos, pThreadInfo->lines, + (TSDB_SML_JSON_PROTOCOL == protocol + || SML_JSON_TAOS_FORMAT == protocol) + ? 0 : k, + (SML_JSON_TAOS_FORMAT == protocol) + ? TSDB_SML_JSON_PROTOCOL : protocol, + (TSDB_SML_LINE_PROTOCOL == protocol) + ? database->sml_precision + : TSDB_SML_TIMESTAMP_NOT_CONFIGURED); + code = taos_errno(res); + if (trying != -1) { + trying--; + } + } + + if (code != TSDB_CODE_SUCCESS && !g_arguments->terminate) { + debugPrint("Failed to execute " + "schemaless insert content: %s\n\n", + pThreadInfo->lines?(pThreadInfo->lines[0]? + pThreadInfo->lines[0]:""):""); + errorPrint( + "failed to execute schemaless insert. 
" + "code: 0x%08x reason: %s\n\n", + code, taos_errstr(res)); + } + taos_free_result(res); + break; + + case SML_REST_IFACE: { + if (TSDB_SML_JSON_PROTOCOL == protocol + || SML_JSON_TAOS_FORMAT == protocol) { + code = postProceSql(pThreadInfo->lines[0], database->dbName, + database->precision, stbInfo->iface, + protocol, g_arguments->port, + stbInfo->tcpTransfer, + pThreadInfo->sockfd, pThreadInfo->filePath); + } else { + int len = 0; + for (int i = 0; i < k; i++) { + if (strlen(pThreadInfo->lines[i]) != 0) { + int n; + if (TSDB_SML_TELNET_PROTOCOL == protocol + && stbInfo->tcpTransfer) { + n = snprintf(pThreadInfo->buffer + len, + TSDB_MAX_ALLOWED_SQL_LEN - len, + "put %s\n", pThreadInfo->lines[i]); + } else { + n = snprintf(pThreadInfo->buffer + len, + TSDB_MAX_ALLOWED_SQL_LEN - len, + "%s\n", + pThreadInfo->lines[i]); + } + if (n < 0 || n >= TSDB_MAX_ALLOWED_SQL_LEN - len) { + errorPrint("%s() LN%d snprintf overflow on %d\n", + __func__, __LINE__, i); + break; + } else { + len += n; + } + } else { + break; + } + } + if (g_arguments->terminate) { + break; + } + code = postProceSql(pThreadInfo->buffer, database->dbName, + database->precision, + stbInfo->iface, protocol, + g_arguments->port, + stbInfo->tcpTransfer, + pThreadInfo->sockfd, pThreadInfo->filePath); + } + break; + } + } + return code; +} + +static int smartContinueIfFail(threadInfo *pThreadInfo, + SChildTable *childTbl, + int64_t i, + char *ttl) { + SDataBase * database = pThreadInfo->dbInfo; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + char *buffer = + benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, false); + snprintf( + buffer, TSDB_MAX_ALLOWED_SQL_LEN, + g_arguments->escape_character ? + "CREATE TABLE `%s`.`%s` USING `%s`.`%s` TAGS (%s) %s " + : "CREATE TABLE %s.%s USING %s.%s TAGS (%s) %s ", + database->dbName, childTbl->name, database->dbName, + stbInfo->stbName, + stbInfo->tagDataBuf + i * stbInfo->lenOfTags, ttl); + debugPrint("creating table: %s\n", buffer); + int ret; + if (REST_IFACE == stbInfo->iface) { + ret = queryDbExecRest(buffer, + database->dbName, + database->precision, + stbInfo->iface, + stbInfo->lineProtocol, + stbInfo->tcpTransfer, + pThreadInfo->sockfd); + } else { + ret = queryDbExecCall(pThreadInfo->conn, buffer); + int32_t trying = g_arguments->keep_trying; + while (ret && trying) { + infoPrint("will sleep %"PRIu32" milliseconds then " + "re-create table %s\n", + g_arguments->trying_interval, buffer); + toolsMsleep(g_arguments->trying_interval); + ret = queryDbExecCall(pThreadInfo->conn, buffer); + if (trying != -1) { + trying--; + } + } + } + tmfree(buffer); + + return ret; +} + +static void cleanupAndPrint(threadInfo *pThreadInfo, char *mode) { + if (pThreadInfo) { + if (pThreadInfo->json_array) { + tools_cJSON_Delete(pThreadInfo->json_array); + pThreadInfo->json_array = NULL; + } + if (0 == pThreadInfo->totalDelay) { + pThreadInfo->totalDelay = 1; + } + succPrint( + "thread[%d] %s mode, completed total inserted rows: %" PRIu64 + ", %.2f records/second\n", + pThreadInfo->threadID, + mode, + pThreadInfo->totalInsertRows, + (double)(pThreadInfo->totalInsertRows / + ((double)pThreadInfo->totalDelay / 1E6))); + } +} + +static int64_t getDisorderTs(SSuperTable *stbInfo, int *disorderRange) { + int64_t disorderTs = 0; + int64_t startTimestamp = stbInfo->startTimestamp; + if (stbInfo->disorderRatio > 0) { + int rand_num = taosRandom() % 100; + if (rand_num < stbInfo->disorderRatio) { + (*disorderRange)--; + if (0 == *disorderRange) { + *disorderRange = stbInfo->disorderRange; + } + disorderTs = startTimestamp 
- *disorderRange; + debugPrint("rand_num: %d, < disorderRatio: %d, " + "disorderTs: %"PRId64"\n", + rand_num, stbInfo->disorderRatio, + disorderTs); + } + } + return disorderTs; +} + +static void *syncWriteInterlace(void *sarg) { + threadInfo * pThreadInfo = (threadInfo *)sarg; + SDataBase * database = pThreadInfo->dbInfo; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + infoPrint( + "thread[%d] start interlace inserting into table from " + "%" PRIu64 " to %" PRIu64 "\n", + pThreadInfo->threadID, pThreadInfo->start_table_from, + pThreadInfo->end_table_to); + + int64_t insertRows = stbInfo->insertRows; + int32_t interlaceRows = stbInfo->interlaceRows; + int64_t pos = 0; + uint32_t batchPerTblTimes = g_arguments->reqPerReq / interlaceRows; + uint64_t lastPrintTime = toolsGetTimestampMs(); + uint64_t lastTotalInsertRows = 0; + int64_t startTs = toolsGetTimestampUs(); + int64_t endTs; + uint64_t tableSeq = pThreadInfo->start_table_from; + int disorderRange = stbInfo->disorderRange; + + while (insertRows > 0) { + int64_t tmp_total_insert_rows = 0; + uint32_t generated = 0; + if (insertRows <= interlaceRows) { + interlaceRows = insertRows; + } + for (int i = 0; i < batchPerTblTimes; i++) { + if (g_arguments->terminate) { + goto free_of_interlace; + } + int64_t timestamp = pThreadInfo->start_time; + SChildTable *childTbl = stbInfo->childTblArray[tableSeq]; + char * tableName = + stbInfo->childTblArray[tableSeq]->name; + char *sampleDataBuf = childTbl->useOwnSample? + childTbl->sampleDataBuf: + stbInfo->sampleDataBuf; + char ttl[SMALL_BUFF_LEN] = ""; + if (stbInfo->ttl != 0) { + snprintf(ttl, SMALL_BUFF_LEN, "TTL %d", stbInfo->ttl); + } + switch (stbInfo->iface) { + case REST_IFACE: + case TAOSC_IFACE: { + char escapedTbName[TSDB_TABLE_NAME_LEN+2] = "\0"; + if (g_arguments->escape_character) { + snprintf(escapedTbName, TSDB_TABLE_NAME_LEN+2, "`%s`", + tableName); + } else { + snprintf(escapedTbName, TSDB_TABLE_NAME_LEN+2, "%s", + tableName); + } + if (i == 0) { + ds_add_str(&pThreadInfo->buffer, STR_INSERT_INTO); + } + if (stbInfo->partialColNum == stbInfo->cols->size) { + if (stbInfo->autoTblCreating) { + ds_add_strs(&pThreadInfo->buffer, 8, + escapedTbName, + " USING `", + stbInfo->stbName, + "` TAGS (", + stbInfo->tagDataBuf + + stbInfo->lenOfTags * tableSeq, + ") ", ttl, " VALUES "); + } else { + ds_add_strs(&pThreadInfo->buffer, 2, + escapedTbName, " VALUES "); + } + } else { + if (stbInfo->autoTblCreating) { + ds_add_strs(&pThreadInfo->buffer, 10, + escapedTbName, + " (", + stbInfo->partialColNameBuf, + ") USING `", + stbInfo->stbName, + "` TAGS (", + stbInfo->tagDataBuf + + stbInfo->lenOfTags * tableSeq, + ") ", ttl, " VALUES "); + } else { + ds_add_strs(&pThreadInfo->buffer, 4, + escapedTbName, + "(", + stbInfo->partialColNameBuf, + ") VALUES "); + } + } + + for (int64_t j = 0; j < interlaceRows; j++) { + int64_t disorderTs = getDisorderTs(stbInfo, + &disorderRange); + char time_string[BIGINT_BUFF_LEN]; + snprintf(time_string, BIGINT_BUFF_LEN, "%"PRId64"", + disorderTs?disorderTs:timestamp); + ds_add_strs(&pThreadInfo->buffer, 5, + "(", + time_string, + ",", + sampleDataBuf + pos * stbInfo->lenOfCols, + ") "); + if (ds_len(pThreadInfo->buffer) + > stbInfo->max_sql_len) { + errorPrint("sql buffer length (%"PRIu64") " + "is larger than max sql length " + "(%"PRId64")\n", + ds_len(pThreadInfo->buffer), + stbInfo->max_sql_len); + goto free_of_interlace; + } + generated++; + pos++; + if (pos >= g_arguments->prepared_rand) { + pos = 0; + } + timestamp += stbInfo->timestamp_step; + } + break; + } 
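+ // STMT interface: rows are bound as a parameter batch via bindParamBatch() instead of being rendered into SQL text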
+ case STMT_IFACE: { + char escapedTbName[TSDB_TABLE_NAME_LEN+2] = "\0"; + if (g_arguments->escape_character) { + snprintf(escapedTbName, TSDB_TABLE_NAME_LEN+2, + "`%s`", tableName); + } else { + snprintf(escapedTbName, TSDB_TABLE_NAME_LEN, "%s", + tableName); + } + if (taos_stmt_set_tbname(pThreadInfo->conn->stmt, + escapedTbName)) { + errorPrint( + "taos_stmt_set_tbname(%s) failed, reason: %s\n", + tableName, + taos_stmt_errstr(pThreadInfo->conn->stmt)); + g_fail = true; + goto free_of_interlace; + } + generated = + bindParamBatch(pThreadInfo, interlaceRows, + timestamp, childTbl); + break; + } + case SML_REST_IFACE: + case SML_IFACE: { + int protocol = stbInfo->lineProtocol; + for (int64_t j = 0; j < interlaceRows; j++) { + int64_t disorderTs = getDisorderTs(stbInfo, + &disorderRange); + if (TSDB_SML_JSON_PROTOCOL == protocol) { + tools_cJSON *tag = tools_cJSON_Duplicate( + tools_cJSON_GetArrayItem( + pThreadInfo->sml_json_tags, + (int)tableSeq - + pThreadInfo->start_table_from), + true); + generateSmlJsonCols( + pThreadInfo->json_array, tag, stbInfo, + database->sml_precision, + disorderTs?disorderTs:timestamp); + } else if (SML_JSON_TAOS_FORMAT == protocol) { + tools_cJSON *tag = tools_cJSON_Duplicate( + tools_cJSON_GetArrayItem( + pThreadInfo->sml_json_tags, + (int)tableSeq - + pThreadInfo->start_table_from), + true); + generateSmlTaosJsonCols( + pThreadInfo->json_array, tag, stbInfo, + database->sml_precision, + disorderTs?disorderTs:timestamp); + } else if (TSDB_SML_LINE_PROTOCOL == protocol) { + snprintf( + pThreadInfo->lines[generated], + stbInfo->lenOfCols + stbInfo->lenOfTags, + "%s %s %" PRId64 "", + pThreadInfo + ->sml_tags[(int)tableSeq - + pThreadInfo->start_table_from], + sampleDataBuf + pos * stbInfo->lenOfCols, + disorderTs?disorderTs:timestamp); + } else { + snprintf( + pThreadInfo->lines[generated], + stbInfo->lenOfCols + stbInfo->lenOfTags, + "%s %" PRId64 " %s %s", stbInfo->stbName, + disorderTs?disorderTs:timestamp, + sampleDataBuf + pos * stbInfo->lenOfCols, + pThreadInfo + ->sml_tags[(int)tableSeq - + pThreadInfo->start_table_from]); + } + generated++; + timestamp += stbInfo->timestamp_step; + } + if (TSDB_SML_JSON_PROTOCOL == protocol + || SML_JSON_TAOS_FORMAT == protocol) { + pThreadInfo->lines[0] = + tools_cJSON_PrintUnformatted( + pThreadInfo->json_array); + } + break; + } + } + tableSeq++; + tmp_total_insert_rows += interlaceRows; + if (tableSeq > pThreadInfo->end_table_to) { + tableSeq = pThreadInfo->start_table_from; + pThreadInfo->start_time += + interlaceRows * stbInfo->timestamp_step; + if (!stbInfo->non_stop) { + insertRows -= interlaceRows; + } + if (stbInfo->insert_interval > 0) { + debugPrint("%s() LN%d, insert_interval: %"PRIu64"\n", + __func__, __LINE__, stbInfo->insert_interval); + perfPrint("sleep %" PRIu64 " ms\n", + stbInfo->insert_interval); + toolsMsleep((int32_t)stbInfo->insert_interval); + } + break; + } + } + + startTs = toolsGetTimestampUs(); + if (execInsert(pThreadInfo, generated)) { + g_fail = true; + goto free_of_interlace; + } + endTs = toolsGetTimestampUs(); + + pThreadInfo->totalInsertRows += tmp_total_insert_rows; + + if (g_arguments->terminate) { + goto free_of_interlace; + } + + int protocol = stbInfo->lineProtocol; + switch (stbInfo->iface) { + case TAOSC_IFACE: + case REST_IFACE: + debugPrint("pThreadInfo->buffer: %s\n", + pThreadInfo->buffer); + free_ds(&pThreadInfo->buffer); + pThreadInfo->buffer = new_ds(0); + break; + case SML_REST_IFACE: + memset(pThreadInfo->buffer, 0, + g_arguments->reqPerReq * (pThreadInfo->max_sql_len 
+ 1)); + case SML_IFACE: + if (TSDB_SML_JSON_PROTOCOL == protocol + || SML_JSON_TAOS_FORMAT == protocol) { + debugPrint("pThreadInfo->lines[0]: %s\n", + pThreadInfo->lines[0]); + if (pThreadInfo->json_array && !g_arguments->terminate) { + tools_cJSON_Delete(pThreadInfo->json_array); + pThreadInfo->json_array = NULL; + } + pThreadInfo->json_array = tools_cJSON_CreateArray(); + if (pThreadInfo->lines && pThreadInfo->lines[0]) { + tmfree(pThreadInfo->lines[0]); + pThreadInfo->lines[0] = NULL; + } + } else { + for (int j = 0; j < generated; j++) { + if (pThreadInfo && pThreadInfo->lines + && !g_arguments->terminate) { + debugPrint("pThreadInfo->lines[%d]: %s\n", j, + pThreadInfo->lines[j]); + memset(pThreadInfo->lines[j], 0, + pThreadInfo->max_sql_len); + } + } + } + break; + case STMT_IFACE: + break; + } + + int64_t delay = endTs - startTs; + if (delay <= 0) { + debugPrint("thread[%d]: startTS: %"PRId64", endTS: %"PRId64"\n", + pThreadInfo->threadID, startTs, endTs); + } else { + perfPrint("insert execution time is %10.2f s\n", + delay / 1E6); + + int64_t * pdelay = benchCalloc(1, sizeof(int64_t), false); + *pdelay = delay; + if (benchArrayPush(pThreadInfo->delayList, pdelay) == NULL) { + tmfree(pdelay); + } + pThreadInfo->totalDelay += delay; + } + + int64_t currentPrintTime = toolsGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30 * 1000) { + infoPrint( + "thread[%d] has currently inserted rows: %" PRIu64 + ", period insert rate: %.3f rows/s \n", + pThreadInfo->threadID, pThreadInfo->totalInsertRows, + (double)(pThreadInfo->totalInsertRows - lastTotalInsertRows) * 1000.0/(currentPrintTime - lastPrintTime)); + lastPrintTime = currentPrintTime; + lastTotalInsertRows = pThreadInfo->totalInsertRows; + } + } +free_of_interlace: + cleanupAndPrint(pThreadInfo, "interlace"); + return NULL; +} + +static int32_t prepareProgressDataStmt( + threadInfo *pThreadInfo, + SChildTable *childTbl, + int64_t *timestamp, uint64_t i, char *ttl) { + SSuperTable *stbInfo = pThreadInfo->stbInfo; + char escapedTbName[TSDB_TABLE_NAME_LEN + 2] = "\0"; + if (g_arguments->escape_character) { + snprintf(escapedTbName, TSDB_TABLE_NAME_LEN + 2, + "`%s`", childTbl->name); + } else { + snprintf(escapedTbName, TSDB_TABLE_NAME_LEN, "%s", + childTbl->name); + } + if (taos_stmt_set_tbname(pThreadInfo->conn->stmt, + escapedTbName)) { + errorPrint( + "taos_stmt_set_tbname(%s) failed, " + "reason: %s\n", escapedTbName, + taos_stmt_errstr(pThreadInfo->conn->stmt)); + return -1; + } + int32_t generated = bindParamBatch( + pThreadInfo, + (g_arguments->reqPerReq > (stbInfo->insertRows - i)) + ?
(stbInfo->insertRows - i) + : g_arguments->reqPerReq, + *timestamp, childTbl); + *timestamp += generated * stbInfo->timestamp_step; + return generated; +} + +static void makeTimestampDisorder( + int64_t *timestamp, SSuperTable *stbInfo) { + int64_t startTimestamp = stbInfo->startTimestamp; + int disorderRange = stbInfo->disorderRange; + int rand_num = taosRandom() % 100; + if (rand_num < stbInfo->disorderRatio) { + disorderRange--; + if (0 == disorderRange) { + disorderRange = stbInfo->disorderRange; + } + *timestamp = startTimestamp - disorderRange; + debugPrint("rand_num: %d, < disorderRatio: %d" + ", ts: %"PRId64"\n", + rand_num, + stbInfo->disorderRatio, + *timestamp); + } +} + +static int32_t prepareProgressDataSmlJsonText( + threadInfo *pThreadInfo, + uint64_t tableSeq, + int64_t *timestamp, uint64_t i, char *ttl) { + // prepareProgressDataSmlJsonText + SSuperTable *stbInfo = pThreadInfo->stbInfo; + int32_t generated = 0; + + int len = 0; + + char *line = pThreadInfo->lines[0]; + uint32_t line_buf_len = pThreadInfo->line_buf_len; + + strncat(line + len, "[", 2); + len += 1; + + int32_t pos = 0; + for (int j = 0; (j < g_arguments->reqPerReq) + && !g_arguments->terminate; j++) { + strncat(line + len, "{", 2); + len += 1; + int n; + n = snprintf(line + len, line_buf_len - len, + "\"timestamp\":%"PRId64",", *timestamp); + if (n < 0 || n >= line_buf_len - len) { + errorPrint("%s() LN%d snprintf overflow on %d\n", + __func__, __LINE__, j); + return -1; + } else { + len += n; + } + + n = snprintf(line + len, line_buf_len - len, "%s", + pThreadInfo->sml_json_value_array[tableSeq]); + if (n < 0 || n >= line_buf_len - len) { + errorPrint("%s() LN%d snprintf overflow on %d\n", + __func__, __LINE__, j); + return -1; + } else { + len += n; + } + n = snprintf(line + len, line_buf_len - len, "\"tags\":%s,", + pThreadInfo->sml_tags_json_array[tableSeq]); + if (n < 0 || n >= line_buf_len - len) { + errorPrint("%s() LN%d snprintf overflow on %d\n", + __func__, __LINE__, j); + return -1; + } else { + len += n; + } + n = snprintf(line + len, line_buf_len - len, + "\"metric\":\"%s\"}", stbInfo->stbName); + if (n < 0 || n >= line_buf_len - len) { + errorPrint("%s() LN%d snprintf overflow on %d\n", + __func__, __LINE__, j); + return -1; + } else { + len += n; + } + + pos++; + if (pos >= g_arguments->prepared_rand) { + pos = 0; + } + *timestamp += stbInfo->timestamp_step; + if (stbInfo->disorderRatio > 0) { + makeTimestampDisorder(timestamp, stbInfo); + } + generated++; + if (i + generated >= stbInfo->insertRows) { + break; + } + if ((j+1) < g_arguments->reqPerReq) { + strncat(line + len, ",", 2); + len += 1; + } + } + strncat(line + len, "]", 2); + + debugPrint("%s() LN%d, lines[0]: %s\n", + __func__, __LINE__, pThreadInfo->lines[0]); + return generated; +} + +static int32_t prepareProgressDataSmlJson( + threadInfo *pThreadInfo, + uint64_t tableSeq, + int64_t *timestamp, uint64_t i, char *ttl) { + // prepareProgressDataSmlJson + SDataBase * database = pThreadInfo->dbInfo; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + int32_t generated = 0; + + int32_t pos = 0; + int protocol = stbInfo->lineProtocol; + for (int j = 0; (j < g_arguments->reqPerReq) + && !g_arguments->terminate; j++) { + tools_cJSON *tag = tools_cJSON_Duplicate( + tools_cJSON_GetArrayItem( + pThreadInfo->sml_json_tags, + (int)tableSeq - + pThreadInfo->start_table_from), + true); + debugPrintJsonNoTime(tag); + if (TSDB_SML_JSON_PROTOCOL == protocol) { + generateSmlJsonCols( + pThreadInfo->json_array, tag, stbInfo, + database->sml_precision, 
*timestamp); + } else { + generateSmlTaosJsonCols( + pThreadInfo->json_array, tag, stbInfo, + database->sml_precision, *timestamp); + } + pos++; + if (pos >= g_arguments->prepared_rand) { + pos = 0; + } + *timestamp += stbInfo->timestamp_step; + if (stbInfo->disorderRatio > 0) { + makeTimestampDisorder(timestamp, stbInfo); + } + generated++; + if (i + generated >= stbInfo->insertRows) { + break; + } + } + + tmfree(pThreadInfo->lines[0]); + pThreadInfo->lines[0] = NULL; + pThreadInfo->lines[0] = + tools_cJSON_PrintUnformatted( + pThreadInfo->json_array); + debugPrint("pThreadInfo->lines[0]: %s\n", + pThreadInfo->lines[0]); + + return generated; +} + +static int32_t prepareProgressDataSmlLineOrTelnet( + threadInfo *pThreadInfo, uint64_t tableSeq, char *sampleDataBuf, + int64_t *timestamp, uint64_t i, char *ttl, int protocol) { + // prepareProgressDataSmlLine + SSuperTable *stbInfo = pThreadInfo->stbInfo; + int32_t generated = 0; + + int32_t pos = 0; + for (int j = 0; (j < g_arguments->reqPerReq) + && !g_arguments->terminate; j++) { + if (TSDB_SML_LINE_PROTOCOL == protocol) { + snprintf( + pThreadInfo->lines[j], + stbInfo->lenOfCols + stbInfo->lenOfTags, + "%s %s %" PRId64 "", + pThreadInfo->sml_tags[tableSeq + - pThreadInfo->start_table_from], + sampleDataBuf + pos * stbInfo->lenOfCols, + *timestamp); + } else { + snprintf( + pThreadInfo->lines[j], + stbInfo->lenOfCols + stbInfo->lenOfTags, + "%s %" PRId64 " %s %s", stbInfo->stbName, + *timestamp, + sampleDataBuf + + pos * stbInfo->lenOfCols, + pThreadInfo->sml_tags[tableSeq + -pThreadInfo->start_table_from]); + } + pos++; + if (pos >= g_arguments->prepared_rand) { + pos = 0; + } + *timestamp += stbInfo->timestamp_step; + if (stbInfo->disorderRatio > 0) { + makeTimestampDisorder(timestamp, stbInfo); + } + generated++; + if (i + generated >= stbInfo->insertRows) { + break; + } + } + return generated; +} + +static int32_t prepareProgressDataSml( + threadInfo *pThreadInfo, + SChildTable *childTbl, + uint64_t tableSeq, + int64_t *timestamp, uint64_t i, char *ttl) { + // prepareProgressDataSml + SSuperTable *stbInfo = pThreadInfo->stbInfo; + + char *sampleDataBuf; + if (childTbl->useOwnSample) { + sampleDataBuf = childTbl->sampleDataBuf; + } else { + sampleDataBuf = stbInfo->sampleDataBuf; + } + int protocol = stbInfo->lineProtocol; + int32_t generated = -1; + switch (protocol) { + case TSDB_SML_LINE_PROTOCOL: + case TSDB_SML_TELNET_PROTOCOL: + generated = prepareProgressDataSmlLineOrTelnet( + pThreadInfo, + tableSeq, + sampleDataBuf, + timestamp, i, ttl, protocol); + break; + case TSDB_SML_JSON_PROTOCOL: + generated = prepareProgressDataSmlJsonText( + pThreadInfo, + tableSeq - pThreadInfo->start_table_from, + timestamp, i, ttl); + break; + case SML_JSON_TAOS_FORMAT: + generated = prepareProgressDataSmlJson( + pThreadInfo, + tableSeq, + timestamp, i, ttl); + break; + default: + errorPrint("%s() LN%d: unknown protocol: %d\n", + __func__, __LINE__, protocol); + break; + } + + return generated; +} + +static int32_t prepareProgressDataSql( + threadInfo *pThreadInfo, + SChildTable *childTbl, uint64_t tableSeq, + char *sampleDataBuf, + int64_t *timestamp, uint64_t i, char *ttl, + int32_t *pos, uint64_t *len) { + // prepareProgressDataSql + int32_t generated = 0; + SDataBase *database = pThreadInfo->dbInfo; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + char * pstr = pThreadInfo->buffer; + int disorderRange = stbInfo->disorderRange; + + if (stbInfo->partialColNum == stbInfo->cols->size) { + if (stbInfo->autoTblCreating) { + *len = + snprintf(pstr,
TSDB_MAX_ALLOWED_SQL_LEN, + g_arguments->escape_character + ? "%s `%s`.`%s` USING `%s`.`%s` TAGS (%s) %s VALUES " + : "%s %s.%s USING %s.%s TAGS (%s) %s VALUES ", + STR_INSERT_INTO, database->dbName, + childTbl->name, database->dbName, + stbInfo->stbName, + stbInfo->tagDataBuf + + stbInfo->lenOfTags * tableSeq, ttl); + } else { + *len = snprintf(pstr, TSDB_MAX_ALLOWED_SQL_LEN, + g_arguments->escape_character + ? "%s `%s`.`%s` VALUES " + : "%s %s.%s VALUES ", + STR_INSERT_INTO, + database->dbName, childTbl->name); + } + } else { + if (stbInfo->autoTblCreating) { + *len = snprintf( + pstr, TSDB_MAX_ALLOWED_SQL_LEN, + g_arguments->escape_character + ? "%s `%s`.`%s` (%s) USING `%s`.`%s` TAGS (%s) %s VALUES " + : "%s %s.%s (%s) USING %s.%s TAGS (%s) %s VALUES ", + STR_INSERT_INTO, database->dbName, + childTbl->name, + stbInfo->partialColNameBuf, + database->dbName, stbInfo->stbName, + stbInfo->tagDataBuf + + stbInfo->lenOfTags * tableSeq, ttl); + } else { + *len = snprintf(pstr, TSDB_MAX_ALLOWED_SQL_LEN, + g_arguments->escape_character + ? "%s `%s`.`%s` (%s) VALUES " + : "%s %s.%s (%s) VALUES ", + STR_INSERT_INTO, database->dbName, + childTbl->name, + stbInfo->partialColNameBuf); + } + } + + char *ownSampleDataBuf; + if (childTbl->useOwnSample) { + debugPrint("%s is using own sample data\n", + childTbl->name); + ownSampleDataBuf = childTbl->sampleDataBuf; + } else { + ownSampleDataBuf = stbInfo->sampleDataBuf; + } + for (int j = 0; j < g_arguments->reqPerReq; j++) { + if (stbInfo->useSampleTs + && (!stbInfo->random_data_source)) { + *len += + snprintf(pstr + *len, + TSDB_MAX_ALLOWED_SQL_LEN - *len, "(%s)", + sampleDataBuf + + *pos * stbInfo->lenOfCols); + } else { + int64_t disorderTs = getDisorderTs(stbInfo, &disorderRange); + *len += snprintf(pstr + *len, + TSDB_MAX_ALLOWED_SQL_LEN - *len, + "(%" PRId64 ",%s)", + disorderTs?disorderTs:*timestamp, + ownSampleDataBuf + + *pos * stbInfo->lenOfCols); + } + *pos += 1; + if (*pos >= g_arguments->prepared_rand) { + *pos = 0; + } + *timestamp += stbInfo->timestamp_step; + generated++; + if (*len > (TSDB_MAX_ALLOWED_SQL_LEN + - stbInfo->lenOfCols)) { + break; + } + if (i + generated >= stbInfo->insertRows) { + break; + } + } + + return generated; +} + +void *syncWriteProgressive(void *sarg) { + threadInfo * pThreadInfo = (threadInfo *)sarg; + SDataBase * database = pThreadInfo->dbInfo; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + + // special deal flow for TAOSC_IFACE + if (insertDataMix(pThreadInfo, database, stbInfo)) { + // request be dealt by this function , so return + return NULL; + } + +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + if (g_arguments->nthreads_auto) { + if (0 == pThreadInfo->vg->tbCountPerVgId) { + return NULL; + } + } else { + infoPrint( + "thread[%d] start progressive inserting into table from " + "%" PRIu64 " to %" PRIu64 "\n", + pThreadInfo->threadID, pThreadInfo->start_table_from, + pThreadInfo->end_table_to + 1); + } +#else + infoPrint( + "thread[%d] start progressive inserting into table from " + "%" PRIu64 " to %" PRIu64 "\n", + pThreadInfo->threadID, pThreadInfo->start_table_from, + pThreadInfo->end_table_to + 1); +#endif + uint64_t lastPrintTime = toolsGetTimestampMs(); + uint64_t lastTotalInsertRows = 0; + int64_t startTs = toolsGetTimestampUs(); + int64_t endTs; + + for (uint64_t tableSeq = pThreadInfo->start_table_from; + tableSeq <= pThreadInfo->end_table_to; tableSeq++) { + char *sampleDataBuf; + SChildTable *childTbl; +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + if (g_arguments->nthreads_auto) { + childTbl = 
pThreadInfo->vg->childTblArray[tableSeq]; + } else { + childTbl = stbInfo->childTblArray[ + stbInfo->childTblExists? + tableSeq: + stbInfo->childTblFrom + tableSeq]; + } +#else + childTbl = stbInfo->childTblArray[ + stbInfo->childTblExists? + tableSeq: + stbInfo->childTblFrom + tableSeq]; +#endif + if (childTbl->useOwnSample) { + sampleDataBuf = childTbl->sampleDataBuf; + } else { + sampleDataBuf = stbInfo->sampleDataBuf; + } + + int64_t timestamp = pThreadInfo->start_time; + uint64_t len = 0; + int32_t pos = 0; + if (stbInfo->iface == STMT_IFACE && stbInfo->autoTblCreating) { + taos_stmt_close(pThreadInfo->conn->stmt); + pThreadInfo->conn->stmt = taos_stmt_init(pThreadInfo->conn->taos); + if (NULL == pThreadInfo->conn->stmt) { + errorPrint("taos_stmt_init() failed, reason: %s\n", + taos_errstr(NULL)); + g_fail = true; + goto free_of_progressive; + } + + if (prepareStmt(stbInfo, pThreadInfo->conn->stmt, tableSeq)) { + g_fail = true; + goto free_of_progressive; + } + } + + char ttl[SMALL_BUFF_LEN] = ""; + if (stbInfo->ttl != 0) { + snprintf(ttl, SMALL_BUFF_LEN, "TTL %d", stbInfo->ttl); + } + for (uint64_t i = 0; i < stbInfo->insertRows;) { + if (g_arguments->terminate) { + goto free_of_progressive; + } + int32_t generated = 0; + switch (stbInfo->iface) { + case TAOSC_IFACE: + case REST_IFACE: + generated = prepareProgressDataSql( + pThreadInfo, + childTbl, + tableSeq, + sampleDataBuf, + &timestamp, i, ttl, &pos, &len); + break; + case STMT_IFACE: { + generated = prepareProgressDataStmt( + pThreadInfo, + childTbl, &timestamp, i, ttl); + break; + } + case SML_REST_IFACE: + case SML_IFACE: + generated = prepareProgressDataSml( + pThreadInfo, + childTbl, + tableSeq, &timestamp, i, ttl); + break; + default: + break; + } + if (generated < 0) { + g_fail = true; + goto free_of_progressive; + } + if (!stbInfo->non_stop) { + i += generated; + } + // only measure insert + startTs = toolsGetTimestampUs(); + int code = execInsert(pThreadInfo, generated); + if (code) { + if (NO_IF_FAILED == stbInfo->continueIfFail) { + warnPrint("The super table parameter " + "continueIfFail: %d, STOP insertion!\n", + stbInfo->continueIfFail); + g_fail = true; + goto free_of_progressive; + } else if (YES_IF_FAILED == stbInfo->continueIfFail) { + infoPrint("The super table parameter " + "continueIfFail: %d, " + "will continue to insert ..\n", + stbInfo->continueIfFail); + } else if (SMART_IF_FAILED == stbInfo->continueIfFail) { + warnPrint("The super table parameter " + "continueIfFail: %d, will create table " + "then insert ..\n", + stbInfo->continueIfFail); + int ret = smartContinueIfFail( + pThreadInfo, + childTbl, i, ttl); + if (0 != ret) { + g_fail = true; + goto free_of_progressive; + } + + code = execInsert(pThreadInfo, generated); + if (code) { + g_fail = true; + goto free_of_progressive; + } + } else { + warnPrint("Unknown super table parameter " + "continueIfFail: %d\n", + stbInfo->continueIfFail); + g_fail = true; + goto free_of_progressive; + } + } + endTs = toolsGetTimestampUs()+1; + + if (stbInfo->insert_interval > 0) { + debugPrint("%s() LN%d, insert_interval: %"PRIu64"\n", + __func__, __LINE__, stbInfo->insert_interval); + perfPrint("sleep %" PRIu64 " ms\n", + stbInfo->insert_interval); + toolsMsleep((int32_t)stbInfo->insert_interval); + } + + pThreadInfo->totalInsertRows += generated; + + if (g_arguments->terminate) { + goto free_of_progressive; + } + int protocol = stbInfo->lineProtocol; + switch (stbInfo->iface) { + case REST_IFACE: + case TAOSC_IFACE: + memset(pThreadInfo->buffer, 0, pThreadInfo->max_sql_len); +
break; + case SML_REST_IFACE: + memset(pThreadInfo->buffer, 0, + g_arguments->reqPerReq * + (pThreadInfo->max_sql_len + 1)); + case SML_IFACE: + if (TSDB_SML_JSON_PROTOCOL == protocol) { + memset(pThreadInfo->lines[0], 0, + pThreadInfo->line_buf_len); + } else if (SML_JSON_TAOS_FORMAT == protocol) { + if (pThreadInfo->lines && pThreadInfo->lines[0]) { + tmfree(pThreadInfo->lines[0]); + pThreadInfo->lines[0] = NULL; + } + if (pThreadInfo->json_array) { + tools_cJSON_Delete(pThreadInfo->json_array); + pThreadInfo->json_array = NULL; + } + pThreadInfo->json_array = tools_cJSON_CreateArray(); + } else { + for (int j = 0; j < generated; j++) { + debugPrint("pThreadInfo->lines[%d]: %s\n", + j, pThreadInfo->lines[j]); + memset(pThreadInfo->lines[j], 0, + pThreadInfo->max_sql_len); + } + } + break; + case STMT_IFACE: + break; + } + + int64_t delay = endTs - startTs; + if (delay <= 0) { + debugPrint("thread[%d]: startTs: %"PRId64", endTs: %"PRId64"\n", + pThreadInfo->threadID, startTs, endTs); + } else { + perfPrint("insert execution time is %.6f s\n", + delay / 1E6); + + int64_t * pDelay = benchCalloc(1, sizeof(int64_t), false); + *pDelay = delay; + if (benchArrayPush(pThreadInfo->delayList, pDelay) == NULL) { + tmfree(pDelay); + } + pThreadInfo->totalDelay += delay; + } + + int64_t currentPrintTime = toolsGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30 * 1000) { + infoPrint( + "thread[%d] has currently inserted rows: " + "%" PRId64 ", period insert rate: %.3f rows/s \n", + pThreadInfo->threadID, pThreadInfo->totalInsertRows, + (double)(pThreadInfo->totalInsertRows - lastTotalInsertRows) * 1000.0/(currentPrintTime - lastPrintTime)); + lastPrintTime = currentPrintTime; + lastTotalInsertRows = pThreadInfo->totalInsertRows; + } + if (i >= stbInfo->insertRows) { + break; + } + } // insertRows + } // tableSeq +free_of_progressive: + cleanupAndPrint(pThreadInfo, "progressive"); + return NULL; +} + +static int initStmtDataValue(SSuperTable *stbInfo, SChildTable *childTbl) { + int32_t columnCount = stbInfo->cols->size; + + char *sampleDataBuf; + if (childTbl) { + sampleDataBuf = childTbl->sampleDataBuf; + } else { + sampleDataBuf = stbInfo->sampleDataBuf; + } + int64_t lenOfOneRow = stbInfo->lenOfCols; + + if (stbInfo->useSampleTs) { + columnCount += 1; // for skipping first column + } + for (int i=0; i < g_arguments->prepared_rand; i++) { + int cursor = 0; + + for (int c = 0; c < columnCount; c++) { + char *restStr = sampleDataBuf + + lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index++) { + if (restStr[index] == ',') { + break; + } + } + + cursor += index + 1; // skip ',' too + if ((0 == c) && stbInfo->useSampleTs) { + continue; + } + + char *tmpStr = calloc(1, index + 1); + if (NULL == tmpStr) { + errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, index + 1); + return -1; + } + Field *col = benchArrayGet(stbInfo->cols, + (stbInfo->useSampleTs?c-1:c)); + char dataType = col->type; + + StmtData *stmtData; + if (childTbl) { + ChildField *childCol = + benchArrayGet(childTbl->childCols, + (stbInfo->useSampleTs?c-1:c)); + stmtData = &childCol->stmtData; + } else { + stmtData = &col->stmtData; + } + + strncpy(tmpStr, restStr, index); + + if (0 == strcmp(tmpStr, "NULL")) { + *(stmtData->is_null + i) = true; + } else { + switch (dataType) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *((int32_t*)stmtData->data + i) = atoi(tmpStr); + break; + case TSDB_DATA_TYPE_FLOAT: +
*((float*)stmtData->data +i) = (float)atof(tmpStr); + break; + case TSDB_DATA_TYPE_DOUBLE: + *((double*)stmtData->data + i) = atof(tmpStr); + break; + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *((int8_t*)stmtData->data + i) = (int8_t)atoi(tmpStr); + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *((int16_t*)stmtData->data + i) = (int16_t)atoi(tmpStr); + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *((int64_t*)stmtData->data + i) = (int64_t)atol(tmpStr); + break; + case TSDB_DATA_TYPE_BOOL: + *((int8_t*)stmtData->data + i) = (int8_t)atoi(tmpStr); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + *((int64_t*)stmtData->data + i) = (int64_t)atol(tmpStr); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + { + size_t tmpLen = strlen(tmpStr); + debugPrint("%s() LN%d, index: %d, " + "tmpStr len: %"PRIu64", col->length: %d\n", + __func__, __LINE__, + i, (uint64_t)tmpLen, col->length); + if (tmpLen-2 > col->length) { + errorPrint("data length %"PRIu64" " + "is larger than column length %d\n", + (uint64_t)tmpLen, col->length); + } + if (tmpLen > 2) { + strncpy((char *)stmtData->data + + i * col->length, + tmpStr+1, + min(col->length, tmpLen - 2)); + } else { + strncpy((char *)stmtData->data + + i*col->length, + "", 1); + } + } + break; + default: + break; + } + } + free(tmpStr); + } + } + return 0; +} + +static void initStmtData(char dataType, void **data, uint32_t length) { + char *tmpP = NULL; + + switch (dataType) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + tmpP = calloc(1, sizeof(int) * g_arguments->prepared_rand); + assert(tmpP); + tmfree(*data); + *data = (void*)tmpP; + break; + + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + tmpP = calloc(1, sizeof(int8_t) * g_arguments->prepared_rand); + assert(tmpP); + tmfree(*data); + *data = (void*)tmpP; + break; + + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + tmpP = calloc(1, sizeof(int16_t) * g_arguments->prepared_rand); + assert(tmpP); + tmfree(*data); + *data = (void*)tmpP; + break; + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + tmpP = calloc(1, sizeof(int64_t) * g_arguments->prepared_rand); + assert(tmpP); + tmfree(*data); + *data = (void*)tmpP; + break; + + case TSDB_DATA_TYPE_BOOL: + tmpP = calloc(1, sizeof(int8_t) * g_arguments->prepared_rand); + assert(tmpP); + tmfree(*data); + *data = (void*)tmpP; + break; + + case TSDB_DATA_TYPE_FLOAT: + tmpP = calloc(1, sizeof(float) * g_arguments->prepared_rand); + assert(tmpP); + tmfree(*data); + *data = (void*)tmpP; + break; + + case TSDB_DATA_TYPE_DOUBLE: + tmpP = calloc(1, sizeof(double) * g_arguments->prepared_rand); + assert(tmpP); + tmfree(*data); + *data = (void*)tmpP; + break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + tmpP = calloc(1, g_arguments->prepared_rand * length); + assert(tmpP); + tmfree(*data); + *data = (void*)tmpP; + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + tmpP = calloc(1, sizeof(int64_t) * g_arguments->prepared_rand); + assert(tmpP); + tmfree(*data); + *data = (void*)tmpP; + break; + + default: + errorPrint("Unknown data type: %s\n", + convertDatatypeToString(dataType)); + exit(EXIT_FAILURE); + } +} + +static int parseBufferToStmtBatchChildTbl(SSuperTable *stbInfo, + SChildTable* childTbl) { + int32_t columnCount = stbInfo->cols->size; + + for (int c = 0; c < columnCount; c++) { + Field *col = benchArrayGet(stbInfo->cols, c); + ChildField *childCol = benchArrayGet(childTbl->childCols, c); + char dataType = 
col->type; + + char *is_null = benchCalloc( + 1, sizeof(char) *g_arguments->prepared_rand, false); + + tmfree(childCol->stmtData.is_null); + childCol->stmtData.is_null = is_null; + + initStmtData(dataType, &(childCol->stmtData.data), col->length); + } + + return initStmtDataValue(stbInfo, childTbl); +} + +static int parseBufferToStmtBatch(SSuperTable* stbInfo) { + int32_t columnCount = stbInfo->cols->size; + + for (int c = 0; c < columnCount; c++) { + Field *col = benchArrayGet(stbInfo->cols, c); + char dataType = col->type; + + char *is_null = benchCalloc( + 1, sizeof(char) *g_arguments->prepared_rand, false); + tmfree(col->stmtData.is_null); + col->stmtData.is_null = is_null; + + initStmtData(dataType, &(col->stmtData.data), col->length); + } + + return initStmtDataValue(stbInfo, NULL); +} + +static int64_t fillChildTblNameByCount(SSuperTable *stbInfo) { + for (int64_t i = 0; i < stbInfo->childTblCount; i++) { + snprintf(stbInfo->childTblArray[i]->name, + TSDB_TABLE_NAME_LEN, + "%s%" PRIu64 "", + stbInfo->childTblPrefix, i); + debugPrint("%s(): %s\n", __func__, + stbInfo->childTblArray[i]->name); + } + + return stbInfo->childTblCount; +} + +static int64_t fillChildTblNameByFromTo(SDataBase *database, + SSuperTable* stbInfo) { + for (int64_t i = stbInfo->childTblFrom; i < stbInfo->childTblTo; i++) { + snprintf(stbInfo->childTblArray[i-stbInfo->childTblFrom]->name, + TSDB_TABLE_NAME_LEN, + "%s%" PRIu64 "", + stbInfo->childTblPrefix, i); + } + + return (stbInfo->childTblTo-stbInfo->childTblFrom); +} + +static int64_t fillChildTblNameByLimitOffset(SDataBase *database, + SSuperTable* stbInfo) { + SBenchConn* conn = initBenchConn(); + if (NULL == conn) { + return -1; + } + char cmd[SHORT_1K_SQL_BUFF_LEN] = "\0"; + if (g_arguments->taosc_version == 3) { + snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, + "SELECT DISTINCT(TBNAME) FROM %s.`%s` LIMIT %" PRId64 + " OFFSET %" PRIu64 "", + database->dbName, stbInfo->stbName, stbInfo->childTblLimit, + stbInfo->childTblOffset); + } else { + snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, + "SELECT TBNAME FROM %s.`%s` LIMIT %" PRId64 + " OFFSET %" PRIu64 "", + database->dbName, stbInfo->stbName, stbInfo->childTblLimit, + stbInfo->childTblOffset); + } + debugPrint("cmd: %s\n", cmd); + TAOS_RES *res = taos_query(conn->taos, cmd); + int32_t code = taos_errno(res); + int64_t count = 0; + if (code) { + printErrCmdCodeStr(cmd, code, res); + closeBenchConn(conn); + return -1; + } + TAOS_ROW row = NULL; + while ((row = taos_fetch_row(res)) != NULL) { + int *lengths = taos_fetch_lengths(res); + strncpy(stbInfo->childTblArray[count]->name, row[0], lengths[0]); + stbInfo->childTblArray[count]->name[lengths[0] + 1] = '\0'; + debugPrint("stbInfo->childTblArray[%" PRId64 "]->name: %s\n", + count, stbInfo->childTblArray[count]->name); + count++; + } + taos_free_result(res); + closeBenchConn(conn); + return count; +} + +static void preProcessArgument(SSuperTable *stbInfo) { + if (stbInfo->interlaceRows > g_arguments->reqPerReq) { + infoPrint( + "interlaceRows(%d) is larger than record per request(%u), which " + "will be set to %u\n", + stbInfo->interlaceRows, g_arguments->reqPerReq, + g_arguments->reqPerReq); + stbInfo->interlaceRows = g_arguments->reqPerReq; + } + + if (stbInfo->interlaceRows > stbInfo->insertRows) { + infoPrint( + "interlaceRows larger than insertRows %d > %" PRId64 "\n", + stbInfo->interlaceRows, stbInfo->insertRows); + infoPrint("%s", "interlaceRows will be set to 0\n"); + stbInfo->interlaceRows = 0; + } + + if (stbInfo->interlaceRows == 0 + && g_arguments->reqPerReq > 
stbInfo->insertRows) { + infoPrint("record per request (%u) is larger than " + "insert rows (%"PRIu64")" + " in progressive mode, which will be set to %"PRIu64"\n", + g_arguments->reqPerReq, stbInfo->insertRows, + stbInfo->insertRows); + g_arguments->reqPerReq = stbInfo->insertRows; + } + + if (stbInfo->interlaceRows > 0 && stbInfo->iface == STMT_IFACE + && stbInfo->autoTblCreating) { + infoPrint("%s", + "not support autocreate table with interlace row in stmt " + "insertion, will change to progressive mode\n"); + stbInfo->interlaceRows = 0; + } +} + +static int printTotalDelay(SDataBase *database, + int64_t totalDelay, + BArray *total_delay_list, + int threads, + int64_t totalInsertRows, + int64_t start, int64_t end) { + succPrint("Spent %.6f seconds to insert rows: %" PRIu64 + " with %d thread(s) into %s %.2f records/second\n", + (end - start)/1E6, totalInsertRows, threads, + database->dbName, + (double)(totalInsertRows / ((end - start)/1E6))); + if (!total_delay_list->size) { + return -1; + } + + succPrint("insert delay, " + "min: %.4fms, " + "avg: %.4fms, " + "p90: %.4fms, " + "p95: %.4fms, " + "p99: %.4fms, " + "max: %.4fms\n", + *(int64_t *)(benchArrayGet(total_delay_list, 0))/1E3, + (double)totalDelay/total_delay_list->size/1E3, + *(int64_t *)(benchArrayGet(total_delay_list, + (int32_t)(total_delay_list->size + * 0.9)))/1E3, + *(int64_t *)(benchArrayGet(total_delay_list, + (int32_t)(total_delay_list->size + * 0.95)))/1E3, + *(int64_t *)(benchArrayGet(total_delay_list, + (int32_t)(total_delay_list->size + * 0.99)))/1E3, + *(int64_t *)(benchArrayGet(total_delay_list, + (int32_t)(total_delay_list->size + - 1)))/1E3); + return 0; +} + +static int64_t fillChildTblNameImp(SDataBase *database, SSuperTable *stbInfo) { + int64_t ntables; + if (stbInfo->childTblLimit) { + ntables = fillChildTblNameByLimitOffset(database, stbInfo); + } else if (stbInfo->childTblFrom || stbInfo->childTblTo) { + ntables = fillChildTblNameByFromTo(database, stbInfo); + } else { + ntables = fillChildTblNameByCount(stbInfo); + } + return ntables; +} + +static int64_t fillChildTblName(SDataBase *database, SSuperTable *stbInfo) { + int64_t ntables = stbInfo->childTblCount; + stbInfo->childTblArray = benchCalloc(stbInfo->childTblCount, + sizeof(SChildTable*), true); + for (int64_t child = 0; child < stbInfo->childTblCount; child++) { + stbInfo->childTblArray[child] = + benchCalloc(1, sizeof(SChildTable), true); + } + + if (stbInfo->childTblCount == 1 && stbInfo->tags->size == 0) { + // Normal table + snprintf(stbInfo->childTblArray[0]->name, TSDB_TABLE_NAME_LEN, + "%s", stbInfo->stbName); + } else if ((stbInfo->iface != SML_IFACE + && stbInfo->iface != SML_REST_IFACE) + && stbInfo->childTblExists) { + ntables = fillChildTblNameImp(database, stbInfo); + } else { + ntables = fillChildTblNameByCount(stbInfo); + } + + return ntables; +} + +static int startMultiThreadInsertData(SDataBase* database, + SSuperTable* stbInfo) { + if ((stbInfo->iface == SML_IFACE || stbInfo->iface == SML_REST_IFACE) + && !stbInfo->use_metric) { + errorPrint("%s", "schemaless cannot work without stable\n"); + return -1; + } + + preProcessArgument(stbInfo); + + int64_t ntables; + if (stbInfo->childTblTo > 0) { + ntables = stbInfo->childTblTo - stbInfo->childTblFrom; + } else if (stbInfo->childTblLimit > 0 && stbInfo->childTblExists) { + ntables = stbInfo->childTblLimit; + } else { + ntables = stbInfo->childTblCount; + } + if (ntables == 0) { + return 0; + } + + uint64_t tableFrom = 0; + int32_t threads = g_arguments->nthreads; + int64_t a = 0, b 
= 0; + +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + if ((0 == stbInfo->interlaceRows) + && (g_arguments->nthreads_auto)) { + SBenchConn* conn = initBenchConn(); + if (NULL == conn) { + return -1; + } + + for (int64_t i = 0; i < stbInfo->childTblCount; i++) { + int vgId; + int ret = taos_get_table_vgId( + conn->taos, database->dbName, + stbInfo->childTblArray[i]->name, &vgId); + if (ret < 0) { + errorPrint("Failed to get %s db's %s table's vgId\n", + database->dbName, + stbInfo->childTblArray[i]->name); + closeBenchConn(conn); + return -1; + } + debugPrint("Db %s\'s table\'s %s vgId is: %d\n", + database->dbName, + stbInfo->childTblArray[i]->name, vgId); + for (int32_t v = 0; v < database->vgroups; v++) { + SVGroup *vg = benchArrayGet(database->vgArray, v); + if (vgId == vg->vgId) { + vg->tbCountPerVgId++; + } + } + } + + threads = 0; + for (int v = 0; v < database->vgroups; v++) { + SVGroup *vg = benchArrayGet(database->vgArray, v); + infoPrint("Total %"PRId64" tables on bb %s's vgroup %d (id: %d)\n", + vg->tbCountPerVgId, database->dbName, v, vg->vgId); + if (vg->tbCountPerVgId) { + threads++; + } else { + continue; + } + vg->childTblArray = benchCalloc( + vg->tbCountPerVgId, sizeof(SChildTable*), true); + vg->tbOffset = 0; + } + for (int64_t i = 0; i < stbInfo->childTblCount; i++) { + int vgId; + int ret = taos_get_table_vgId( + conn->taos, database->dbName, + stbInfo->childTblArray[i]->name, &vgId); + if (ret < 0) { + errorPrint("Failed to get %s db's %s table's vgId\n", + database->dbName, + stbInfo->childTblArray[i]->name); + + closeBenchConn(conn); + return -1; + } + debugPrint("Db %s\'s table\'s %s vgId is: %d\n", + database->dbName, + stbInfo->childTblArray[i]->name, vgId); + for (int32_t v = 0; v < database->vgroups; v++) { + SVGroup *vg = benchArrayGet(database->vgArray, v); + if (vgId == vg->vgId) { + vg->childTblArray[vg->tbOffset] = + stbInfo->childTblArray[i]; + vg->tbOffset++; + } + } + } + closeBenchConn(conn); + } else { + a = ntables / threads; + if (a < 1) { + threads = (int32_t)ntables; + a = 1; + } + b = 0; + if (threads != 0) { + b = ntables % threads; + } + } + + int32_t vgFrom = 0; +#else + a = ntables / threads; + if (a < 1) { + threads = (int32_t)ntables; + a = 1; + } + b = 0; + if (threads != 0) { + b = ntables % threads; + } +#endif // TD_VER_COMPATIBLE_3_0_0_0 + pthread_t *pids = benchCalloc(1, threads * sizeof(pthread_t), true); + threadInfo *infos = benchCalloc(1, threads * sizeof(threadInfo), true); + + for (int32_t i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + pThreadInfo->threadID = i; + pThreadInfo->dbInfo = database; + pThreadInfo->stbInfo = stbInfo; + pThreadInfo->start_time = stbInfo->startTimestamp; + pThreadInfo->totalInsertRows = 0; + pThreadInfo->samplePos = 0; +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + if ((0 == stbInfo->interlaceRows) + && (g_arguments->nthreads_auto)) { + int32_t j; + for (j = vgFrom; i < database->vgroups; j++) { + SVGroup *vg = benchArrayGet(database->vgArray, j); + if (0 == vg->tbCountPerVgId) { + continue; + } + pThreadInfo->vg = vg; + pThreadInfo->start_table_from = 0; + pThreadInfo->ntables = vg->tbCountPerVgId; + pThreadInfo->end_table_to = vg->tbCountPerVgId-1; + break; + } + vgFrom = j + 1; + } else { + pThreadInfo->start_table_from = tableFrom; + pThreadInfo->ntables = i < b ? a + 1 : a; + pThreadInfo->end_table_to = (i < b)?(tableFrom+a):(tableFrom+a-1); + tableFrom = pThreadInfo->end_table_to + 1; + } +#else + pThreadInfo->start_table_from = tableFrom; + pThreadInfo->ntables = i < b ? 
a + 1 : a; + pThreadInfo->end_table_to = (i < b)?(tableFrom+a):(tableFrom+a-1); + tableFrom = pThreadInfo->end_table_to + 1; +#endif // TD_VER_COMPATIBLE_3_0_0_0 + pThreadInfo->delayList = benchArrayInit(1, sizeof(int64_t)); + switch (stbInfo->iface) { + case REST_IFACE: { + if (stbInfo->interlaceRows > 0) { + pThreadInfo->buffer = new_ds(0); + } else { + pThreadInfo->buffer = + benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, true); + } + int sockfd = createSockFd(); + if (sockfd < 0) { + FREE_PIDS_INFOS_RETURN_MINUS_1(); + } + pThreadInfo->sockfd = sockfd; + break; + } + case STMT_IFACE: { + pThreadInfo->conn = initBenchConn(); + if (NULL == pThreadInfo->conn) { + FREE_PIDS_INFOS_RETURN_MINUS_1(); + } + pThreadInfo->conn->stmt = + taos_stmt_init(pThreadInfo->conn->taos); + if (NULL == pThreadInfo->conn->stmt) { + errorPrint("taos_stmt_init() failed, reason: %s\n", + taos_errstr(NULL)); + FREE_RESOURCE(); + return -1; + } + if (taos_select_db(pThreadInfo->conn->taos, database->dbName)) { + errorPrint("taos select database(%s) failed\n", + database->dbName); + FREE_RESOURCE(); + return -1; + } + if (!stbInfo->autoTblCreating) { + if (prepareStmt(stbInfo, pThreadInfo->conn->stmt, 0)) { + FREE_RESOURCE(); + return -1; + } + } + + pThreadInfo->bind_ts = benchCalloc(1, sizeof(int64_t), true); + pThreadInfo->bind_ts_array = + benchCalloc(1, sizeof(int64_t)*g_arguments->reqPerReq, + true); + pThreadInfo->bindParams = benchCalloc( + 1, sizeof(TAOS_MULTI_BIND)*(stbInfo->cols->size + 1), + true); + pThreadInfo->is_null = benchCalloc(1, g_arguments->reqPerReq, + true); + parseBufferToStmtBatch(stbInfo); + for (int64_t child = 0; + child < stbInfo->childTblCount; child++) { + SChildTable *childTbl = stbInfo->childTblArray[child]; + if (childTbl->useOwnSample) { + parseBufferToStmtBatchChildTbl(stbInfo, childTbl); + } + } + + break; + } + case SML_REST_IFACE: { + int sockfd = createSockFd(); + if (sockfd < 0) { + free(pids); + free(infos); + return -1; + } + pThreadInfo->sockfd = sockfd; + } + case SML_IFACE: { + pThreadInfo->conn = initBenchConn(); + if (pThreadInfo->conn == NULL) { + errorPrint("%s() init connection failed\n", __func__); + FREE_RESOURCE(); + return -1; + } + if (taos_select_db(pThreadInfo->conn->taos, database->dbName)) { + errorPrint("taos select database(%s) failed\n", + database->dbName); + FREE_RESOURCE(); + return -1; + } + pThreadInfo->max_sql_len = + stbInfo->lenOfCols + stbInfo->lenOfTags; + if (stbInfo->iface == SML_REST_IFACE) { + pThreadInfo->buffer = + benchCalloc(1, g_arguments->reqPerReq * + (1 + pThreadInfo->max_sql_len), true); + } + int protocol = stbInfo->lineProtocol; + if (TSDB_SML_JSON_PROTOCOL != protocol + && SML_JSON_TAOS_FORMAT != protocol) { + pThreadInfo->sml_tags = + (char **)benchCalloc(pThreadInfo->ntables, + sizeof(char *), true); + for (int t = 0; t < pThreadInfo->ntables; t++) { + pThreadInfo->sml_tags[t] = + benchCalloc(1, stbInfo->lenOfTags, true); + } + + for (int t = 0; t < pThreadInfo->ntables; t++) { + if (generateRandData( + stbInfo, pThreadInfo->sml_tags[t], + stbInfo->lenOfTags, + stbInfo->lenOfCols + stbInfo->lenOfTags, + stbInfo->tags, 1, true, NULL)) { + return -1; + } + debugPrint("pThreadInfo->sml_tags[%d]: %s\n", t, + pThreadInfo->sml_tags[t]); + } + pThreadInfo->lines = + benchCalloc(g_arguments->reqPerReq, + sizeof(char *), true); + + for (int j = 0; (j < g_arguments->reqPerReq + && !g_arguments->terminate); j++) { + pThreadInfo->lines[j] = + benchCalloc(1, pThreadInfo->max_sql_len, true); + } + } else { + pThreadInfo->json_array = 
tools_cJSON_CreateArray(); + pThreadInfo->sml_json_tags = tools_cJSON_CreateArray(); + pThreadInfo->sml_tags_json_array = (char **)benchCalloc( + pThreadInfo->ntables, sizeof(char *), true); + for (int t = 0; t < pThreadInfo->ntables; t++) { + if (stbInfo->lineProtocol == TSDB_SML_JSON_PROTOCOL) { + generateSmlJsonTags( + pThreadInfo->sml_json_tags, + pThreadInfo->sml_tags_json_array, + stbInfo, + pThreadInfo->start_table_from, t); + } else { + generateSmlTaosJsonTags( + pThreadInfo->sml_json_tags, stbInfo, + pThreadInfo->start_table_from, t); + } + } + pThreadInfo->lines = (char **)benchCalloc( + 1, sizeof(char *), true); + if ((0 == stbInfo->interlaceRows) + && (TSDB_SML_JSON_PROTOCOL == protocol)) { + pThreadInfo->line_buf_len = + g_arguments->reqPerReq * + accumulateRowLen(pThreadInfo->stbInfo->tags, + pThreadInfo->stbInfo->iface); + debugPrint("%s() LN%d, line_buf_len=%d\n", + __func__, __LINE__, pThreadInfo->line_buf_len); + pThreadInfo->lines[0] = benchCalloc( + 1, pThreadInfo->line_buf_len, true); + pThreadInfo->sml_json_value_array = + (char **)benchCalloc( + pThreadInfo->ntables, sizeof(char *), true); + for (int t = 0; t < pThreadInfo->ntables; t++) { + generateSmlJsonValues( + pThreadInfo->sml_json_value_array, stbInfo, t); + } + } + } + break; + } + case TAOSC_IFACE: { + pThreadInfo->conn = initBenchConn(); + if (pThreadInfo->conn == NULL) { + errorPrint("%s() failed to connect\n", __func__); + FREE_RESOURCE(); + return -1; + } + char* command = benchCalloc(1, SHORT_1K_SQL_BUFF_LEN, false); + snprintf(command, SHORT_1K_SQL_BUFF_LEN, + g_arguments->escape_character + ? "USE `%s`" + : "USE %s", + database->dbName); + if (queryDbExecCall(pThreadInfo->conn, command)) { + errorPrint("taos select database(%s) failed\n", + database->dbName); + FREE_RESOURCE(); + tmfree(command); + return -1; + } + tmfree(command); + command = NULL; + + if (stbInfo->interlaceRows > 0) { + pThreadInfo->buffer = new_ds(0); + } else { + pThreadInfo->buffer = + benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, true); + if (g_arguments->check_sql) { + pThreadInfo->csql = + benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, true); + memset(pThreadInfo->csql, 0, TSDB_MAX_ALLOWED_SQL_LEN); + } + } + + break; + } + default: + break; + } + } + + infoPrint("Estimate memory usage: %.2fMB\n", + (double)g_memoryUsage / 1048576); + prompt(0); + + // create threads + int threadCnt = 0; + for (int i = 0; (i < threads && !g_arguments->terminate); i++) { + threadInfo *pThreadInfo = infos + i; + if (stbInfo->interlaceRows > 0) { + pthread_create(pids + i, NULL, + syncWriteInterlace, pThreadInfo); + } else { + pthread_create(pids + i, NULL, + syncWriteProgressive, pThreadInfo); + } + threadCnt ++; + } + + int64_t start = toolsGetTimestampUs(); + + // wait threads + for (int i = 0; i < threadCnt; i++) { + infoPrint(" pthread_join %d ...\n", i); + pthread_join(pids[i], NULL); + } + + int64_t end = toolsGetTimestampUs()+1; + + if (g_arguments->terminate) toolsMsleep(100); + + BArray * total_delay_list = benchArrayInit(1, sizeof(int64_t)); + int64_t totalDelay = 0; + uint64_t totalInsertRows = 0; + + // free threads resource + for (int i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + // free check sql + if (pThreadInfo->csql) { + tmfree(pThreadInfo->csql); + pThreadInfo->csql = NULL; + } + + int protocol = stbInfo->lineProtocol; + switch (stbInfo->iface) { + case REST_IFACE: + if (g_arguments->terminate) + toolsMsleep(100); + destroySockFd(pThreadInfo->sockfd); + if (stbInfo->interlaceRows > 0) { + 
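                    // Cleanup mirrors the per-thread setup above: the interlace
                    // path built its request buffer with new_ds() (a dynamic
                    // string), so it is released with free_ds(); the progressive
                    // path in the else branch below allocated a plain
                    // TSDB_MAX_ALLOWED_SQL_LEN buffer and frees it with tmfree().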
free_ds(&pThreadInfo->buffer); + } else { + tmfree(pThreadInfo->buffer); + pThreadInfo->buffer = NULL; + } + break; + case SML_REST_IFACE: + if (g_arguments->terminate) + toolsMsleep(100); + tmfree(pThreadInfo->buffer); + // on-purpose no break here + case SML_IFACE: + if (TSDB_SML_JSON_PROTOCOL != protocol + && SML_JSON_TAOS_FORMAT != protocol) { + for (int t = 0; t < pThreadInfo->ntables; t++) { + tmfree(pThreadInfo->sml_tags[t]); + } + for (int j = 0; j < g_arguments->reqPerReq; j++) { + tmfree(pThreadInfo->lines[j]); + } + tmfree(pThreadInfo->sml_tags); + pThreadInfo->sml_tags = NULL; + } else { + for (int t = 0; t < pThreadInfo->ntables; t++) { + tmfree(pThreadInfo->sml_tags_json_array[t]); + } + tmfree(pThreadInfo->sml_tags_json_array); + pThreadInfo->sml_tags_json_array = NULL; + if (pThreadInfo->sml_json_tags) { + tools_cJSON_Delete(pThreadInfo->sml_json_tags); + pThreadInfo->sml_json_tags = NULL; + } + if (pThreadInfo->json_array) { + tools_cJSON_Delete(pThreadInfo->json_array); + pThreadInfo->json_array = NULL; + } + } + if (pThreadInfo->lines) { + if ((0 == stbInfo->interlaceRows) + && (TSDB_SML_JSON_PROTOCOL == protocol)) { + tmfree(pThreadInfo->lines[0]); + for (int t = 0; t < pThreadInfo->ntables; t++) { + tmfree(pThreadInfo->sml_json_value_array[t]); + } + tmfree(pThreadInfo->sml_json_value_array); + } + tmfree(pThreadInfo->lines); + pThreadInfo->lines = NULL; + } + break; + + case STMT_IFACE: + taos_stmt_close(pThreadInfo->conn->stmt); + tmfree(pThreadInfo->bind_ts); + tmfree(pThreadInfo->bind_ts_array); + tmfree(pThreadInfo->bindParams); + tmfree(pThreadInfo->is_null); + break; + + case TAOSC_IFACE: + if (stbInfo->interlaceRows > 0) { + free_ds(&pThreadInfo->buffer); + } else { + tmfree(pThreadInfo->buffer); + pThreadInfo->buffer = NULL; + } + break; + + default: + break; + } + totalInsertRows += pThreadInfo->totalInsertRows; + totalDelay += pThreadInfo->totalDelay; + benchArrayAddBatch(total_delay_list, pThreadInfo->delayList->pData, + pThreadInfo->delayList->size); + tmfree(pThreadInfo->delayList); + pThreadInfo->delayList = NULL; + // free conn + if (pThreadInfo->conn) { + closeBenchConn(pThreadInfo->conn); + pThreadInfo->conn = NULL; + } + } + + // calculate result + qsort(total_delay_list->pData, total_delay_list->size, + total_delay_list->elemSize, compare); + + if (g_arguments->terminate) toolsMsleep(100); + + free(pids); + free(infos); + + int ret = printTotalDelay(database, totalDelay, + total_delay_list, threads, + totalInsertRows, start, end); + benchArrayDestroy(total_delay_list); + if (g_fail || ret) { + return -1; + } + return 0; +} + +static int getStbInsertedRows(char* dbName, char* stbName, TAOS* taos) { + int rows = 0; + char command[SHORT_1K_SQL_BUFF_LEN]; + snprintf(command, SHORT_1K_SQL_BUFF_LEN, "SELECT COUNT(*) FROM %s.%s", + dbName, stbName); + TAOS_RES* res = taos_query(taos, command); + int code = taos_errno(res); + if (code != 0) { + printErrCmdCodeStr(command, code, res); + return -1; + } + TAOS_ROW row = taos_fetch_row(res); + if (row == NULL) { + rows = 0; + } else { + rows = (int)*(int64_t*)row[0]; + } + taos_free_result(res); + return rows; +} + +static void create_tsma(TSMA* tsma, SBenchConn* conn, char* stbName) { + char command[SHORT_1K_SQL_BUFF_LEN]; + int len = snprintf(command, SHORT_1K_SQL_BUFF_LEN, + "CREATE sma INDEX %s ON %s function(%s) " + "INTERVAL (%s) SLIDING (%s)", + tsma->name, stbName, tsma->func, + tsma->interval, tsma->sliding); + if (tsma->custom) { + snprintf(command + len, SHORT_1K_SQL_BUFF_LEN - len, + " %s", 
tsma->custom); + } + int code = queryDbExecCall(conn, command); + if (code == 0) { + infoPrint("successfully create tsma with command <%s>\n", command); + } +} + +static void* create_tsmas(void* args) { + tsmaThreadInfo* pThreadInfo = (tsmaThreadInfo*) args; + int inserted_rows = 0; + SBenchConn* conn = initBenchConn(); + if (NULL == conn) { + return NULL; + } + int finished = 0; + if (taos_select_db(conn->taos, pThreadInfo->dbName)) { + errorPrint("failed to use database (%s)\n", pThreadInfo->dbName); + closeBenchConn(conn); + return NULL; + } + while (finished < pThreadInfo->tsmas->size && inserted_rows >= 0) { + inserted_rows = (int)getStbInsertedRows( + pThreadInfo->dbName, pThreadInfo->stbName, conn->taos); + for (int i = 0; i < pThreadInfo->tsmas->size; i++) { + TSMA* tsma = benchArrayGet(pThreadInfo->tsmas, i); + if (!tsma->done && inserted_rows >= tsma->start_when_inserted) { + create_tsma(tsma, conn, pThreadInfo->stbName); + tsma->done = true; + finished++; + break; + } + } + toolsMsleep(10); + } + benchArrayDestroy(pThreadInfo->tsmas); + closeBenchConn(conn); + return NULL; +} + +static int32_t createStream(SSTREAM* stream) { + int32_t code = -1; + char * command = benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, false); + snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, "DROP STREAM IF EXISTS %s", + stream->stream_name); + infoPrint("%s\n", command); + SBenchConn* conn = initBenchConn(); + if (NULL == conn) { + goto END_STREAM; + } + + code = queryDbExecCall(conn, command); + int32_t trying = g_arguments->keep_trying; + while (code && trying) { + infoPrint("will sleep %"PRIu32" milliseconds then re-drop stream %s\n", + g_arguments->trying_interval, stream->stream_name); + toolsMsleep(g_arguments->trying_interval); + code = queryDbExecCall(conn, command); + if (trying != -1) { + trying--; + } + } + + if (code) { + closeBenchConn(conn); + goto END_STREAM; + } + + memset(command, 0, TSDB_MAX_ALLOWED_SQL_LEN); + int pos = snprintf(command, TSDB_MAX_ALLOWED_SQL_LEN, + "CREATE STREAM IF NOT EXISTS %s ", stream->stream_name); + if (stream->trigger_mode[0] != '\0') { + pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, + "TRIGGER %s ", stream->trigger_mode); + } + if (stream->watermark[0] != '\0') { + pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, + "WATERMARK %s ", stream->watermark); + } + if (stream->ignore_update[0] != '\0') { + pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, + "IGNORE UPDATE %s ", stream->ignore_update); + } + if (stream->ignore_expired[0] != '\0') { + pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, + "IGNORE EXPIRED %s ", stream->ignore_expired); + } + if (stream->fill_history[0] != '\0') { + pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, + "FILL_HISTORY %s ", stream->fill_history); + } + pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, + "INTO %s ", stream->stream_stb); + if (stream->stream_stb_field[0] != '\0') { + pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, + "%s ", stream->stream_stb_field); + } + if (stream->stream_tag_field[0] != '\0') { + pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, + "TAGS%s ", stream->stream_tag_field); + } + if (stream->subtable[0] != '\0') { + pos += snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, + "SUBTABLE%s ", stream->subtable); + } + snprintf(command + pos, TSDB_MAX_ALLOWED_SQL_LEN - pos, + "as %s", stream->source_sql); + infoPrint("%s\n", command); + + code = queryDbExecCall(conn, command); + trying = 
g_arguments->keep_trying; + while (code && trying) { + infoPrint("will sleep %"PRIu32" milliseconds " + "then re-create stream %s\n", + g_arguments->trying_interval, stream->stream_name); + toolsMsleep(g_arguments->trying_interval); + code = queryDbExecCall(conn, command); + if (trying != -1) { + trying--; + } + } + + closeBenchConn(conn); +END_STREAM: + tmfree(command); + return code; +} + +int insertTestProcess() { + prompt(0); + + encodeAuthBase64(); + for (int i = 0; i < g_arguments->databases->size; i++) { + if (REST_IFACE == g_arguments->iface) { + if (0 != convertServAddr(g_arguments->iface, + false, + 1)) { + return -1; + } + } + SDataBase * database = benchArrayGet(g_arguments->databases, i); + + if (database->drop && !(g_arguments->supplementInsert)) { + if (database->superTbls) { + SSuperTable * stbInfo = benchArrayGet(database->superTbls, 0); + if (stbInfo && (REST_IFACE == stbInfo->iface)) { + if (0 != convertServAddr(stbInfo->iface, + stbInfo->tcpTransfer, + stbInfo->lineProtocol)) { + return -1; + } + } + } + if (createDatabase(database)) { + errorPrint("failed to create database (%s)\n", + database->dbName); + return -1; + } + succPrint("created database (%s)\n", database->dbName); + } + } + for (int i = 0; i < g_arguments->databases->size; i++) { + SDataBase * database = benchArrayGet(g_arguments->databases, i); + if (database->superTbls) { + for (int j = 0; j < database->superTbls->size; j++) { + SSuperTable * stbInfo = benchArrayGet(database->superTbls, j); + if (stbInfo->iface != SML_IFACE + && stbInfo->iface != SML_REST_IFACE + && !stbInfo->childTblExists) { +#ifdef WEBSOCKET + if (g_arguments->websocket) { + dropSuperTable(database, stbInfo); + } +#endif + if (getSuperTableFromServer(database, stbInfo) != 0) { + if (createSuperTable(database, stbInfo)) { + return -1; + } + } + } + fillChildTblName(database, stbInfo); + if (0 != prepareSampleData(database, stbInfo)) { + return -1; + } + } + } + } + + if (g_arguments->taosc_version == 3) { + for (int i = 0; i < g_arguments->databases->size; i++) { + SDataBase* database = benchArrayGet(g_arguments->databases, i); + if (database->superTbls) { + for (int j = 0; (j < database->superTbls->size + && !g_arguments->terminate); j++) { + SSuperTable* stbInfo = + benchArrayGet(database->superTbls, j); + if (stbInfo->tsmas == NULL) { + continue; + } + if (stbInfo->tsmas->size > 0) { + tsmaThreadInfo* pThreadInfo = + benchCalloc(1, sizeof(tsmaThreadInfo), true); + pthread_t tsmas_pid = {0}; + pThreadInfo->dbName = database->dbName; + pThreadInfo->stbName = stbInfo->stbName; + pThreadInfo->tsmas = stbInfo->tsmas; + pthread_create(&tsmas_pid, NULL, + create_tsmas, pThreadInfo); + } + } + } + } + } + + if (createChildTables()) return -1; + + if (g_arguments->taosc_version == 3) { + for (int j = 0; j < g_arguments->streams->size; j++) { + SSTREAM * stream = benchArrayGet(g_arguments->streams, j); + if (stream->drop) { + if (createStream(stream)) { + return -1; + } + } + } + } + + // create sub threads for inserting data + for (int i = 0; i < g_arguments->databases->size; i++) { + SDataBase * database = benchArrayGet(g_arguments->databases, i); + if (database->superTbls) { + for (uint64_t j = 0; j < database->superTbls->size; j++) { + SSuperTable * stbInfo = benchArrayGet(database->superTbls, j); + if (stbInfo->insertRows == 0) { + continue; + } + prompt(stbInfo->non_stop); + if (startMultiThreadInsertData(database, stbInfo)) { + return -1; + } + } + } + } + return 0; +} diff --git a/src/benchJsonOpt.c b/src/benchJsonOpt.c index 
c1365de0..d89e3cb6 100644 --- a/src/benchJsonOpt.c +++ b/src/benchJsonOpt.c @@ -1,1920 +1,1933 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the MIT license as published by the Free Software - * Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - */ - -#include - -extern char g_configDir[MAX_PATH_LEN]; - -static int getColumnAndTagTypeFromInsertJsonFile( - tools_cJSON * superTblObj, SSuperTable *stbInfo) { - int32_t code = -1; - - // columns - tools_cJSON *columnsObj = - tools_cJSON_GetObjectItem(superTblObj, "columns"); - if (!tools_cJSON_IsArray(columnsObj)) { - goto PARSE_OVER; - } - benchArrayClear(stbInfo->cols); - - int columnSize = tools_cJSON_GetArraySize(columnsObj); - - int index = 0; - for (int k = 0; k < columnSize; ++k) { - bool sma = false; - bool customName = false; - uint8_t type = 0; - int count = 1; - int64_t max = RAND_MAX >> 1; - int64_t min = 0; - int32_t length = 4; - - tools_cJSON *column = tools_cJSON_GetArrayItem(columnsObj, k); - if (!tools_cJSON_IsObject(column)) { - errorPrint("%s", "Invalid column format in json\n"); - goto PARSE_OVER; - } - tools_cJSON *countObj = tools_cJSON_GetObjectItem(column, "count"); - if (tools_cJSON_IsNumber(countObj)) { - count = (int)countObj->valueint; - } else { - count = 1; - } - - tools_cJSON *dataName = tools_cJSON_GetObjectItem(column, "name"); - if (tools_cJSON_IsString(dataName)) { - customName = true; - } - - // column info - tools_cJSON *dataType = tools_cJSON_GetObjectItem(column, "type"); - if (!tools_cJSON_IsString(dataType)) { - goto PARSE_OVER; - } - type = convertStringToDatatype(dataType->valuestring, 0); - - tools_cJSON *dataMax = tools_cJSON_GetObjectItem(column, "max"); - if (tools_cJSON_IsNumber(dataMax)) { - max = dataMax->valueint; - } else { - max = convertDatatypeToDefaultMax(type); - } - - tools_cJSON *dataMin = tools_cJSON_GetObjectItem(column, "min"); - if (tools_cJSON_IsNumber(dataMin)) { - min = dataMin->valueint; - } else { - min = convertDatatypeToDefaultMin(type); - } - - tools_cJSON *dataValues = tools_cJSON_GetObjectItem(column, "values"); - - if (g_arguments->taosc_version == 3) { - tools_cJSON *sma_value = tools_cJSON_GetObjectItem(column, "sma"); - if (tools_cJSON_IsString(sma_value) && - (0 == strcasecmp(sma_value->valuestring, "yes"))) { - sma = true; - } - } - - tools_cJSON * dataLen = tools_cJSON_GetObjectItem(column, "len"); - if (tools_cJSON_IsNumber(dataLen)) { - length = (int32_t)dataLen->valueint; - } else { - if (type == TSDB_DATA_TYPE_BINARY - || type == TSDB_DATA_TYPE_JSON - || type == TSDB_DATA_TYPE_NCHAR) { - length = g_arguments->binwidth; - } else { - length = convertTypeToLength(type); - } - } - - for (int n = 0; n < count; ++n) { - Field * col = benchCalloc(1, sizeof(Field), true); - benchArrayPush(stbInfo->cols, col); - col = benchArrayGet(stbInfo->cols, stbInfo->cols->size - 1); - col->type = type; - col->length = length; - if (length == 0) { - col->null = true; - } - col->sma = sma; - col->max = max; - col->min = min; - col->values = dataValues; - if (customName) { - if (n >= 1) { - snprintf(col->name, TSDB_COL_NAME_LEN, - "%s_%d", dataName->valuestring, n); - } else { - snprintf(col->name, TSDB_COL_NAME_LEN, - "%s", dataName->valuestring); - } - } else { - snprintf(col->name, TSDB_COL_NAME_LEN, "c%d", index); - } - 
index++; - } - } - - index = 0; - // tags - benchArrayClear(stbInfo->tags); - tools_cJSON *tags = tools_cJSON_GetObjectItem(superTblObj, "tags"); - if (!tools_cJSON_IsArray(tags)) { - return 0; - } - - int tagSize = tools_cJSON_GetArraySize(tags); - - stbInfo->use_metric = true; - for (int k = 0; k < tagSize; ++k) { - bool customName = false; - uint8_t type = 0; - int count = 1; - int64_t max = RAND_MAX >> 1; - int64_t min = 0; - int32_t length = 4; - tools_cJSON *tagObj = tools_cJSON_GetArrayItem(tags, k); - if (!tools_cJSON_IsObject(tagObj)) { - errorPrint("%s", "Invalid tag format in json\n"); - goto PARSE_OVER; - } - tools_cJSON *countObj = tools_cJSON_GetObjectItem(tagObj, "count"); - if (tools_cJSON_IsNumber(countObj)) { - count = (int)countObj->valueint; - } else { - count = 1; - } - - tools_cJSON *dataName = tools_cJSON_GetObjectItem(tagObj, "name"); - if (tools_cJSON_IsString(dataName)) { - customName = true; - } - - tools_cJSON *dataType = tools_cJSON_GetObjectItem(tagObj, "type"); - if (!tools_cJSON_IsString(dataType)) { - goto PARSE_OVER; - } - type = convertStringToDatatype(dataType->valuestring, 0); - - if ((tagSize == 1) && (type == TSDB_DATA_TYPE_JSON)) { - Field * tag = benchCalloc(1, sizeof(Field), true); - benchArrayPush(stbInfo->tags, tag); - tag = benchArrayGet(stbInfo->tags, stbInfo->tags->size - 1); - if (customName) { - snprintf(tag->name, TSDB_COL_NAME_LEN, - "%s", dataName->valuestring); - } else { - snprintf(tag->name, TSDB_COL_NAME_LEN, "jtag"); - } - tag->type = type; - tag->length = length; - stbInfo->tags->size = count; - return 0; - } - - tools_cJSON *dataMax = tools_cJSON_GetObjectItem(tagObj, "max"); - if (tools_cJSON_IsNumber(dataMax)) { - max = dataMax->valueint; - } else { - max = convertDatatypeToDefaultMax(type); - } - - tools_cJSON *dataMin = tools_cJSON_GetObjectItem(tagObj, "min"); - if (tools_cJSON_IsNumber(dataMin)) { - min = dataMin->valueint; - } else { - min = convertDatatypeToDefaultMin(type); - } - - tools_cJSON *dataValues = tools_cJSON_GetObjectItem(tagObj, "values"); - - tools_cJSON * dataLen = tools_cJSON_GetObjectItem(tagObj, "len"); - if (tools_cJSON_IsNumber(dataLen)) { - length = (int32_t)dataLen->valueint; - } else { - if (type == TSDB_DATA_TYPE_BINARY - || type == TSDB_DATA_TYPE_JSON - || type == TSDB_DATA_TYPE_NCHAR) { - length = g_arguments->binwidth; - } else { - length = convertTypeToLength(type); - } - } - - for (int n = 0; n < count; ++n) { - Field * tag = benchCalloc(1, sizeof(Field), true); - benchArrayPush(stbInfo->tags, tag); - tag = benchArrayGet(stbInfo->tags, stbInfo->tags->size - 1); - tag->type = type; - tag->length = length; - if (length == 0) { - tag->null = true; - } - tag->max = max; - tag->min = min; - tag->values = dataValues; - if (customName) { - if (n >= 1) { - snprintf(tag->name, TSDB_COL_NAME_LEN, - "%s_%d", dataName->valuestring, n); - } else { - snprintf(tag->name, TSDB_COL_NAME_LEN, - "%s", dataName->valuestring); - } - } else { - snprintf(tag->name, TSDB_COL_NAME_LEN, "t%d", index); - } - index++; - } - } - code = 0; -PARSE_OVER: - return code; -} - -int32_t getDurationVal(tools_cJSON *jsonObj) { - int32_t durMinute = 0; - // get duration value - if (tools_cJSON_IsString(jsonObj)) { - char *val = jsonObj->valuestring; - // like 10d or 10h or 10m - int32_t len = strlen(val); - if (len == 0) return 0; - durMinute = atoi(val); - if (strchr(val, 'h') || strchr(val, 'H')) { - // hour - durMinute *= 60; - } else if (strchr(val, 'm') || strchr(val, 'M')) { - // minute - durMinute *= 1; - } else { - // day - 
durMinute *= 24 * 60; - } - } else if (tools_cJSON_IsNumber(jsonObj)) { - durMinute = jsonObj->valueint * 24 * 60; - } - - return durMinute; -} - -static int getDatabaseInfo(tools_cJSON *dbinfos, int index) { - SDataBase *database; - if (index > 0) { - database = benchCalloc(1, sizeof(SDataBase), true); - benchArrayPush(g_arguments->databases, database); - } - database = benchArrayGet(g_arguments->databases, index); - if (database->cfgs == NULL) { - database->cfgs = benchArrayInit(1, sizeof(SDbCfg)); - } - database->drop = true; - database->flush = false; - database->precision = TSDB_TIME_PRECISION_MILLI; - database->sml_precision = TSDB_SML_TIMESTAMP_MILLI_SECONDS; - tools_cJSON *dbinfo = tools_cJSON_GetArrayItem(dbinfos, index); - tools_cJSON *db = tools_cJSON_GetObjectItem(dbinfo, "dbinfo"); - if (!tools_cJSON_IsObject(db)) { - errorPrint("%s", "Invalid dbinfo format in json\n"); - return -1; - } - - tools_cJSON* cfg_object = db->child; - - while (cfg_object) { - if (0 == strcasecmp(cfg_object->string, "name")) { - if (tools_cJSON_IsString(cfg_object)) { - database->dbName = cfg_object->valuestring; - } - } else if (0 == strcasecmp(cfg_object->string, "drop")) { - if (tools_cJSON_IsString(cfg_object) - && (0 == strcasecmp(cfg_object->valuestring, "no"))) { - database->drop = false; - } - } else if (0 == strcasecmp(cfg_object->string, "flush_each_batch")) { - if (tools_cJSON_IsString(cfg_object) - && (0 == strcasecmp(cfg_object->valuestring, "yes"))) { - database->flush = true; - } - } else if (0 == strcasecmp(cfg_object->string, "precision")) { - if (tools_cJSON_IsString(cfg_object)) { - if (0 == strcasecmp(cfg_object->valuestring, "us")) { - database->precision = TSDB_TIME_PRECISION_MICRO; - database->sml_precision = TSDB_SML_TIMESTAMP_MICRO_SECONDS; - } else if (0 == strcasecmp(cfg_object->valuestring, "ns")) { - database->precision = TSDB_TIME_PRECISION_NANO; - database->sml_precision = TSDB_SML_TIMESTAMP_NANO_SECONDS; - } - } - } else { - SDbCfg* cfg = benchCalloc(1, sizeof(SDbCfg), true); - cfg->name = cfg_object->string; - - // get duration value - if (0 == strcasecmp(cfg_object->string, "duration")) { - database->durMinute = getDurationVal(cfg_object); - } - - if (tools_cJSON_IsString(cfg_object)) { - cfg->valuestring = cfg_object->valuestring; - } else if (tools_cJSON_IsNumber(cfg_object)) { - cfg->valueint = (int)cfg_object->valueint; - cfg->valuestring = NULL; - } else { - errorPrint("Invalid value format for %s\n", cfg->name); - free(cfg); - return -1; - } - benchArrayPush(database->cfgs, cfg); - } - cfg_object = cfg_object->next; - } - - // set default - if (database->durMinute == 0) { - database->durMinute = TSDB_DEFAULT_DURATION_PER_FILE; - } - - if (database->dbName == NULL) { - errorPrint("%s", "miss name in dbinfo\n"); - return -1; - } - - return 0; -} - -static int get_tsma_info(tools_cJSON* stb_obj, SSuperTable* stbInfo) { - stbInfo->tsmas = benchArrayInit(1, sizeof(TSMA)); - tools_cJSON* tsmas_obj = tools_cJSON_GetObjectItem(stb_obj, "tsmas"); - if (tsmas_obj == NULL) { - return 0; - } - if (!tools_cJSON_IsArray(tsmas_obj)) { - errorPrint("%s", "invalid tsmas format in json\n"); - return -1; - } - for (int i = 0; i < tools_cJSON_GetArraySize(tsmas_obj); ++i) { - tools_cJSON* tsma_obj = tools_cJSON_GetArrayItem(tsmas_obj, i); - if (!tools_cJSON_IsObject(tsma_obj)) { - errorPrint("%s", "Invalid tsma format in json\n"); - return -1; - } - TSMA* tsma = benchCalloc(1, sizeof(TSMA), true); - if (NULL == tsma) { - errorPrint("%s() failed to allocate memory\n", __func__); - 
} - tools_cJSON* tsma_name_obj = tools_cJSON_GetObjectItem(tsma_obj, - "name"); - if (!tools_cJSON_IsString(tsma_name_obj)) { - errorPrint("%s", "Invalid tsma name format in json\n"); - free(tsma); - return -1; - } - tsma->name = tsma_name_obj->valuestring; - - tools_cJSON* tsma_func_obj = - tools_cJSON_GetObjectItem(tsma_obj, "function"); - if (!tools_cJSON_IsString(tsma_func_obj)) { - errorPrint("%s", "Invalid tsma function format in json\n"); - free(tsma); - return -1; - } - tsma->func = tsma_func_obj->valuestring; - - tools_cJSON* tsma_interval_obj = - tools_cJSON_GetObjectItem(tsma_obj, "interval"); - if (!tools_cJSON_IsString(tsma_interval_obj)) { - errorPrint("%s", "Invalid tsma interval format in json\n"); - free(tsma); - return -1; - } - tsma->interval = tsma_interval_obj->valuestring; - - tools_cJSON* tsma_sliding_obj = - tools_cJSON_GetObjectItem(tsma_obj, "sliding"); - if (!tools_cJSON_IsString(tsma_sliding_obj)) { - errorPrint("%s", "Invalid tsma sliding format in json\n"); - free(tsma); - return -1; - } - tsma->sliding = tsma_sliding_obj->valuestring; - - tools_cJSON* tsma_custom_obj = - tools_cJSON_GetObjectItem(tsma_obj, "custom"); - tsma->custom = tsma_custom_obj->valuestring; - - tools_cJSON* tsma_start_obj = - tools_cJSON_GetObjectItem(tsma_obj, "start_when_inserted"); - if (!tools_cJSON_IsNumber(tsma_start_obj)) { - tsma->start_when_inserted = 0; - } else { - tsma->start_when_inserted = (int)tsma_start_obj->valueint; - } - - benchArrayPush(stbInfo->tsmas, tsma); - } - - return 0; -} - -void parseStringToIntArray(char *str, BArray *arr) { - benchArrayClear(arr); - if (NULL == strstr(str, ",")) { - int *val = benchCalloc(1, sizeof(int), true); - *val = atoi(str); - benchArrayPush(arr, val); - } else { - char *dup_str = strdup(str); - char *running = dup_str; - char *token = strsep(&running, ","); - while (token) { - int *val = benchCalloc(1, sizeof(int), true); - *val = atoi(token); - benchArrayPush(arr, val); - token = strsep(&running, ","); - } - tmfree(dup_str); - } -} - -static int getStableInfo(tools_cJSON *dbinfos, int index) { - SDataBase *database = benchArrayGet(g_arguments->databases, index); - tools_cJSON *dbinfo = tools_cJSON_GetArrayItem(dbinfos, index); - tools_cJSON *stables = tools_cJSON_GetObjectItem(dbinfo, "super_tables"); - if (!tools_cJSON_IsArray(stables)) { - infoPrint("create database %s without stables\n", database->dbName); - return 0; - } - for (int i = 0; i < tools_cJSON_GetArraySize(stables); ++i) { - SSuperTable *superTable; - if (index > 0 || i > 0) { - superTable = benchCalloc(1, sizeof(SSuperTable), true); - benchArrayPush(database->superTbls, superTable); - superTable = benchArrayGet(database->superTbls, i); - superTable->cols = benchArrayInit(1, sizeof(Field)); - superTable->tags = benchArrayInit(1, sizeof(Field)); - } else { - superTable = benchArrayGet(database->superTbls, i); - } - superTable->autoTblCreating = false; - superTable->batchTblCreatingNum = DEFAULT_CREATE_BATCH; - superTable->batchTblCreatingNumbers = NULL; - superTable->batchTblCreatingIntervals = NULL; - superTable->childTblExists = false; - superTable->random_data_source = true; - superTable->iface = TAOSC_IFACE; - superTable->lineProtocol = TSDB_SML_LINE_PROTOCOL; - superTable->tcpTransfer = false; - superTable->childTblOffset = 0; - superTable->timestamp_step = 1; - superTable->useSampleTs = false; - superTable->non_stop = false; - superTable->insertRows = 0; - superTable->interlaceRows = 0; - superTable->disorderRatio = 0; - superTable->disorderRange = 
DEFAULT_DISORDER_RANGE; - superTable->insert_interval = g_arguments->insert_interval; - superTable->max_sql_len = TSDB_MAX_ALLOWED_SQL_LEN; - superTable->partialColNum = 0; - superTable->comment = NULL; - superTable->delay = -1; - superTable->file_factor = -1; - superTable->rollup = NULL; - tools_cJSON *stbInfo = tools_cJSON_GetArrayItem(stables, i); - tools_cJSON *itemObj; - - tools_cJSON *stbName = tools_cJSON_GetObjectItem(stbInfo, "name"); - if (tools_cJSON_IsString(stbName)) { - superTable->stbName = stbName->valuestring; - } - - tools_cJSON *prefix = - tools_cJSON_GetObjectItem(stbInfo, "childtable_prefix"); - if (tools_cJSON_IsString(prefix)) { - superTable->childTblPrefix = prefix->valuestring; - } - tools_cJSON *childTbleSample = - tools_cJSON_GetObjectItem(stbInfo, "childtable_sample_file"); - if (tools_cJSON_IsString(childTbleSample)) { - superTable->childTblSample = childTbleSample->valuestring; - } - tools_cJSON *autoCreateTbl = - tools_cJSON_GetObjectItem(stbInfo, "auto_create_table"); - if (tools_cJSON_IsString(autoCreateTbl) - && (0 == strcasecmp(autoCreateTbl->valuestring, "yes"))) { - superTable->autoTblCreating = true; - } - tools_cJSON *batchCreateTbl = - tools_cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); - if (tools_cJSON_IsNumber(batchCreateTbl)) { - superTable->batchTblCreatingNum = batchCreateTbl->valueint; - } - tools_cJSON *batchTblCreatingNumbers = - tools_cJSON_GetObjectItem(stbInfo, "batch_create_tbl_numbers"); - if (tools_cJSON_IsString(batchTblCreatingNumbers)) { - superTable->batchTblCreatingNumbers - = batchTblCreatingNumbers->valuestring; - superTable->batchTblCreatingNumbersArray = - benchArrayInit(1, sizeof(int)); - parseStringToIntArray(superTable->batchTblCreatingNumbers, - superTable->batchTblCreatingNumbersArray); - } - tools_cJSON *batchTblCreatingIntervals = - tools_cJSON_GetObjectItem(stbInfo, "batch_create_tbl_intervals"); - if (tools_cJSON_IsString(batchTblCreatingIntervals)) { - superTable->batchTblCreatingIntervals - = batchTblCreatingIntervals->valuestring; - superTable->batchTblCreatingIntervalsArray = - benchArrayInit(1, sizeof(int)); - parseStringToIntArray(superTable->batchTblCreatingIntervals, - superTable->batchTblCreatingIntervalsArray); - } - tools_cJSON *childTblExists = - tools_cJSON_GetObjectItem(stbInfo, "child_table_exists"); - if (tools_cJSON_IsString(childTblExists) - && (0 == strcasecmp(childTblExists->valuestring, "yes")) - && !database->drop) { - superTable->childTblExists = true; - superTable->autoTblCreating = false; - } - - tools_cJSON *childTableCount = - tools_cJSON_GetObjectItem(stbInfo, "childtable_count"); - if (tools_cJSON_IsNumber(childTableCount)) { - superTable->childTblCount = childTableCount->valueint; - g_arguments->totalChildTables += superTable->childTblCount; - } else { - superTable->childTblCount = 0; - g_arguments->totalChildTables += superTable->childTblCount; - } - - tools_cJSON *dataSource = - tools_cJSON_GetObjectItem(stbInfo, "data_source"); - if (tools_cJSON_IsString(dataSource) - && (0 == strcasecmp(dataSource->valuestring, "sample"))) { - superTable->random_data_source = false; - } - - tools_cJSON *stbIface = - tools_cJSON_GetObjectItem(stbInfo, "insert_mode"); - if (tools_cJSON_IsString(stbIface)) { - if (0 == strcasecmp(stbIface->valuestring, "rest")) { - superTable->iface = REST_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { - superTable->iface = STMT_IFACE; - if (g_arguments->reqPerReq > INT16_MAX) { - g_arguments->reqPerReq = INT16_MAX; - } - if 
(g_arguments->reqPerReq > g_arguments->prepared_rand) { - g_arguments->prepared_rand = g_arguments->reqPerReq; - } - } else if (0 == strcasecmp(stbIface->valuestring, "sml")) { - if (g_arguments->reqPerReq > SML_MAX_BATCH) { - errorPrint("reqPerReq (%u) larger than maximum (%d)\n", - g_arguments->reqPerReq, SML_MAX_BATCH); - return -1; - } - superTable->iface = SML_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "sml-rest")) { - if (g_arguments->reqPerReq > SML_MAX_BATCH) { - errorPrint("reqPerReq (%u) larger than maximum (%d)\n", - g_arguments->reqPerReq, SML_MAX_BATCH); - return -1; - } - superTable->iface = SML_REST_IFACE; - if (0 != convertServAddr(REST_IFACE, - false, - 1)) { - errorPrint("%s", "Failed to convert server address\n"); - return -1; - } - encodeAuthBase64(); - g_arguments->rest_server_ver_major = - getServerVersionRest(g_arguments->port + TSDB_PORT_HTTP); - } - } - -#ifdef WEBSOCKET - if (g_arguments->websocket) { - infoPrint("Since WebSocket interface is enabled, " - "the interface %s is changed to use WebSocket.\n", - stbIface->valuestring); - superTable->iface = TAOSC_IFACE; - } -#endif - - tools_cJSON *stbLineProtocol = - tools_cJSON_GetObjectItem(stbInfo, "line_protocol"); - if (tools_cJSON_IsString(stbLineProtocol)) { - if (0 == strcasecmp(stbLineProtocol->valuestring, "line")) { - superTable->lineProtocol = TSDB_SML_LINE_PROTOCOL; - } else if (0 == strcasecmp(stbLineProtocol->valuestring, - "telnet")) { - superTable->lineProtocol = TSDB_SML_TELNET_PROTOCOL; - } else if (0 == strcasecmp(stbLineProtocol->valuestring, "json")) { - superTable->lineProtocol = TSDB_SML_JSON_PROTOCOL; - } else if (0 == strcasecmp( - stbLineProtocol->valuestring, "taosjson")) { - superTable->lineProtocol = SML_JSON_TAOS_FORMAT; - } - } - tools_cJSON *transferProtocol = - tools_cJSON_GetObjectItem(stbInfo, "tcp_transfer"); - if (tools_cJSON_IsString(transferProtocol) - && (0 == strcasecmp(transferProtocol->valuestring, "yes"))) { - superTable->tcpTransfer = true; - } - tools_cJSON *childTbl_limit = - tools_cJSON_GetObjectItem(stbInfo, "childtable_limit"); - if (tools_cJSON_IsNumber(childTbl_limit)) { - if (childTbl_limit->valueint >= 0) { - superTable->childTblLimit = childTbl_limit->valueint; - if (superTable->childTblLimit > superTable->childTblCount) { - warnPrint("child table limit %"PRId64" " - "is more than %"PRId64", set to %"PRId64"\n", - childTbl_limit->valueint, - superTable->childTblCount, - superTable->childTblCount); - superTable->childTblLimit = superTable->childTblCount; - } - } else { - warnPrint("child table limit %"PRId64" is invalid, " - "set to %"PRId64"\n", - childTbl_limit->valueint, - superTable->childTblCount); - superTable->childTblLimit = superTable->childTblCount; - } - } - tools_cJSON *childTbl_offset = - tools_cJSON_GetObjectItem(stbInfo, "childtable_offset"); - if (tools_cJSON_IsNumber(childTbl_offset)) { - superTable->childTblOffset = childTbl_offset->valueint; - } - tools_cJSON *childTbl_from = - tools_cJSON_GetObjectItem(stbInfo, "childtable_from"); - if (tools_cJSON_IsNumber(childTbl_from)) { - if (childTbl_from->valueint >= 0) { - superTable->childTblFrom = childTbl_from->valueint; - } else { - warnPrint("child table _from_ %"PRId64" is invalid, set to 0\n", - childTbl_from->valueint); - superTable->childTblFrom = 0; - } - } - tools_cJSON *childTbl_to = - tools_cJSON_GetObjectItem(stbInfo, "childtable_to"); - if (tools_cJSON_IsNumber(childTbl_to)) { - superTable->childTblTo = childTbl_to->valueint; - if (superTable->childTblTo < 
superTable->childTblFrom) { - errorPrint("child table _to_ is invalid number," - "%"PRId64" < %"PRId64"\n", - superTable->childTblTo, superTable->childTblFrom); - return -1; - } - } - - tools_cJSON *continueIfFail = - tools_cJSON_GetObjectItem(stbInfo, "continue_if_fail"); // yes, no, - if (tools_cJSON_IsString(continueIfFail)) { - if (0 == strcasecmp(continueIfFail->valuestring, "no")) { - superTable->continueIfFail = NO_IF_FAILED; - } else if (0 == strcasecmp(continueIfFail->valuestring, "yes")) { - superTable->continueIfFail = YES_IF_FAILED; - } else if (0 == strcasecmp(continueIfFail->valuestring, "smart")) { - superTable->continueIfFail = SMART_IF_FAILED; - } else { - errorPrint("cointinue_if_fail has unknown mode %s\n", - continueIfFail->valuestring); - return -1; - } - } - - tools_cJSON *ts = tools_cJSON_GetObjectItem(stbInfo, "start_timestamp"); - if (tools_cJSON_IsString(ts)) { - if (0 == strcasecmp(ts->valuestring, "now")) { - superTable->startTimestamp = - toolsGetTimestamp(database->precision); - superTable->useNow = true; - // fill time with now conflict with check_sql - g_arguments->check_sql = false; - } else { - if (toolsParseTime(ts->valuestring, - &(superTable->startTimestamp), - (int32_t)strlen(ts->valuestring), - database->precision, 0)) { - errorPrint("failed to parse time %s\n", - ts->valuestring); - return -1; - } - } - } else { - if (tools_cJSON_IsNumber(ts)) { - superTable->startTimestamp = ts->valueint; - } else { - superTable->startTimestamp = - toolsGetTimestamp(database->precision); - } - } - - tools_cJSON *timestampStep = - tools_cJSON_GetObjectItem(stbInfo, "timestamp_step"); - if (tools_cJSON_IsNumber(timestampStep)) { - superTable->timestamp_step = timestampStep->valueint; - } - - tools_cJSON *keepTrying = - tools_cJSON_GetObjectItem(stbInfo, "keep_trying"); - if (tools_cJSON_IsNumber(keepTrying)) { - superTable->keep_trying = keepTrying->valueint; - } - - tools_cJSON *tryingInterval = - tools_cJSON_GetObjectItem(stbInfo, "trying_interval"); - if (tools_cJSON_IsNumber(tryingInterval)) { - superTable->trying_interval = (uint32_t)tryingInterval->valueint; - } - - tools_cJSON *sampleFile = - tools_cJSON_GetObjectItem(stbInfo, "sample_file"); - if (tools_cJSON_IsString(sampleFile)) { - tstrncpy( - superTable->sampleFile, sampleFile->valuestring, - MAX_FILE_NAME_LEN); - } else { - memset(superTable->sampleFile, 0, MAX_FILE_NAME_LEN); - } - - tools_cJSON *useSampleTs = - tools_cJSON_GetObjectItem(stbInfo, "use_sample_ts"); - if (tools_cJSON_IsString(useSampleTs) && - (0 == strcasecmp(useSampleTs->valuestring, "yes"))) { - superTable->useSampleTs = true; - } - - tools_cJSON *nonStop = - tools_cJSON_GetObjectItem(stbInfo, "non_stop_mode"); - if (tools_cJSON_IsString(nonStop) && - (0 == strcasecmp(nonStop->valuestring, "yes"))) { - superTable->non_stop = true; - } - - tools_cJSON* max_sql_len_obj = - tools_cJSON_GetObjectItem(stbInfo, "max_sql_len"); - if (tools_cJSON_IsNumber(max_sql_len_obj)) { - superTable->max_sql_len = max_sql_len_obj->valueint; - } - - tools_cJSON *tagsFile = - tools_cJSON_GetObjectItem(stbInfo, "tags_file"); - if (tools_cJSON_IsString(tagsFile)) { - tstrncpy(superTable->tagsFile, tagsFile->valuestring, - MAX_FILE_NAME_LEN); - } else { - memset(superTable->tagsFile, 0, MAX_FILE_NAME_LEN); - } - - tools_cJSON *insertRows = - tools_cJSON_GetObjectItem(stbInfo, "insert_rows"); - if (tools_cJSON_IsNumber(insertRows)) { - superTable->insertRows = insertRows->valueint; - } - - tools_cJSON *stbInterlaceRows = - tools_cJSON_GetObjectItem(stbInfo, 
"interlace_rows"); - if (tools_cJSON_IsNumber(stbInterlaceRows)) { - superTable->interlaceRows = (uint32_t)stbInterlaceRows->valueint; - } - - // disorder - tools_cJSON *disorderRatio = - tools_cJSON_GetObjectItem(stbInfo, "disorder_ratio"); - if (tools_cJSON_IsNumber(disorderRatio)) { - if (disorderRatio->valueint > 100) disorderRatio->valueint = 100; - if (disorderRatio->valueint < 0) disorderRatio->valueint = 0; - - superTable->disorderRatio = (int)disorderRatio->valueint; - superTable->disRatio = (uint8_t)disorderRatio->valueint; - } - tools_cJSON *disorderRange = - tools_cJSON_GetObjectItem(stbInfo, "disorder_range"); - if (tools_cJSON_IsNumber(disorderRange)) { - superTable->disorderRange = (int)disorderRange->valueint; - superTable->disRange = disorderRange->valueint; - } - tools_cJSON *disFill = - tools_cJSON_GetObjectItem(stbInfo, "disorder_fill_interval"); - if (tools_cJSON_IsNumber(disFill)) { - superTable->fillIntervalDis = (int)disFill->valueint; - } - - - // update - tools_cJSON *updRatio = - tools_cJSON_GetObjectItem(stbInfo, "update_ratio"); - if (tools_cJSON_IsNumber(updRatio)) { - if (updRatio->valueint > 100) updRatio->valueint = 100; - if (updRatio->valueint < 0) updRatio->valueint = 0; - superTable->updRatio = (int8_t)updRatio->valueint; - } - tools_cJSON *updFill = - tools_cJSON_GetObjectItem(stbInfo, "update_fill_interval"); - if (tools_cJSON_IsNumber(updFill)) { - superTable->fillIntervalUpd = (uint64_t)updFill->valueint; - } - - // delete - tools_cJSON *delRatio = - tools_cJSON_GetObjectItem(stbInfo, "delete_ratio"); - if (tools_cJSON_IsNumber(delRatio)) { - if (delRatio->valueint > 100) delRatio->valueint = 100; - if (delRatio->valueint < 0) delRatio->valueint = 0; - superTable->delRatio = (int8_t)delRatio->valueint; - } - - // generate row rule - tools_cJSON *rowRule = - tools_cJSON_GetObjectItem(stbInfo, "generate_row_rule"); - if (tools_cJSON_IsNumber(rowRule)) { - superTable->genRowRule = (int8_t)rowRule->valueint; - } - - // binary prefix - tools_cJSON *binPrefix = - tools_cJSON_GetObjectItem(stbInfo, "binary_prefix"); - if (tools_cJSON_IsString(binPrefix)) { - superTable->binaryPrefex = binPrefix->valuestring; - } else { - superTable->binaryPrefex = NULL; - } - - // nchar prefix - tools_cJSON *ncharPrefix = - tools_cJSON_GetObjectItem(stbInfo, "nchar_prefix"); - if (tools_cJSON_IsString(ncharPrefix)) { - superTable->ncharPrefex = ncharPrefix->valuestring; - } else { - superTable->ncharPrefex = NULL; - } - - // write future random - itemObj = tools_cJSON_GetObjectItem(stbInfo, "random_write_future"); - if (tools_cJSON_IsString(itemObj) - && (0 == strcasecmp(itemObj->valuestring, "yes"))) { - superTable->writeFuture = true; - } - - // check_correct_interval - itemObj = tools_cJSON_GetObjectItem(stbInfo, "check_correct_interval"); - if (tools_cJSON_IsNumber(itemObj)) { - superTable->checkInterval = itemObj->valueint; - } - - tools_cJSON *insertInterval = - tools_cJSON_GetObjectItem(stbInfo, "insert_interval"); - if (tools_cJSON_IsNumber(insertInterval)) { - superTable->insert_interval = insertInterval->valueint; - } - - tools_cJSON *pPartialColNum = - tools_cJSON_GetObjectItem(stbInfo, "partial_col_num"); - if (tools_cJSON_IsNumber(pPartialColNum)) { - superTable->partialColNum = pPartialColNum->valueint; - } - - if (g_arguments->taosc_version == 3) { - tools_cJSON *delay = tools_cJSON_GetObjectItem(stbInfo, "delay"); - if (tools_cJSON_IsNumber(delay)) { - superTable->delay = (int)delay->valueint; - } - - tools_cJSON *file_factor = - 
tools_cJSON_GetObjectItem(stbInfo, "file_factor"); - if (tools_cJSON_IsNumber(file_factor)) { - superTable->file_factor = (int)file_factor->valueint; - } - - tools_cJSON *rollup = tools_cJSON_GetObjectItem(stbInfo, "rollup"); - if (tools_cJSON_IsString(rollup)) { - superTable->rollup = rollup->valuestring; - } - - tools_cJSON *ttl = tools_cJSON_GetObjectItem(stbInfo, "ttl"); - if (tools_cJSON_IsNumber(ttl)) { - superTable->ttl = (int)ttl->valueint; - } - - tools_cJSON *max_delay_obj = - tools_cJSON_GetObjectItem(stbInfo, "max_delay"); - if (tools_cJSON_IsString(max_delay_obj)) { - superTable->max_delay = max_delay_obj->valuestring; - } - - tools_cJSON *watermark_obj = - tools_cJSON_GetObjectItem(stbInfo, "watermark"); - if (tools_cJSON_IsString(watermark_obj)) { - superTable->watermark = watermark_obj->valuestring; - } - - if (get_tsma_info(stbInfo, superTable)) { - return -1; - } - } - - if (getColumnAndTagTypeFromInsertJsonFile(stbInfo, superTable)) { - return -1; - } - } - return 0; -} - -static int getStreamInfo(tools_cJSON* json) { - tools_cJSON* streamsObj = tools_cJSON_GetObjectItem(json, "streams"); - if (tools_cJSON_IsArray(streamsObj)) { - int streamCnt = tools_cJSON_GetArraySize(streamsObj); - for (int i = 0; i < streamCnt; ++i) { - tools_cJSON* streamObj = tools_cJSON_GetArrayItem(streamsObj, i); - if (!tools_cJSON_IsObject(streamObj)) { - errorPrint("%s", "invalid stream format in json\n"); - return -1; - } - tools_cJSON* stream_name = - tools_cJSON_GetObjectItem(streamObj, "stream_name"); - tools_cJSON* stream_stb = - tools_cJSON_GetObjectItem(streamObj, "stream_stb"); - tools_cJSON* source_sql = - tools_cJSON_GetObjectItem(streamObj, "source_sql"); - if (!tools_cJSON_IsString(stream_name) - || !tools_cJSON_IsString(stream_stb) - || !tools_cJSON_IsString(source_sql)) { - errorPrint("%s", "Invalid or miss " - "'stream_name'/'stream_stb'/'source_sql' " - "key in json\n"); - return -1; - } - SSTREAM * stream = benchCalloc(1, sizeof(SSTREAM), true); - tstrncpy(stream->stream_name, stream_name->valuestring, - TSDB_TABLE_NAME_LEN); - tstrncpy(stream->stream_stb, stream_stb->valuestring, - TSDB_TABLE_NAME_LEN); - tstrncpy(stream->source_sql, source_sql->valuestring, - TSDB_DEFAULT_PKT_SIZE); - - tools_cJSON* trigger_mode = - tools_cJSON_GetObjectItem(streamObj, "trigger_mode"); - if (tools_cJSON_IsString(trigger_mode)) { - tstrncpy(stream->trigger_mode, trigger_mode->valuestring, - BIGINT_BUFF_LEN); - } - - tools_cJSON* watermark = - tools_cJSON_GetObjectItem(streamObj, "watermark"); - if (tools_cJSON_IsString(watermark)) { - tstrncpy(stream->watermark, watermark->valuestring, - BIGINT_BUFF_LEN); - } - - tools_cJSON* ignore_expired = - tools_cJSON_GetObjectItem(streamObj, "ignore_expired"); - if (tools_cJSON_IsString(ignore_expired)) { - tstrncpy(stream->ignore_expired, ignore_expired->valuestring, - BIGINT_BUFF_LEN); - } - - tools_cJSON* ignore_update = - tools_cJSON_GetObjectItem(streamObj, "ignore_update"); - if (tools_cJSON_IsString(ignore_update)) { - tstrncpy(stream->ignore_update, ignore_update->valuestring, - BIGINT_BUFF_LEN); - } - - tools_cJSON* fill_history = - tools_cJSON_GetObjectItem(streamObj, "fill_history"); - if (tools_cJSON_IsString(fill_history)) { - tstrncpy(stream->fill_history, fill_history->valuestring, - BIGINT_BUFF_LEN); - } - - tools_cJSON* stream_stb_field = - tools_cJSON_GetObjectItem(streamObj, "stream_stb_field"); - if (tools_cJSON_IsString(stream_stb_field)) { - tstrncpy(stream->stream_stb_field, - stream_stb_field->valuestring, - 
TSDB_DEFAULT_PKT_SIZE); - } - - tools_cJSON* stream_tag_field = - tools_cJSON_GetObjectItem(streamObj, "stream_tag_field"); - if (tools_cJSON_IsString(stream_tag_field)) { - tstrncpy(stream->stream_tag_field, - stream_tag_field->valuestring, - TSDB_DEFAULT_PKT_SIZE); - } - - tools_cJSON* subtable = - tools_cJSON_GetObjectItem(streamObj, "subtable"); - if (tools_cJSON_IsString(subtable)) { - tstrncpy(stream->subtable, subtable->valuestring, - TSDB_DEFAULT_PKT_SIZE); - } - - tools_cJSON* drop = tools_cJSON_GetObjectItem(streamObj, "drop"); - if (tools_cJSON_IsString(drop)) { - if (0 == strcasecmp(drop->valuestring, "yes")) { - stream->drop = true; - } else if (0 == strcasecmp(drop->valuestring, "no")) { - stream->drop = false; - } else { - errorPrint("invalid value for drop field: %s\n", - drop->valuestring); - return -1; - } - } - benchArrayPush(g_arguments->streams, stream); - } - } - return 0; -} - -// read common item -static int getMetaFromCommonJsonFile(tools_cJSON *json) { - int32_t code = -1; - tools_cJSON *cfgdir = tools_cJSON_GetObjectItem(json, "cfgdir"); - if (cfgdir && (cfgdir->type == tools_cJSON_String) - && (cfgdir->valuestring != NULL)) { - tstrncpy(g_configDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - - tools_cJSON *host = tools_cJSON_GetObjectItem(json, "host"); - if (host && host->type == tools_cJSON_String && host->valuestring != NULL) { - g_arguments->host = host->valuestring; - } - - tools_cJSON *port = tools_cJSON_GetObjectItem(json, "port"); - if (port && port->type == tools_cJSON_Number) { - g_arguments->port = (uint16_t)port->valueint; - } - - tools_cJSON *user = tools_cJSON_GetObjectItem(json, "user"); - if (user && user->type == tools_cJSON_String && user->valuestring != NULL) { - g_arguments->user = user->valuestring; - } - - tools_cJSON *password = tools_cJSON_GetObjectItem(json, "password"); - if (password && password->type == tools_cJSON_String && - password->valuestring != NULL) { - g_arguments->password = password->valuestring; - } - - tools_cJSON *answerPrompt = - tools_cJSON_GetObjectItem(json, - "confirm_parameter_prompt"); // yes, no, - if (answerPrompt && answerPrompt->type == tools_cJSON_String - && answerPrompt->valuestring != NULL) { - if (0 == strcasecmp(answerPrompt->valuestring, "no")) { - g_arguments->answer_yes = true; - } - } - - tools_cJSON *continueIfFail = - tools_cJSON_GetObjectItem(json, "continue_if_fail"); // yes, no, - if (tools_cJSON_IsString(continueIfFail)) { - if (0 == strcasecmp(continueIfFail->valuestring, "no")) { - g_arguments->continueIfFail = NO_IF_FAILED; - } else if (0 == strcasecmp(continueIfFail->valuestring, "yes")) { - g_arguments->continueIfFail = YES_IF_FAILED; - } else if (0 == strcasecmp(continueIfFail->valuestring, "smart")) { - g_arguments->continueIfFail = SMART_IF_FAILED; - } else { - errorPrint("cointinue_if_fail has unknown mode %s\n", - continueIfFail->valuestring); - return -1; - } - } - - code = 0; - return code; -} - -static int getMetaFromInsertJsonFile(tools_cJSON *json) { - int32_t code = -1; - -#ifdef WEBSOCKET - tools_cJSON *dsn = tools_cJSON_GetObjectItem(json, "dsn"); - if (tools_cJSON_IsString(dsn)) { - g_arguments->dsn = dsn->valuestring; - g_arguments->websocket = true; - } -#endif - - // check after inserted - tools_cJSON *checkSql = tools_cJSON_GetObjectItem(json, "check_sql"); - if (tools_cJSON_IsString(checkSql)) { - if (0 == strcasecmp(checkSql->valuestring, "yes")) { - g_arguments->check_sql = true; - } - } - - tools_cJSON *resultfile = tools_cJSON_GetObjectItem(json, "result_file"); 
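    // Each top-level option below (result_file, thread_count, keep_trying,
    // trying_interval, create_table_thread_count, num_of_records_per_req,
    // prepared_rand, ...) is optional: it is looked up with
    // tools_cJSON_GetObjectItem() and only overrides the matching
    // g_arguments field when the key is present and has the expected type.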
- if (resultfile && resultfile->type == tools_cJSON_String - && resultfile->valuestring != NULL) { - g_arguments->output_file = resultfile->valuestring; - } - - tools_cJSON *threads = tools_cJSON_GetObjectItem(json, "thread_count"); - if (threads && threads->type == tools_cJSON_Number) { - g_arguments->nthreads = (uint32_t)threads->valueint; - } - - tools_cJSON *keepTrying = tools_cJSON_GetObjectItem(json, "keep_trying"); - if (keepTrying && keepTrying->type == tools_cJSON_Number) { - g_arguments->keep_trying = (int32_t)keepTrying->valueint; - } - - tools_cJSON *tryingInterval = - tools_cJSON_GetObjectItem(json, "trying_interval"); - if (tryingInterval && tryingInterval->type == tools_cJSON_Number) { - g_arguments->trying_interval = (uint32_t)tryingInterval->valueint; - } - - tools_cJSON *table_theads = - tools_cJSON_GetObjectItem(json, "create_table_thread_count"); - if (tools_cJSON_IsNumber(table_theads)) { - g_arguments->table_threads = (uint32_t)table_theads->valueint; - } - -#ifdef WEBSOCKET - if (!g_arguments->websocket) { -#endif -#ifdef LINUX - if (strlen(g_configDir)) { - wordexp_t full_path; - if (wordexp(g_configDir, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", g_configDir); - exit(EXIT_FAILURE); - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - } -#endif -#ifdef WEBSOCKET - } -#endif - - tools_cJSON *numRecPerReq = - tools_cJSON_GetObjectItem(json, "num_of_records_per_req"); - if (numRecPerReq && numRecPerReq->type == tools_cJSON_Number) { - g_arguments->reqPerReq = (uint32_t)numRecPerReq->valueint; - if ((int32_t)g_arguments->reqPerReq <= 0) { - infoPrint("waring: num_of_records_per_req item in json config must over zero, current = %d. now reset to default. \n", g_arguments->reqPerReq); - g_arguments->reqPerReq = DEFAULT_REQ_PER_REQ; - } - - if (g_arguments->reqPerReq > 32768) { - infoPrint("warning: num_of_records_per_req item in json config need less than 32768. current = %d. 
now reset to default.\n", g_arguments->reqPerReq); - g_arguments->reqPerReq = DEFAULT_REQ_PER_REQ; - } - - } - - tools_cJSON *prepareRand = - tools_cJSON_GetObjectItem(json, "prepared_rand"); - if (prepareRand && prepareRand->type == tools_cJSON_Number) { - g_arguments->prepared_rand = prepareRand->valueint; - } - - tools_cJSON *chineseOpt = - tools_cJSON_GetObjectItem(json, "chinese"); // yes, no, - if (chineseOpt && chineseOpt->type == tools_cJSON_String - && chineseOpt->valuestring != NULL) { - if (0 == strncasecmp(chineseOpt->valuestring, "yes", 3)) { - g_arguments->chinese = true; - } - } - - tools_cJSON *escapeChar = - tools_cJSON_GetObjectItem(json, "escape_character"); // yes, no, - if (escapeChar && escapeChar->type == tools_cJSON_String - && escapeChar->valuestring != NULL) { - if (0 == strncasecmp(escapeChar->valuestring, "yes", 3)) { - g_arguments->escape_character = true; - } - } - - tools_cJSON *top_insertInterval = - tools_cJSON_GetObjectItem(json, "insert_interval"); - if (top_insertInterval && top_insertInterval->type == tools_cJSON_Number) { - g_arguments->insert_interval = top_insertInterval->valueint; - } - - tools_cJSON *insert_mode = tools_cJSON_GetObjectItem(json, "insert_mode"); - if (insert_mode && insert_mode->type == tools_cJSON_String - && insert_mode->valuestring != NULL) { - if (0 == strcasecmp(insert_mode->valuestring, "rest")) { - g_arguments->iface = REST_IFACE; - } - } - - tools_cJSON *dbinfos = tools_cJSON_GetObjectItem(json, "databases"); - if (!tools_cJSON_IsArray(dbinfos)) { - errorPrint("%s", "Invalid databases format in json\n"); - return -1; - } - int dbSize = tools_cJSON_GetArraySize(dbinfos); - - for (int i = 0; i < dbSize; ++i) { - if (getDatabaseInfo(dbinfos, i)) { - goto PARSE_OVER; - } - if (getStableInfo(dbinfos, i)) { - goto PARSE_OVER; - } - } - - if (g_arguments->taosc_version == 3) { - if (getStreamInfo(json)) { - goto PARSE_OVER; - } - } - - code = 0; - -PARSE_OVER: - return code; -} - -static int getMetaFromQueryJsonFile(tools_cJSON *json) { - int32_t code = -1; - - tools_cJSON *telnet_tcp_port = - tools_cJSON_GetObjectItem(json, "telnet_tcp_port"); - if (tools_cJSON_IsNumber(telnet_tcp_port)) { - g_arguments->telnet_tcp_port = (uint16_t)telnet_tcp_port->valueint; - } - - tools_cJSON *gQueryTimes = tools_cJSON_GetObjectItem(json, "query_times"); - if (tools_cJSON_IsNumber(gQueryTimes)) { - g_queryInfo.query_times = gQueryTimes->valueint; - } else { - g_queryInfo.query_times = 1; - } - - tools_cJSON *gKillSlowQueryThreshold = - tools_cJSON_GetObjectItem(json, "kill_slow_query_threshold"); - if (tools_cJSON_IsNumber(gKillSlowQueryThreshold)) { - g_queryInfo.killQueryThreshold = gKillSlowQueryThreshold->valueint; - } else { - g_queryInfo.killQueryThreshold = 0; - } - - tools_cJSON *gKillSlowQueryInterval = - tools_cJSON_GetObjectItem(json, "kill_slow_query_interval"); - if (tools_cJSON_IsNumber(gKillSlowQueryInterval)) { - g_queryInfo.killQueryInterval = gKillSlowQueryInterval ->valueint; - } else { - g_queryInfo.killQueryInterval = 1; /* by default, interval 1s */ - } - - tools_cJSON *resetCache = - tools_cJSON_GetObjectItem(json, "reset_query_cache"); - if (tools_cJSON_IsString(resetCache)) { - if (0 == strcasecmp(resetCache->valuestring, "yes")) { - g_queryInfo.reset_query_cache = true; - } - } else { - g_queryInfo.reset_query_cache = false; - } - - tools_cJSON *respBuffer = - tools_cJSON_GetObjectItem(json, "response_buffer"); - if (tools_cJSON_IsNumber(respBuffer)) { - g_queryInfo.response_buffer = respBuffer->valueint; - } else { - 
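/*
 * An illustrative top-level skeleton for an insert-type job, restricted to
 * keys that getMetaFromInsertJsonFile() above actually reads; the values are
 * placeholders. "num_of_records_per_req" is silently reset to
 * DEFAULT_REQ_PER_REQ when it is <= 0 or > 32768, "databases" must be an
 * array, and a top-level "insert_mode" is only honoured when it equals
 * "rest" (per-super-table insert_mode is parsed separately in
 * getStableInfo()).
 *
 *   {
 *     "filetype":                  "insert",
 *     "result_file":               "./insert_res.txt",
 *     "thread_count":              8,
 *     "create_table_thread_count": 8,
 *     "num_of_records_per_req":    10000,
 *     "prepared_rand":             10000,
 *     "insert_interval":           0,
 *     "escape_character":          "yes",
 *     "databases":                 [ { ... } ]
 *   }
 */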
g_queryInfo.response_buffer = RESP_BUF_LEN; - } - - tools_cJSON *dbs = tools_cJSON_GetObjectItem(json, "databases"); - if (tools_cJSON_IsString(dbs)) { - g_queryInfo.dbName = dbs->valuestring; - } - - tools_cJSON *queryMode = tools_cJSON_GetObjectItem(json, "query_mode"); - if (tools_cJSON_IsString(queryMode)) { - if (0 == strcasecmp(queryMode->valuestring, "rest")) { - g_queryInfo.iface = REST_IFACE; - } else if (0 == strcasecmp(queryMode->valuestring, "taosc")) { - g_queryInfo.iface = TAOSC_IFACE; - } else { - errorPrint("Invalid query_mode value: %s\n", - queryMode->valuestring); - goto PARSE_OVER; - } - } - // init sqls - g_queryInfo.specifiedQueryInfo.sqls = benchArrayInit(1, sizeof(SSQL)); - - // specified_table_query - tools_cJSON *specifiedQuery = - tools_cJSON_GetObjectItem(json, "specified_table_query"); - g_queryInfo.specifiedQueryInfo.concurrent = 1; - if (tools_cJSON_IsObject(specifiedQuery)) { - tools_cJSON *queryInterval = - tools_cJSON_GetObjectItem(specifiedQuery, "query_interval"); - if (tools_cJSON_IsNumber(queryInterval)) { - g_queryInfo.specifiedQueryInfo.queryInterval = - queryInterval->valueint; - } else { - g_queryInfo.specifiedQueryInfo.queryInterval = 0; - } - - tools_cJSON *specifiedQueryTimes = - tools_cJSON_GetObjectItem(specifiedQuery, "query_times"); - if (tools_cJSON_IsNumber(specifiedQueryTimes)) { - g_queryInfo.specifiedQueryInfo.queryTimes = - specifiedQueryTimes->valueint; - } else { - g_queryInfo.specifiedQueryInfo.queryTimes = g_queryInfo.query_times; - } - - tools_cJSON *mixedQueryObj = - tools_cJSON_GetObjectItem(specifiedQuery, "mixed_query"); - if (tools_cJSON_IsString(mixedQueryObj)) { - if (0 == strcasecmp(mixedQueryObj->valuestring, "yes")) { - g_queryInfo.specifiedQueryInfo.mixed_query = true; - } else if (0 == strcasecmp(mixedQueryObj->valuestring, "no")) { - g_queryInfo.specifiedQueryInfo.mixed_query = false; - } else { - errorPrint("Invalid mixed_query value: %s\n", - mixedQueryObj->valuestring); - goto PARSE_OVER; - } - } - - tools_cJSON *concurrent = - tools_cJSON_GetObjectItem(specifiedQuery, "concurrent"); - if (tools_cJSON_IsNumber(concurrent)) { - g_queryInfo.specifiedQueryInfo.concurrent = - (uint32_t)concurrent->valueint; - } - - tools_cJSON *threads = - tools_cJSON_GetObjectItem(specifiedQuery, "threads"); - if (tools_cJSON_IsNumber(threads)) { - g_queryInfo.specifiedQueryInfo.concurrent = - (uint32_t)threads->valueint; - } - - tools_cJSON *specifiedAsyncMode = - tools_cJSON_GetObjectItem(specifiedQuery, "mode"); - if (tools_cJSON_IsString(specifiedAsyncMode)) { - if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { - g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; - } else { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } - } else { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } - - tools_cJSON *subscribe_interval = - tools_cJSON_GetObjectItem(specifiedQuery, "subscribe_interval"); - if (tools_cJSON_IsNumber(subscribe_interval)) { - g_queryInfo.specifiedQueryInfo.subscribeInterval = - subscribe_interval->valueint; - } else { - g_queryInfo.specifiedQueryInfo.subscribeInterval = - DEFAULT_SUB_INTERVAL; - } - - tools_cJSON *specifiedSubscribeTimes = - tools_cJSON_GetObjectItem(specifiedQuery, "subscribe_times"); - if (tools_cJSON_IsNumber(specifiedSubscribeTimes)) { - g_queryInfo.specifiedQueryInfo.subscribeTimes = - specifiedSubscribeTimes->valueint; - } else { - g_queryInfo.specifiedQueryInfo.subscribeTimes = - g_queryInfo.query_times; - } - - tools_cJSON *restart = - 
tools_cJSON_GetObjectItem(specifiedQuery, "restart"); - if (tools_cJSON_IsString(restart)) { - if (0 == strcmp("no", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = false; - } else { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } - - tools_cJSON *keepProgress = - tools_cJSON_GetObjectItem(specifiedQuery, "keepProgress"); - if (tools_cJSON_IsString(keepProgress)) { - if (0 == strcmp("yes", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; - } else { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } - - // read sqls from file - tools_cJSON *sqlFileObj = - tools_cJSON_GetObjectItem(specifiedQuery, "sql_file"); - if (tools_cJSON_IsString(sqlFileObj)) { - FILE * fp = fopen(sqlFileObj->valuestring, "r"); - if (fp == NULL) { - errorPrint("failed to open file: %s\n", - sqlFileObj->valuestring); - goto PARSE_OVER; - } - char *buf = benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, true); - while (fgets(buf, TSDB_MAX_ALLOWED_SQL_LEN, fp)) { - SSQL * sql = benchCalloc(1, sizeof(SSQL), true); - benchArrayPush(g_queryInfo.specifiedQueryInfo.sqls, sql); - sql = benchArrayGet(g_queryInfo.specifiedQueryInfo.sqls, - g_queryInfo.specifiedQueryInfo.sqls->size - 1); - int bufLen = strlen(buf) + 1; - sql->command = benchCalloc(1, bufLen, true); - sql->delay_list = benchCalloc( - g_queryInfo.specifiedQueryInfo.queryTimes - * g_queryInfo.specifiedQueryInfo.concurrent, - sizeof(int64_t), true); - tstrncpy(sql->command, buf, bufLen - 1); - debugPrint("read file buffer: %s\n", sql->command); - memset(buf, 0, TSDB_MAX_ALLOWED_SQL_LEN); - } - free(buf); - fclose(fp); - } - // sqls - tools_cJSON *specifiedSqls = - tools_cJSON_GetObjectItem(specifiedQuery, "sqls"); - if (tools_cJSON_IsArray(specifiedSqls)) { - int specifiedSqlSize = tools_cJSON_GetArraySize(specifiedSqls); - for (int j = 0; j < specifiedSqlSize; ++j) { - tools_cJSON *sqlObj = - tools_cJSON_GetArrayItem(specifiedSqls, j); - if (tools_cJSON_IsObject(sqlObj)) { - SSQL * sql = benchCalloc(1, sizeof(SSQL), true); - benchArrayPush(g_queryInfo.specifiedQueryInfo.sqls, sql); - sql = benchArrayGet(g_queryInfo.specifiedQueryInfo.sqls, - g_queryInfo.specifiedQueryInfo.sqls->size -1); - sql->delay_list = benchCalloc( - g_queryInfo.specifiedQueryInfo.queryTimes - * g_queryInfo.specifiedQueryInfo.concurrent, - sizeof(int64_t), true); - - tools_cJSON *sqlStr = - tools_cJSON_GetObjectItem(sqlObj, "sql"); - if (tools_cJSON_IsString(sqlStr)) { - int strLen = strlen(sqlStr->valuestring) + 1; - sql->command = benchCalloc(1, strLen, true); - tstrncpy(sql->command, sqlStr->valuestring, strLen); - // default value is -1, which mean infinite loop - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - tools_cJSON *endAfterConsume = - tools_cJSON_GetObjectItem(specifiedQuery, - "endAfterConsume"); - if (tools_cJSON_IsNumber(endAfterConsume)) { - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = - (int)endAfterConsume->valueint; - } - if (g_queryInfo.specifiedQueryInfo - .endAfterConsume[j] < -1) { - g_queryInfo.specifiedQueryInfo - .endAfterConsume[j] = -1; - } - - g_queryInfo.specifiedQueryInfo - .resubAfterConsume[j] = -1; - tools_cJSON *resubAfterConsume = - tools_cJSON_GetObjectItem( - specifiedQuery, "resubAfterConsume"); - if (tools_cJSON_IsNumber(resubAfterConsume)) { - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] - = 
(int)resubAfterConsume->valueint; - } - - if (g_queryInfo.specifiedQueryInfo - .resubAfterConsume[j] < -1) - g_queryInfo.specifiedQueryInfo - .resubAfterConsume[j] = -1; - - tools_cJSON *result = - tools_cJSON_GetObjectItem(sqlObj, "result"); - if (tools_cJSON_IsString(result)) { - tstrncpy(sql->result, result->valuestring, - MAX_FILE_NAME_LEN); - } else { - memset(sql->result, 0, MAX_FILE_NAME_LEN); - } - } else { - errorPrint("%s", "Invalid sql in json\n"); - goto PARSE_OVER; - } - } - } - } - } - - // super_table_query - tools_cJSON *superQuery = - tools_cJSON_GetObjectItem(json, "super_table_query"); - g_queryInfo.superQueryInfo.threadCnt = 1; - if (!superQuery || superQuery->type != tools_cJSON_Object) { - g_queryInfo.superQueryInfo.sqlCount = 0; - } else { - tools_cJSON *subrate = - tools_cJSON_GetObjectItem(superQuery, "query_interval"); - if (subrate && subrate->type == tools_cJSON_Number) { - g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; - } else { - g_queryInfo.superQueryInfo.queryInterval = 0; - } - - tools_cJSON *superQueryTimes = - tools_cJSON_GetObjectItem(superQuery, "query_times"); - if (superQueryTimes && superQueryTimes->type == tools_cJSON_Number) { - g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; - } else { - g_queryInfo.superQueryInfo.queryTimes = g_queryInfo.query_times; - } - - tools_cJSON *concurrent = - tools_cJSON_GetObjectItem(superQuery, "concurrent"); - if (concurrent && concurrent->type == tools_cJSON_Number) { - g_queryInfo.superQueryInfo.threadCnt = - (uint32_t)concurrent->valueint; - } - - tools_cJSON *threads = tools_cJSON_GetObjectItem(superQuery, "threads"); - if (threads && threads->type == tools_cJSON_Number) { - g_queryInfo.superQueryInfo.threadCnt = (uint32_t)threads->valueint; - } - - tools_cJSON *stblname = - tools_cJSON_GetObjectItem(superQuery, "stblname"); - if (stblname && stblname->type == tools_cJSON_String - && stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.stbName, - stblname->valuestring, - TSDB_TABLE_NAME_LEN); - } - - tools_cJSON *superAsyncMode = - tools_cJSON_GetObjectItem(superQuery, "mode"); - if (superAsyncMode && superAsyncMode->type == tools_cJSON_String - && superAsyncMode->valuestring != NULL) { - if (0 == strcmp("async", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; - } else { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } - } else { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } - - tools_cJSON *superInterval = - tools_cJSON_GetObjectItem(superQuery, "interval"); - if (superInterval && superInterval->type == tools_cJSON_Number) { - g_queryInfo.superQueryInfo.subscribeInterval = - superInterval->valueint; - } else { - g_queryInfo.superQueryInfo.subscribeInterval = - DEFAULT_QUERY_INTERVAL; - } - - tools_cJSON *subrestart = - tools_cJSON_GetObjectItem(superQuery, "restart"); - if (subrestart && subrestart->type == tools_cJSON_String - && subrestart->valuestring != NULL) { - if (0 == strcmp("no", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = false; - } else { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } - } else { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } - - tools_cJSON *superkeepProgress = - tools_cJSON_GetObjectItem(superQuery, "keepProgress"); - if (superkeepProgress && superkeepProgress->type == tools_cJSON_String - && superkeepProgress->valuestring != NULL) { - if (0 == strcmp("yes", superkeepProgress->valuestring)) { - 
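/*
 * A condensed, illustrative query-type configuration built only from keys
 * parsed above (placeholder database, table and file names). In both query
 * blocks "threads" is read after "concurrent" and overwrites it, "mode" is
 * treated as sync unless it equals "async", and statements can come either
 * from the "sqls" array or one-per-line from the file named in "sql_file".
 *
 *   {
 *     "filetype":    "query",
 *     "query_times": 2,
 *     "query_mode":  "taosc",
 *     "databases":   "test",
 *     "specified_table_query": {
 *       "query_interval": 0,
 *       "concurrent":     1,
 *       "mixed_query":    "no",
 *       "sqls": [
 *         { "sql": "select count(*) from meters", "result": "./res0.txt" }
 *       ]
 *     },
 *     "super_table_query": {
 *       "stblname":   "meters",
 *       "threads":    1,
 *       "sqls": [
 *         { "sql": "select last_row(ts) from meters", "result": "" }
 *       ]
 *     }
 *   }
 */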
g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; - } else { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } - } else { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } - - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.endAfterConsume = -1; - tools_cJSON *superEndAfterConsume = - tools_cJSON_GetObjectItem(superQuery, "endAfterConsume"); - if (superEndAfterConsume && - superEndAfterConsume->type == tools_cJSON_Number) { - g_queryInfo.superQueryInfo.endAfterConsume = - (int)superEndAfterConsume->valueint; - } - if (g_queryInfo.superQueryInfo.endAfterConsume < -1) - g_queryInfo.superQueryInfo.endAfterConsume = -1; - - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.resubAfterConsume = -1; - tools_cJSON *superResubAfterConsume = - tools_cJSON_GetObjectItem(superQuery, "resubAfterConsume"); - if ((superResubAfterConsume) && - (superResubAfterConsume->type == tools_cJSON_Number) && - (superResubAfterConsume->valueint >= 0)) { - g_queryInfo.superQueryInfo.resubAfterConsume = - (int)superResubAfterConsume->valueint; - } - if (g_queryInfo.superQueryInfo.resubAfterConsume < -1) - g_queryInfo.superQueryInfo.resubAfterConsume = -1; - - // supert table sqls - tools_cJSON *superSqls = tools_cJSON_GetObjectItem(superQuery, "sqls"); - if (!superSqls || superSqls->type != tools_cJSON_Array) { - g_queryInfo.superQueryInfo.sqlCount = 0; - } else { - int superSqlSize = tools_cJSON_GetArraySize(superSqls); - if (superSqlSize > MAX_QUERY_SQL_COUNT) { - errorPrint( - "failed to read json, query sql size overflow, max is %d\n", - MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.superQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - tools_cJSON *sql = tools_cJSON_GetArrayItem(superSqls, j); - if (sql == NULL) continue; - - tools_cJSON *sqlStr = tools_cJSON_GetObjectItem(sql, "sql"); - if (sqlStr && sqlStr->type == tools_cJSON_String) { - tstrncpy(g_queryInfo.superQueryInfo.sql[j], - sqlStr->valuestring, TSDB_MAX_ALLOWED_SQL_LEN); - } - - tools_cJSON *result = tools_cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == tools_cJSON_String - && result->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else { - memset(g_queryInfo.superQueryInfo.result[j], 0, - MAX_FILE_NAME_LEN); - } - } - } - } - - code = 0; - -PARSE_OVER: - return code; -} - -#ifdef TD_VER_COMPATIBLE_3_0_0_0 -static int getMetaFromTmqJsonFile(tools_cJSON *json) { - int32_t code = -1; - - tools_cJSON *cfgdir = tools_cJSON_GetObjectItem(json, "cfgdir"); - if (tools_cJSON_IsString(cfgdir)) { - tstrncpy(g_configDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } - -#ifdef LINUX - if (strlen(g_configDir)) { - wordexp_t full_path; - if (wordexp(g_configDir, &full_path, 0) != 0) { - errorPrint("Invalid path %s\n", g_configDir); - exit(EXIT_FAILURE); - } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - } -#endif - - tools_cJSON *resultfile = tools_cJSON_GetObjectItem(json, "result_file"); - if (resultfile && resultfile->type == tools_cJSON_String - && resultfile->valuestring != NULL) { - g_arguments->output_file = resultfile->valuestring; - } - - tools_cJSON *answerPrompt = - tools_cJSON_GetObjectItem(json, - "confirm_parameter_prompt"); // yes, no, - if (tools_cJSON_IsString(answerPrompt)) { - if (0 == strcasecmp(answerPrompt->valuestring, "no")) { - g_arguments->answer_yes = true; - } - } - - // 
consumer info - tools_cJSON *tmqInfo = tools_cJSON_GetObjectItem(json, "tmq_info"); - g_tmqInfo.consumerInfo.concurrent = 1; - - tools_cJSON *concurrent = tools_cJSON_GetObjectItem(tmqInfo, "concurrent"); - if (tools_cJSON_IsNumber(concurrent)) { - g_tmqInfo.consumerInfo.concurrent = (uint32_t)concurrent->valueint; - } - - tools_cJSON *pollDelay = tools_cJSON_GetObjectItem(tmqInfo, "poll_delay"); - if (tools_cJSON_IsNumber(pollDelay)) { - g_tmqInfo.consumerInfo.pollDelay = (uint32_t)pollDelay->valueint; - } - - tools_cJSON *autoCommitInterval = tools_cJSON_GetObjectItem( - tmqInfo, "auto.commit.interval.ms"); - if (tools_cJSON_IsNumber(autoCommitInterval)) { - g_tmqInfo.consumerInfo.autoCommitIntervalMs = - (uint32_t)autoCommitInterval->valueint; - } - - tools_cJSON *groupId = tools_cJSON_GetObjectItem(tmqInfo, "group.id"); - if (tools_cJSON_IsString(groupId)) { - g_tmqInfo.consumerInfo.groupId = groupId->valuestring; - } - - tools_cJSON *clientId = tools_cJSON_GetObjectItem(tmqInfo, "client.id"); - if (tools_cJSON_IsString(clientId)) { - g_tmqInfo.consumerInfo.clientId = clientId->valuestring; - } - - tools_cJSON *autoOffsetReset = tools_cJSON_GetObjectItem( - tmqInfo, "auto.offset.reset"); - if (tools_cJSON_IsString(autoOffsetReset)) { - g_tmqInfo.consumerInfo.autoOffsetReset = autoOffsetReset->valuestring; - } - - tools_cJSON *enableAutoCommit = tools_cJSON_GetObjectItem( - tmqInfo, "enable.auto.commit"); - if (tools_cJSON_IsString(enableAutoCommit)) { - g_tmqInfo.consumerInfo.enableAutoCommit = enableAutoCommit->valuestring; - } - - tools_cJSON *enableManualCommit = tools_cJSON_GetObjectItem( - tmqInfo, "enable.manual.commit"); - if (tools_cJSON_IsString(enableManualCommit)) { - g_tmqInfo.consumerInfo.enableManualCommit = - enableManualCommit->valuestring; - } - - tools_cJSON *enableHeartbeatBackground = tools_cJSON_GetObjectItem( - tmqInfo, "enable.heartbeat.background"); - if (tools_cJSON_IsString(enableHeartbeatBackground)) { - g_tmqInfo.consumerInfo.enableHeartbeatBackground = - enableHeartbeatBackground->valuestring; - } - - tools_cJSON *snapshotEnable = tools_cJSON_GetObjectItem( - tmqInfo, "experimental.snapshot.enable"); - if (tools_cJSON_IsString(snapshotEnable)) { - g_tmqInfo.consumerInfo.snapshotEnable = snapshotEnable->valuestring; - } - - tools_cJSON *msgWithTableName = tools_cJSON_GetObjectItem( - tmqInfo, "msg.with.table.name"); - if (tools_cJSON_IsString(msgWithTableName)) { - g_tmqInfo.consumerInfo.msgWithTableName = msgWithTableName->valuestring; - } - - tools_cJSON *rowsFile = tools_cJSON_GetObjectItem(tmqInfo, "rows_file"); - if (tools_cJSON_IsString(rowsFile)) { - g_tmqInfo.consumerInfo.rowsFile = rowsFile->valuestring; - } - - g_tmqInfo.consumerInfo.expectRows = -1; - tools_cJSON *expectRows = tools_cJSON_GetObjectItem(tmqInfo, "expect_rows"); - if (tools_cJSON_IsNumber(expectRows)) { - g_tmqInfo.consumerInfo.expectRows = (uint32_t)expectRows->valueint; - } - - tools_cJSON *topicList = tools_cJSON_GetObjectItem(tmqInfo, "topic_list"); - if (tools_cJSON_IsArray(topicList)) { - int topicCount = tools_cJSON_GetArraySize(topicList); - for (int j = 0; j < topicCount; ++j) { - tools_cJSON *topicObj = tools_cJSON_GetArrayItem(topicList, j); - if (tools_cJSON_IsObject(topicObj)) { - tools_cJSON *topicName = tools_cJSON_GetObjectItem( - topicObj, "name"); - if (tools_cJSON_IsString(topicName)) { - // int strLen = strlen(topicName->valuestring) + 1; - tstrncpy(g_tmqInfo.consumerInfo.topicName[ - g_tmqInfo.consumerInfo.topicCount], - topicName->valuestring, 255); - - } else 
{ - errorPrint("%s", "Invalid topic name in json\n"); - goto TMQ_PARSE_OVER; - } - - tools_cJSON *sqlString = tools_cJSON_GetObjectItem( - topicObj, "sql"); - if (tools_cJSON_IsString(sqlString)) { - // int strLen = strlen(sqlString->valuestring) + 1; - tstrncpy(g_tmqInfo.consumerInfo.topicSql[ - g_tmqInfo.consumerInfo.topicCount], - sqlString->valuestring, 255); - - } else { - errorPrint("%s", "Invalid topic sql in json\n"); - goto TMQ_PARSE_OVER; - } - g_tmqInfo.consumerInfo.topicCount++; - } - } - } - code = 0; -TMQ_PARSE_OVER: - return code; -} -#endif - -int getInfoFromJsonFile() { - char * file = g_arguments->metaFile; - int32_t code = -1; - FILE * fp = fopen(file, "r"); - if (!fp) { - errorPrint("failed to read %s, reason:%s\n", file, - strerror(errno)); - return code; - } - - int maxLen = MAX_JSON_BUFF; - char *content = benchCalloc(1, maxLen + 1, false); - int len = (int)fread(content, 1, maxLen, fp); - if (len <= 0) { - errorPrint("failed to read %s, content is null", file); - goto PARSE_OVER; - } - - content[len] = 0; - root = tools_cJSON_Parse(content); - if (root == NULL) { - errorPrint("failed to cjson parse %s, invalid json format\n", - file); - goto PARSE_OVER; - } - - char *pstr = tools_cJSON_Print(root); - infoPrint("%s\n%s\n", file, pstr); - tmfree(pstr); - - tools_cJSON *filetype = tools_cJSON_GetObjectItem(root, "filetype"); - if (tools_cJSON_IsString(filetype)) { - if (0 == strcasecmp("insert", filetype->valuestring)) { - g_arguments->test_mode = INSERT_TEST; - } else if (0 == strcasecmp("query", filetype->valuestring)) { - g_arguments->test_mode = QUERY_TEST; - } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { - g_arguments->test_mode = SUBSCRIBE_TEST; - } else { - errorPrint("%s", - "failed to read json, filetype not support\n"); - goto PARSE_OVER; - } - } else { - g_arguments->test_mode = INSERT_TEST; - } - - // read common item - code = getMetaFromCommonJsonFile(root); - if (INSERT_TEST == g_arguments->test_mode) { - code = getMetaFromInsertJsonFile(root); -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - } else if (QUERY_TEST == g_arguments->test_mode) { -#else - } else { -#endif - memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); - code = getMetaFromQueryJsonFile(root); -#ifdef TD_VER_COMPATIBLE_3_0_0_0 - } else if (SUBSCRIBE_TEST == g_arguments->test_mode) { - memset(&g_tmqInfo, 0, sizeof(STmqMetaInfo)); - code = getMetaFromTmqJsonFile(root); -#endif - } -PARSE_OVER: - free(content); - fclose(fp); - return code; -} +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the MIT license as published by the Free Software + * Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ */ + +#include + +extern char g_configDir[MAX_PATH_LEN]; + +static int getColumnAndTagTypeFromInsertJsonFile( + tools_cJSON * superTblObj, SSuperTable *stbInfo) { + int32_t code = -1; + + // columns + tools_cJSON *columnsObj = + tools_cJSON_GetObjectItem(superTblObj, "columns"); + if (!tools_cJSON_IsArray(columnsObj)) { + goto PARSE_OVER; + } + benchArrayClear(stbInfo->cols); + + int columnSize = tools_cJSON_GetArraySize(columnsObj); + + int index = 0; + for (int k = 0; k < columnSize; ++k) { + bool sma = false; + bool customName = false; + uint8_t type = 0; + int count = 1; + int64_t max = RAND_MAX >> 1; + int64_t min = 0; + int32_t length = 4; + + tools_cJSON *column = tools_cJSON_GetArrayItem(columnsObj, k); + if (!tools_cJSON_IsObject(column)) { + errorPrint("%s", "Invalid column format in json\n"); + goto PARSE_OVER; + } + tools_cJSON *countObj = tools_cJSON_GetObjectItem(column, "count"); + if (tools_cJSON_IsNumber(countObj)) { + count = (int)countObj->valueint; + } else { + count = 1; + } + + tools_cJSON *dataName = tools_cJSON_GetObjectItem(column, "name"); + if (tools_cJSON_IsString(dataName)) { + customName = true; + } + + // column info + tools_cJSON *dataType = tools_cJSON_GetObjectItem(column, "type"); + if (!tools_cJSON_IsString(dataType)) { + goto PARSE_OVER; + } + type = convertStringToDatatype(dataType->valuestring, 0); + + tools_cJSON *dataMax = tools_cJSON_GetObjectItem(column, "max"); + if (tools_cJSON_IsNumber(dataMax)) { + max = dataMax->valueint; + } else { + max = convertDatatypeToDefaultMax(type); + } + + tools_cJSON *dataMin = tools_cJSON_GetObjectItem(column, "min"); + if (tools_cJSON_IsNumber(dataMin)) { + min = dataMin->valueint; + } else { + min = convertDatatypeToDefaultMin(type); + } + + tools_cJSON *dataValues = tools_cJSON_GetObjectItem(column, "values"); + + if (g_arguments->taosc_version == 3) { + tools_cJSON *sma_value = tools_cJSON_GetObjectItem(column, "sma"); + if (tools_cJSON_IsString(sma_value) && + (0 == strcasecmp(sma_value->valuestring, "yes"))) { + sma = true; + } + } + + tools_cJSON * dataLen = tools_cJSON_GetObjectItem(column, "len"); + if (tools_cJSON_IsNumber(dataLen)) { + length = (int32_t)dataLen->valueint; + } else { + if (type == TSDB_DATA_TYPE_BINARY + || type == TSDB_DATA_TYPE_JSON + || type == TSDB_DATA_TYPE_NCHAR) { + length = g_arguments->binwidth; + } else { + length = convertTypeToLength(type); + } + } + + for (int n = 0; n < count; ++n) { + Field * col = benchCalloc(1, sizeof(Field), true); + benchArrayPush(stbInfo->cols, col); + col = benchArrayGet(stbInfo->cols, stbInfo->cols->size - 1); + col->type = type; + col->length = length; + if (length == 0) { + col->null = true; + } + col->sma = sma; + col->max = max; + col->min = min; + col->values = dataValues; + if (customName) { + if (n >= 1) { + snprintf(col->name, TSDB_COL_NAME_LEN, + "%s_%d", dataName->valuestring, n); + } else { + snprintf(col->name, TSDB_COL_NAME_LEN, + "%s", dataName->valuestring); + } + } else { + snprintf(col->name, TSDB_COL_NAME_LEN, "c%d", index); + } + index++; + } + } + + index = 0; + // tags + benchArrayClear(stbInfo->tags); + tools_cJSON *tags = tools_cJSON_GetObjectItem(superTblObj, "tags"); + if (!tools_cJSON_IsArray(tags)) { + return 0; + } + + int tagSize = tools_cJSON_GetArraySize(tags); + + stbInfo->use_metric = true; + for (int k = 0; k < tagSize; ++k) { + bool customName = false; + uint8_t type = 0; + int count = 1; + int64_t max = RAND_MAX >> 1; + int64_t min = 0; + int32_t length = 4; + tools_cJSON *tagObj = tools_cJSON_GetArrayItem(tags, 
k); + if (!tools_cJSON_IsObject(tagObj)) { + errorPrint("%s", "Invalid tag format in json\n"); + goto PARSE_OVER; + } + tools_cJSON *countObj = tools_cJSON_GetObjectItem(tagObj, "count"); + if (tools_cJSON_IsNumber(countObj)) { + count = (int)countObj->valueint; + } else { + count = 1; + } + + tools_cJSON *dataName = tools_cJSON_GetObjectItem(tagObj, "name"); + if (tools_cJSON_IsString(dataName)) { + customName = true; + } + + tools_cJSON *dataType = tools_cJSON_GetObjectItem(tagObj, "type"); + if (!tools_cJSON_IsString(dataType)) { + goto PARSE_OVER; + } + type = convertStringToDatatype(dataType->valuestring, 0); + + if ((tagSize == 1) && (type == TSDB_DATA_TYPE_JSON)) { + Field * tag = benchCalloc(1, sizeof(Field), true); + benchArrayPush(stbInfo->tags, tag); + tag = benchArrayGet(stbInfo->tags, stbInfo->tags->size - 1); + if (customName) { + snprintf(tag->name, TSDB_COL_NAME_LEN, + "%s", dataName->valuestring); + } else { + snprintf(tag->name, TSDB_COL_NAME_LEN, "jtag"); + } + tag->type = type; + tag->length = length; + stbInfo->tags->size = count; + return 0; + } + + tools_cJSON *dataMax = tools_cJSON_GetObjectItem(tagObj, "max"); + if (tools_cJSON_IsNumber(dataMax)) { + max = dataMax->valueint; + } else { + max = convertDatatypeToDefaultMax(type); + } + + tools_cJSON *dataMin = tools_cJSON_GetObjectItem(tagObj, "min"); + if (tools_cJSON_IsNumber(dataMin)) { + min = dataMin->valueint; + } else { + min = convertDatatypeToDefaultMin(type); + } + + tools_cJSON *dataValues = tools_cJSON_GetObjectItem(tagObj, "values"); + + tools_cJSON * dataLen = tools_cJSON_GetObjectItem(tagObj, "len"); + if (tools_cJSON_IsNumber(dataLen)) { + length = (int32_t)dataLen->valueint; + } else { + if (type == TSDB_DATA_TYPE_BINARY + || type == TSDB_DATA_TYPE_JSON + || type == TSDB_DATA_TYPE_NCHAR) { + length = g_arguments->binwidth; + } else { + length = convertTypeToLength(type); + } + } + + for (int n = 0; n < count; ++n) { + Field * tag = benchCalloc(1, sizeof(Field), true); + benchArrayPush(stbInfo->tags, tag); + tag = benchArrayGet(stbInfo->tags, stbInfo->tags->size - 1); + tag->type = type; + tag->length = length; + if (length == 0) { + tag->null = true; + } + tag->max = max; + tag->min = min; + tag->values = dataValues; + if (customName) { + if (n >= 1) { + snprintf(tag->name, TSDB_COL_NAME_LEN, + "%s_%d", dataName->valuestring, n); + } else { + snprintf(tag->name, TSDB_COL_NAME_LEN, + "%s", dataName->valuestring); + } + } else { + snprintf(tag->name, TSDB_COL_NAME_LEN, "t%d", index); + } + index++; + } + } + code = 0; +PARSE_OVER: + return code; +} + +int32_t getDurationVal(tools_cJSON *jsonObj) { + int32_t durMinute = 0; + // get duration value + if (tools_cJSON_IsString(jsonObj)) { + char *val = jsonObj->valuestring; + // like 10d or 10h or 10m + int32_t len = strlen(val); + if (len == 0) return 0; + durMinute = atoi(val); + if (strchr(val, 'h') || strchr(val, 'H')) { + // hour + durMinute *= 60; + } else if (strchr(val, 'm') || strchr(val, 'M')) { + // minute + durMinute *= 1; + } else { + // day + durMinute *= 24 * 60; + } + } else if (tools_cJSON_IsNumber(jsonObj)) { + durMinute = jsonObj->valueint * 24 * 60; + } + + return durMinute; +} + +static int getDatabaseInfo(tools_cJSON *dbinfos, int index) { + SDataBase *database; + if (index > 0) { + database = benchCalloc(1, sizeof(SDataBase), true); + benchArrayPush(g_arguments->databases, database); + } + database = benchArrayGet(g_arguments->databases, index); + if (database->cfgs == NULL) { + database->cfgs = benchArrayInit(1, sizeof(SDbCfg)); + } 
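/*
 * An illustrative "columns"/"tags" fragment for the parser above
 * (placeholder values). Each entry is expanded "count" times; a custom
 * "name" gets an "_<n>" suffix from the second copy onwards, otherwise
 * columns are named c0, c1, ... and tags t0, t1, ... (a lone JSON-type tag
 * becomes "jtag"). "len": 0 marks the field as NULL, and "sma" is only
 * honoured when taosc_version == 3.
 *
 *   "columns": [
 *     { "type": "FLOAT",  "name": "current", "max": 20, "min": 0 },
 *     { "type": "BINARY", "len": 16, "count": 2 },
 *     { "type": "INT",    "sma": "yes" }
 *   ],
 *   "tags": [
 *     { "type": "TINYINT", "name": "groupid",  "max": 10, "min": 1 },
 *     { "type": "BINARY",  "name": "location", "len": 24 }
 *   ]
 */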
+ database->drop = true; + database->flush = false; + database->precision = TSDB_TIME_PRECISION_MILLI; + database->sml_precision = TSDB_SML_TIMESTAMP_MILLI_SECONDS; + tools_cJSON *dbinfo = tools_cJSON_GetArrayItem(dbinfos, index); + tools_cJSON *db = tools_cJSON_GetObjectItem(dbinfo, "dbinfo"); + if (!tools_cJSON_IsObject(db)) { + errorPrint("%s", "Invalid dbinfo format in json\n"); + return -1; + } + + tools_cJSON* cfg_object = db->child; + + while (cfg_object) { + if (0 == strcasecmp(cfg_object->string, "name")) { + if (tools_cJSON_IsString(cfg_object)) { + database->dbName = cfg_object->valuestring; + } + } else if (0 == strcasecmp(cfg_object->string, "drop")) { + if (tools_cJSON_IsString(cfg_object) + && (0 == strcasecmp(cfg_object->valuestring, "no"))) { + database->drop = false; + } + } else if (0 == strcasecmp(cfg_object->string, "flush_each_batch")) { + if (tools_cJSON_IsString(cfg_object) + && (0 == strcasecmp(cfg_object->valuestring, "yes"))) { + database->flush = true; + } + } else if (0 == strcasecmp(cfg_object->string, "precision")) { + if (tools_cJSON_IsString(cfg_object)) { + if (0 == strcasecmp(cfg_object->valuestring, "us")) { + database->precision = TSDB_TIME_PRECISION_MICRO; + database->sml_precision = TSDB_SML_TIMESTAMP_MICRO_SECONDS; + } else if (0 == strcasecmp(cfg_object->valuestring, "ns")) { + database->precision = TSDB_TIME_PRECISION_NANO; + database->sml_precision = TSDB_SML_TIMESTAMP_NANO_SECONDS; + } + } + } else { + SDbCfg* cfg = benchCalloc(1, sizeof(SDbCfg), true); + cfg->name = cfg_object->string; + + // get duration value + if (0 == strcasecmp(cfg_object->string, "duration")) { + database->durMinute = getDurationVal(cfg_object); + } + + if (tools_cJSON_IsString(cfg_object)) { + cfg->valuestring = cfg_object->valuestring; + } else if (tools_cJSON_IsNumber(cfg_object)) { + cfg->valueint = (int)cfg_object->valueint; + cfg->valuestring = NULL; + } else { + errorPrint("Invalid value format for %s\n", cfg->name); + free(cfg); + return -1; + } + benchArrayPush(database->cfgs, cfg); + } + cfg_object = cfg_object->next; + } + + // set default + if (database->durMinute == 0) { + database->durMinute = TSDB_DEFAULT_DURATION_PER_FILE; + } + + if (database->dbName == NULL) { + errorPrint("%s", "miss name in dbinfo\n"); + return -1; + } + + return 0; +} + +static int get_tsma_info(tools_cJSON* stb_obj, SSuperTable* stbInfo) { + stbInfo->tsmas = benchArrayInit(1, sizeof(TSMA)); + tools_cJSON* tsmas_obj = tools_cJSON_GetObjectItem(stb_obj, "tsmas"); + if (tsmas_obj == NULL) { + return 0; + } + if (!tools_cJSON_IsArray(tsmas_obj)) { + errorPrint("%s", "invalid tsmas format in json\n"); + return -1; + } + for (int i = 0; i < tools_cJSON_GetArraySize(tsmas_obj); ++i) { + tools_cJSON* tsma_obj = tools_cJSON_GetArrayItem(tsmas_obj, i); + if (!tools_cJSON_IsObject(tsma_obj)) { + errorPrint("%s", "Invalid tsma format in json\n"); + return -1; + } + TSMA* tsma = benchCalloc(1, sizeof(TSMA), true); + if (NULL == tsma) { + errorPrint("%s() failed to allocate memory\n", __func__); + } + tools_cJSON* tsma_name_obj = tools_cJSON_GetObjectItem(tsma_obj, + "name"); + if (!tools_cJSON_IsString(tsma_name_obj)) { + errorPrint("%s", "Invalid tsma name format in json\n"); + free(tsma); + return -1; + } + tsma->name = tsma_name_obj->valuestring; + + tools_cJSON* tsma_func_obj = + tools_cJSON_GetObjectItem(tsma_obj, "function"); + if (!tools_cJSON_IsString(tsma_func_obj)) { + errorPrint("%s", "Invalid tsma function format in json\n"); + free(tsma); + return -1; + } + tsma->func = 
tsma_func_obj->valuestring; + + tools_cJSON* tsma_interval_obj = + tools_cJSON_GetObjectItem(tsma_obj, "interval"); + if (!tools_cJSON_IsString(tsma_interval_obj)) { + errorPrint("%s", "Invalid tsma interval format in json\n"); + free(tsma); + return -1; + } + tsma->interval = tsma_interval_obj->valuestring; + + tools_cJSON* tsma_sliding_obj = + tools_cJSON_GetObjectItem(tsma_obj, "sliding"); + if (!tools_cJSON_IsString(tsma_sliding_obj)) { + errorPrint("%s", "Invalid tsma sliding format in json\n"); + free(tsma); + return -1; + } + tsma->sliding = tsma_sliding_obj->valuestring; + + tools_cJSON* tsma_custom_obj = + tools_cJSON_GetObjectItem(tsma_obj, "custom"); + tsma->custom = tsma_custom_obj->valuestring; + + tools_cJSON* tsma_start_obj = + tools_cJSON_GetObjectItem(tsma_obj, "start_when_inserted"); + if (!tools_cJSON_IsNumber(tsma_start_obj)) { + tsma->start_when_inserted = 0; + } else { + tsma->start_when_inserted = (int)tsma_start_obj->valueint; + } + + benchArrayPush(stbInfo->tsmas, tsma); + } + + return 0; +} + +void parseStringToIntArray(char *str, BArray *arr) { + benchArrayClear(arr); + if (NULL == strstr(str, ",")) { + int *val = benchCalloc(1, sizeof(int), true); + *val = atoi(str); + benchArrayPush(arr, val); + } else { + char *dup_str = strdup(str); + char *running = dup_str; + char *token = strsep(&running, ","); + while (token) { + int *val = benchCalloc(1, sizeof(int), true); + *val = atoi(token); + benchArrayPush(arr, val); + token = strsep(&running, ","); + } + tmfree(dup_str); + } +} + +static int getStableInfo(tools_cJSON *dbinfos, int index) { + SDataBase *database = benchArrayGet(g_arguments->databases, index); + tools_cJSON *dbinfo = tools_cJSON_GetArrayItem(dbinfos, index); + tools_cJSON *stables = tools_cJSON_GetObjectItem(dbinfo, "super_tables"); + if (!tools_cJSON_IsArray(stables)) { + infoPrint("create database %s without stables\n", database->dbName); + return 0; + } + for (int i = 0; i < tools_cJSON_GetArraySize(stables); ++i) { + SSuperTable *superTable; + if (index > 0 || i > 0) { + superTable = benchCalloc(1, sizeof(SSuperTable), true); + benchArrayPush(database->superTbls, superTable); + superTable = benchArrayGet(database->superTbls, i); + superTable->cols = benchArrayInit(1, sizeof(Field)); + superTable->tags = benchArrayInit(1, sizeof(Field)); + } else { + superTable = benchArrayGet(database->superTbls, i); + } + superTable->autoTblCreating = false; + superTable->batchTblCreatingNum = DEFAULT_CREATE_BATCH; + superTable->batchTblCreatingNumbers = NULL; + superTable->batchTblCreatingIntervals = NULL; + superTable->childTblExists = false; + superTable->random_data_source = true; + superTable->iface = TAOSC_IFACE; + superTable->lineProtocol = TSDB_SML_LINE_PROTOCOL; + superTable->tcpTransfer = false; + superTable->childTblOffset = 0; + superTable->timestamp_step = 1; + superTable->useSampleTs = false; + superTable->non_stop = false; + superTable->insertRows = 0; + superTable->interlaceRows = 0; + superTable->disorderRatio = 0; + superTable->disorderRange = DEFAULT_DISORDER_RANGE; + superTable->insert_interval = g_arguments->insert_interval; + superTable->max_sql_len = TSDB_MAX_ALLOWED_SQL_LEN; + superTable->partialColNum = 0; + superTable->comment = NULL; + superTable->delay = -1; + superTable->file_factor = -1; + superTable->rollup = NULL; + tools_cJSON *stbInfo = tools_cJSON_GetArrayItem(stables, i); + tools_cJSON *itemObj; + + tools_cJSON *stbName = tools_cJSON_GetObjectItem(stbInfo, "name"); + if (tools_cJSON_IsString(stbName)) { + superTable->stbName 
= stbName->valuestring; + } + + tools_cJSON *prefix = + tools_cJSON_GetObjectItem(stbInfo, "childtable_prefix"); + if (tools_cJSON_IsString(prefix)) { + superTable->childTblPrefix = prefix->valuestring; + } + tools_cJSON *childTbleSample = + tools_cJSON_GetObjectItem(stbInfo, "childtable_sample_file"); + if (tools_cJSON_IsString(childTbleSample)) { + superTable->childTblSample = childTbleSample->valuestring; + } + tools_cJSON *autoCreateTbl = + tools_cJSON_GetObjectItem(stbInfo, "auto_create_table"); + if (tools_cJSON_IsString(autoCreateTbl) + && (0 == strcasecmp(autoCreateTbl->valuestring, "yes"))) { + superTable->autoTblCreating = true; + } + tools_cJSON *batchCreateTbl = + tools_cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); + if (tools_cJSON_IsNumber(batchCreateTbl)) { + superTable->batchTblCreatingNum = batchCreateTbl->valueint; + } + tools_cJSON *batchTblCreatingNumbers = + tools_cJSON_GetObjectItem(stbInfo, "batch_create_tbl_numbers"); + if (tools_cJSON_IsString(batchTblCreatingNumbers)) { + superTable->batchTblCreatingNumbers + = batchTblCreatingNumbers->valuestring; + superTable->batchTblCreatingNumbersArray = + benchArrayInit(1, sizeof(int)); + parseStringToIntArray(superTable->batchTblCreatingNumbers, + superTable->batchTblCreatingNumbersArray); + } + tools_cJSON *batchTblCreatingIntervals = + tools_cJSON_GetObjectItem(stbInfo, "batch_create_tbl_intervals"); + if (tools_cJSON_IsString(batchTblCreatingIntervals)) { + superTable->batchTblCreatingIntervals + = batchTblCreatingIntervals->valuestring; + superTable->batchTblCreatingIntervalsArray = + benchArrayInit(1, sizeof(int)); + parseStringToIntArray(superTable->batchTblCreatingIntervals, + superTable->batchTblCreatingIntervalsArray); + } + tools_cJSON *childTblExists = + tools_cJSON_GetObjectItem(stbInfo, "child_table_exists"); + if (tools_cJSON_IsString(childTblExists) + && (0 == strcasecmp(childTblExists->valuestring, "yes")) + && !database->drop) { + superTable->childTblExists = true; + superTable->autoTblCreating = false; + } + + tools_cJSON *childTableCount = + tools_cJSON_GetObjectItem(stbInfo, "childtable_count"); + if (tools_cJSON_IsNumber(childTableCount)) { + superTable->childTblCount = childTableCount->valueint; + g_arguments->totalChildTables += superTable->childTblCount; + } else { + superTable->childTblCount = 0; + g_arguments->totalChildTables += superTable->childTblCount; + } + + tools_cJSON *dataSource = + tools_cJSON_GetObjectItem(stbInfo, "data_source"); + if (tools_cJSON_IsString(dataSource) + && (0 == strcasecmp(dataSource->valuestring, "sample"))) { + superTable->random_data_source = false; + } + + tools_cJSON *stbIface = + tools_cJSON_GetObjectItem(stbInfo, "insert_mode"); + if (tools_cJSON_IsString(stbIface)) { + if (0 == strcasecmp(stbIface->valuestring, "rest")) { + superTable->iface = REST_IFACE; + } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { + superTable->iface = STMT_IFACE; + if (g_arguments->reqPerReq > INT16_MAX) { + g_arguments->reqPerReq = INT16_MAX; + } + if (g_arguments->reqPerReq > g_arguments->prepared_rand) { + g_arguments->prepared_rand = g_arguments->reqPerReq; + } + } else if (0 == strcasecmp(stbIface->valuestring, "sml")) { + if (g_arguments->reqPerReq > SML_MAX_BATCH) { + errorPrint("reqPerReq (%u) larger than maximum (%d)\n", + g_arguments->reqPerReq, SML_MAX_BATCH); + return -1; + } + superTable->iface = SML_IFACE; + } else if (0 == strcasecmp(stbIface->valuestring, "sml-rest")) { + if (g_arguments->reqPerReq > SML_MAX_BATCH) { + errorPrint("reqPerReq (%u) 
larger than maximum (%d)\n", + g_arguments->reqPerReq, SML_MAX_BATCH); + return -1; + } + superTable->iface = SML_REST_IFACE; + if (0 != convertServAddr(REST_IFACE, + false, + 1)) { + errorPrint("%s", "Failed to convert server address\n"); + return -1; + } + encodeAuthBase64(); + g_arguments->rest_server_ver_major = + getServerVersionRest(g_arguments->port + TSDB_PORT_HTTP); + } + } + +#ifdef WEBSOCKET + if (g_arguments->websocket) { + infoPrint("Since WebSocket interface is enabled, " + "the interface %s is changed to use WebSocket.\n", + stbIface->valuestring); + superTable->iface = TAOSC_IFACE; + } +#endif + + tools_cJSON *stbLineProtocol = + tools_cJSON_GetObjectItem(stbInfo, "line_protocol"); + if (tools_cJSON_IsString(stbLineProtocol)) { + if (0 == strcasecmp(stbLineProtocol->valuestring, "line")) { + superTable->lineProtocol = TSDB_SML_LINE_PROTOCOL; + } else if (0 == strcasecmp(stbLineProtocol->valuestring, + "telnet")) { + superTable->lineProtocol = TSDB_SML_TELNET_PROTOCOL; + } else if (0 == strcasecmp(stbLineProtocol->valuestring, "json")) { + superTable->lineProtocol = TSDB_SML_JSON_PROTOCOL; + } else if (0 == strcasecmp( + stbLineProtocol->valuestring, "taosjson")) { + superTable->lineProtocol = SML_JSON_TAOS_FORMAT; + } + } + tools_cJSON *transferProtocol = + tools_cJSON_GetObjectItem(stbInfo, "tcp_transfer"); + if (tools_cJSON_IsString(transferProtocol) + && (0 == strcasecmp(transferProtocol->valuestring, "yes"))) { + superTable->tcpTransfer = true; + } + tools_cJSON *childTbl_limit = + tools_cJSON_GetObjectItem(stbInfo, "childtable_limit"); + if (tools_cJSON_IsNumber(childTbl_limit)) { + if (childTbl_limit->valueint >= 0) { + superTable->childTblLimit = childTbl_limit->valueint; + if (superTable->childTblLimit > superTable->childTblCount) { + warnPrint("child table limit %"PRId64" " + "is more than %"PRId64", set to %"PRId64"\n", + childTbl_limit->valueint, + superTable->childTblCount, + superTable->childTblCount); + superTable->childTblLimit = superTable->childTblCount; + } + } else { + warnPrint("child table limit %"PRId64" is invalid, " + "set to %"PRId64"\n", + childTbl_limit->valueint, + superTable->childTblCount); + superTable->childTblLimit = superTable->childTblCount; + } + } + tools_cJSON *childTbl_offset = + tools_cJSON_GetObjectItem(stbInfo, "childtable_offset"); + if (tools_cJSON_IsNumber(childTbl_offset)) { + superTable->childTblOffset = childTbl_offset->valueint; + } + tools_cJSON *childTbl_from = + tools_cJSON_GetObjectItem(stbInfo, "childtable_from"); + if (tools_cJSON_IsNumber(childTbl_from)) { + if (childTbl_from->valueint >= 0) { + superTable->childTblFrom = childTbl_from->valueint; + } else { + warnPrint("child table _from_ %"PRId64" is invalid, set to 0\n", + childTbl_from->valueint); + superTable->childTblFrom = 0; + } + } + tools_cJSON *childTbl_to = + tools_cJSON_GetObjectItem(stbInfo, "childtable_to"); + if (tools_cJSON_IsNumber(childTbl_to)) { + superTable->childTblTo = childTbl_to->valueint; + if (superTable->childTblTo < superTable->childTblFrom) { + errorPrint("child table _to_ is invalid number," + "%"PRId64" < %"PRId64"\n", + superTable->childTblTo, superTable->childTblFrom); + return -1; + } + } + + tools_cJSON *continueIfFail = + tools_cJSON_GetObjectItem(stbInfo, "continue_if_fail"); // yes, no, + if (tools_cJSON_IsString(continueIfFail)) { + if (0 == strcasecmp(continueIfFail->valuestring, "no")) { + superTable->continueIfFail = NO_IF_FAILED; + } else if (0 == strcasecmp(continueIfFail->valuestring, "yes")) { + superTable->continueIfFail = 
YES_IF_FAILED; + } else if (0 == strcasecmp(continueIfFail->valuestring, "smart")) { + superTable->continueIfFail = SMART_IF_FAILED; + } else { + errorPrint("cointinue_if_fail has unknown mode %s\n", + continueIfFail->valuestring); + return -1; + } + } + + tools_cJSON *ts = tools_cJSON_GetObjectItem(stbInfo, "start_timestamp"); + if (tools_cJSON_IsString(ts)) { + if (0 == strcasecmp(ts->valuestring, "now")) { + superTable->startTimestamp = + toolsGetTimestamp(database->precision); + superTable->useNow = true; + // fill time with now conflict with check_sql + g_arguments->check_sql = false; + } else { + if (toolsParseTime(ts->valuestring, + &(superTable->startTimestamp), + (int32_t)strlen(ts->valuestring), + database->precision, 0)) { + errorPrint("failed to parse time %s\n", + ts->valuestring); + return -1; + } + } + } else { + if (tools_cJSON_IsNumber(ts)) { + superTable->startTimestamp = ts->valueint; + } else { + superTable->startTimestamp = + toolsGetTimestamp(database->precision); + } + } + + tools_cJSON *timestampStep = + tools_cJSON_GetObjectItem(stbInfo, "timestamp_step"); + if (tools_cJSON_IsNumber(timestampStep)) { + superTable->timestamp_step = timestampStep->valueint; + } + + tools_cJSON *keepTrying = + tools_cJSON_GetObjectItem(stbInfo, "keep_trying"); + if (tools_cJSON_IsNumber(keepTrying)) { + superTable->keep_trying = keepTrying->valueint; + } + + tools_cJSON *tryingInterval = + tools_cJSON_GetObjectItem(stbInfo, "trying_interval"); + if (tools_cJSON_IsNumber(tryingInterval)) { + superTable->trying_interval = (uint32_t)tryingInterval->valueint; + } + + tools_cJSON *sampleFile = + tools_cJSON_GetObjectItem(stbInfo, "sample_file"); + if (tools_cJSON_IsString(sampleFile)) { + tstrncpy( + superTable->sampleFile, sampleFile->valuestring, + MAX_FILE_NAME_LEN); + } else { + memset(superTable->sampleFile, 0, MAX_FILE_NAME_LEN); + } + + tools_cJSON *useSampleTs = + tools_cJSON_GetObjectItem(stbInfo, "use_sample_ts"); + if (tools_cJSON_IsString(useSampleTs) && + (0 == strcasecmp(useSampleTs->valuestring, "yes"))) { + superTable->useSampleTs = true; + } + + tools_cJSON *nonStop = + tools_cJSON_GetObjectItem(stbInfo, "non_stop_mode"); + if (tools_cJSON_IsString(nonStop) && + (0 == strcasecmp(nonStop->valuestring, "yes"))) { + superTable->non_stop = true; + } + + tools_cJSON* max_sql_len_obj = + tools_cJSON_GetObjectItem(stbInfo, "max_sql_len"); + if (tools_cJSON_IsNumber(max_sql_len_obj)) { + superTable->max_sql_len = max_sql_len_obj->valueint; + } + + tools_cJSON *tagsFile = + tools_cJSON_GetObjectItem(stbInfo, "tags_file"); + if (tools_cJSON_IsString(tagsFile)) { + tstrncpy(superTable->tagsFile, tagsFile->valuestring, + MAX_FILE_NAME_LEN); + } else { + memset(superTable->tagsFile, 0, MAX_FILE_NAME_LEN); + } + + tools_cJSON *insertRows = + tools_cJSON_GetObjectItem(stbInfo, "insert_rows"); + if (tools_cJSON_IsNumber(insertRows)) { + superTable->insertRows = insertRows->valueint; + } + + tools_cJSON *stbInterlaceRows = + tools_cJSON_GetObjectItem(stbInfo, "interlace_rows"); + if (tools_cJSON_IsNumber(stbInterlaceRows)) { + superTable->interlaceRows = (uint32_t)stbInterlaceRows->valueint; + } + + // disorder + tools_cJSON *disorderRatio = + tools_cJSON_GetObjectItem(stbInfo, "disorder_ratio"); + if (tools_cJSON_IsNumber(disorderRatio)) { + if (disorderRatio->valueint > 100) disorderRatio->valueint = 100; + if (disorderRatio->valueint < 0) disorderRatio->valueint = 0; + + superTable->disorderRatio = (int)disorderRatio->valueint; + superTable->disRatio = (uint8_t)disorderRatio->valueint; 
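/*
 * A condensed, illustrative "databases" entry for getDatabaseInfo() and
 * getStableInfo() above (placeholder names and values). Inside "dbinfo",
 * every key other than name / drop / flush_each_batch / precision is stored
 * as a generic name/value pair in database->cfgs, and "duration" is
 * additionally normalized to minutes by getDurationVal()
 * ("10d" / "10h" / "10m", or a bare number of days).
 *
 *   "databases": [{
 *     "dbinfo": {
 *       "name":      "test",
 *       "drop":      "yes",
 *       "precision": "ms",
 *       "duration":  "10d",
 *       "vgroups":   4
 *     },
 *     "super_tables": [{
 *       "name":              "meters",
 *       "childtable_count":  10000,
 *       "childtable_prefix": "d",
 *       "insert_mode":       "taosc",
 *       "insert_rows":       10000,
 *       "timestamp_step":    1,
 *       "start_timestamp":   "now",     // "now" also forces check_sql off
 *       "columns":           [ ... ],
 *       "tags":              [ ... ]
 *     }]
 *   }]
 */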
+ } + tools_cJSON *disorderRange = + tools_cJSON_GetObjectItem(stbInfo, "disorder_range"); + if (tools_cJSON_IsNumber(disorderRange)) { + superTable->disorderRange = (int)disorderRange->valueint; + superTable->disRange = disorderRange->valueint; + } + tools_cJSON *disFill = + tools_cJSON_GetObjectItem(stbInfo, "disorder_fill_interval"); + if (tools_cJSON_IsNumber(disFill)) { + superTable->fillIntervalDis = (int)disFill->valueint; + } + + + // update + tools_cJSON *updRatio = + tools_cJSON_GetObjectItem(stbInfo, "update_ratio"); + if (tools_cJSON_IsNumber(updRatio)) { + if (updRatio->valueint > 100) updRatio->valueint = 100; + if (updRatio->valueint < 0) updRatio->valueint = 0; + superTable->updRatio = (int8_t)updRatio->valueint; + } + tools_cJSON *updFill = + tools_cJSON_GetObjectItem(stbInfo, "update_fill_interval"); + if (tools_cJSON_IsNumber(updFill)) { + superTable->fillIntervalUpd = (uint64_t)updFill->valueint; + } + + // delete + tools_cJSON *delRatio = + tools_cJSON_GetObjectItem(stbInfo, "delete_ratio"); + if (tools_cJSON_IsNumber(delRatio)) { + if (delRatio->valueint > 100) delRatio->valueint = 100; + if (delRatio->valueint < 0) delRatio->valueint = 0; + superTable->delRatio = (int8_t)delRatio->valueint; + } + + // generate row rule + tools_cJSON *rowRule = + tools_cJSON_GetObjectItem(stbInfo, "generate_row_rule"); + if (tools_cJSON_IsNumber(rowRule)) { + superTable->genRowRule = (int8_t)rowRule->valueint; + } + + // binary prefix + tools_cJSON *binPrefix = + tools_cJSON_GetObjectItem(stbInfo, "binary_prefix"); + if (tools_cJSON_IsString(binPrefix)) { + superTable->binaryPrefex = binPrefix->valuestring; + } else { + superTable->binaryPrefex = NULL; + } + + // nchar prefix + tools_cJSON *ncharPrefix = + tools_cJSON_GetObjectItem(stbInfo, "nchar_prefix"); + if (tools_cJSON_IsString(ncharPrefix)) { + superTable->ncharPrefex = ncharPrefix->valuestring; + } else { + superTable->ncharPrefex = NULL; + } + + // write future random + itemObj = tools_cJSON_GetObjectItem(stbInfo, "random_write_future"); + if (tools_cJSON_IsString(itemObj) + && (0 == strcasecmp(itemObj->valuestring, "yes"))) { + superTable->writeFuture = true; + } + + // check_correct_interval + itemObj = tools_cJSON_GetObjectItem(stbInfo, "check_correct_interval"); + if (tools_cJSON_IsNumber(itemObj)) { + superTable->checkInterval = itemObj->valueint; + } + + tools_cJSON *insertInterval = + tools_cJSON_GetObjectItem(stbInfo, "insert_interval"); + if (tools_cJSON_IsNumber(insertInterval)) { + superTable->insert_interval = insertInterval->valueint; + } + + tools_cJSON *pPartialColNum = + tools_cJSON_GetObjectItem(stbInfo, "partial_col_num"); + if (tools_cJSON_IsNumber(pPartialColNum)) { + superTable->partialColNum = pPartialColNum->valueint; + } + + if (g_arguments->taosc_version == 3) { + tools_cJSON *delay = tools_cJSON_GetObjectItem(stbInfo, "delay"); + if (tools_cJSON_IsNumber(delay)) { + superTable->delay = (int)delay->valueint; + } + + tools_cJSON *file_factor = + tools_cJSON_GetObjectItem(stbInfo, "file_factor"); + if (tools_cJSON_IsNumber(file_factor)) { + superTable->file_factor = (int)file_factor->valueint; + } + + tools_cJSON *rollup = tools_cJSON_GetObjectItem(stbInfo, "rollup"); + if (tools_cJSON_IsString(rollup)) { + superTable->rollup = rollup->valuestring; + } + + tools_cJSON *ttl = tools_cJSON_GetObjectItem(stbInfo, "ttl"); + if (tools_cJSON_IsNumber(ttl)) { + superTable->ttl = (int)ttl->valueint; + } + + tools_cJSON *max_delay_obj = + tools_cJSON_GetObjectItem(stbInfo, "max_delay"); + if 
(tools_cJSON_IsString(max_delay_obj)) { + superTable->max_delay = max_delay_obj->valuestring; + } + + tools_cJSON *watermark_obj = + tools_cJSON_GetObjectItem(stbInfo, "watermark"); + if (tools_cJSON_IsString(watermark_obj)) { + superTable->watermark = watermark_obj->valuestring; + } + + if (get_tsma_info(stbInfo, superTable)) { + return -1; + } + } + + if (getColumnAndTagTypeFromInsertJsonFile(stbInfo, superTable)) { + return -1; + } + } + return 0; +} + +static int getStreamInfo(tools_cJSON* json) { + tools_cJSON* streamsObj = tools_cJSON_GetObjectItem(json, "streams"); + if (tools_cJSON_IsArray(streamsObj)) { + int streamCnt = tools_cJSON_GetArraySize(streamsObj); + for (int i = 0; i < streamCnt; ++i) { + tools_cJSON* streamObj = tools_cJSON_GetArrayItem(streamsObj, i); + if (!tools_cJSON_IsObject(streamObj)) { + errorPrint("%s", "invalid stream format in json\n"); + return -1; + } + tools_cJSON* stream_name = + tools_cJSON_GetObjectItem(streamObj, "stream_name"); + tools_cJSON* stream_stb = + tools_cJSON_GetObjectItem(streamObj, "stream_stb"); + tools_cJSON* source_sql = + tools_cJSON_GetObjectItem(streamObj, "source_sql"); + if (!tools_cJSON_IsString(stream_name) + || !tools_cJSON_IsString(stream_stb) + || !tools_cJSON_IsString(source_sql)) { + errorPrint("%s", "Invalid or miss " + "'stream_name'/'stream_stb'/'source_sql' " + "key in json\n"); + return -1; + } + SSTREAM * stream = benchCalloc(1, sizeof(SSTREAM), true); + tstrncpy(stream->stream_name, stream_name->valuestring, + TSDB_TABLE_NAME_LEN); + tstrncpy(stream->stream_stb, stream_stb->valuestring, + TSDB_TABLE_NAME_LEN); + tstrncpy(stream->source_sql, source_sql->valuestring, + TSDB_DEFAULT_PKT_SIZE); + + tools_cJSON* trigger_mode = + tools_cJSON_GetObjectItem(streamObj, "trigger_mode"); + if (tools_cJSON_IsString(trigger_mode)) { + tstrncpy(stream->trigger_mode, trigger_mode->valuestring, + BIGINT_BUFF_LEN); + } + + tools_cJSON* watermark = + tools_cJSON_GetObjectItem(streamObj, "watermark"); + if (tools_cJSON_IsString(watermark)) { + tstrncpy(stream->watermark, watermark->valuestring, + BIGINT_BUFF_LEN); + } + + tools_cJSON* ignore_expired = + tools_cJSON_GetObjectItem(streamObj, "ignore_expired"); + if (tools_cJSON_IsString(ignore_expired)) { + tstrncpy(stream->ignore_expired, ignore_expired->valuestring, + BIGINT_BUFF_LEN); + } + + tools_cJSON* ignore_update = + tools_cJSON_GetObjectItem(streamObj, "ignore_update"); + if (tools_cJSON_IsString(ignore_update)) { + tstrncpy(stream->ignore_update, ignore_update->valuestring, + BIGINT_BUFF_LEN); + } + + tools_cJSON* fill_history = + tools_cJSON_GetObjectItem(streamObj, "fill_history"); + if (tools_cJSON_IsString(fill_history)) { + tstrncpy(stream->fill_history, fill_history->valuestring, + BIGINT_BUFF_LEN); + } + + tools_cJSON* stream_stb_field = + tools_cJSON_GetObjectItem(streamObj, "stream_stb_field"); + if (tools_cJSON_IsString(stream_stb_field)) { + tstrncpy(stream->stream_stb_field, + stream_stb_field->valuestring, + TSDB_DEFAULT_PKT_SIZE); + } + + tools_cJSON* stream_tag_field = + tools_cJSON_GetObjectItem(streamObj, "stream_tag_field"); + if (tools_cJSON_IsString(stream_tag_field)) { + tstrncpy(stream->stream_tag_field, + stream_tag_field->valuestring, + TSDB_DEFAULT_PKT_SIZE); + } + + tools_cJSON* subtable = + tools_cJSON_GetObjectItem(streamObj, "subtable"); + if (tools_cJSON_IsString(subtable)) { + tstrncpy(stream->subtable, subtable->valuestring, + TSDB_DEFAULT_PKT_SIZE); + } + + tools_cJSON* drop = tools_cJSON_GetObjectItem(streamObj, "drop"); + if 
(tools_cJSON_IsString(drop)) { + if (0 == strcasecmp(drop->valuestring, "yes")) { + stream->drop = true; + } else if (0 == strcasecmp(drop->valuestring, "no")) { + stream->drop = false; + } else { + errorPrint("invalid value for drop field: %s\n", + drop->valuestring); + return -1; + } + } + benchArrayPush(g_arguments->streams, stream); + } + } + return 0; +} + +// read common item +static int getMetaFromCommonJsonFile(tools_cJSON *json) { + int32_t code = -1; + tools_cJSON *cfgdir = tools_cJSON_GetObjectItem(json, "cfgdir"); + if (cfgdir && (cfgdir->type == tools_cJSON_String) + && (cfgdir->valuestring != NULL)) { + tstrncpy(g_configDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); + } + + tools_cJSON *host = tools_cJSON_GetObjectItem(json, "host"); + if (host && host->type == tools_cJSON_String && host->valuestring != NULL) { + g_arguments->host = host->valuestring; + } + + tools_cJSON *port = tools_cJSON_GetObjectItem(json, "port"); + if (port && port->type == tools_cJSON_Number) { + g_arguments->port = (uint16_t)port->valueint; + } + + tools_cJSON *user = tools_cJSON_GetObjectItem(json, "user"); + if (user && user->type == tools_cJSON_String && user->valuestring != NULL) { + g_arguments->user = user->valuestring; + } + + tools_cJSON *password = tools_cJSON_GetObjectItem(json, "password"); + if (password && password->type == tools_cJSON_String && + password->valuestring != NULL) { + g_arguments->password = password->valuestring; + } + + tools_cJSON *answerPrompt = + tools_cJSON_GetObjectItem(json, + "confirm_parameter_prompt"); // yes, no, + if (answerPrompt && answerPrompt->type == tools_cJSON_String + && answerPrompt->valuestring != NULL) { + if (0 == strcasecmp(answerPrompt->valuestring, "no")) { + g_arguments->answer_yes = true; + } + } + + tools_cJSON *continueIfFail = + tools_cJSON_GetObjectItem(json, "continue_if_fail"); // yes, no, + if (tools_cJSON_IsString(continueIfFail)) { + if (0 == strcasecmp(continueIfFail->valuestring, "no")) { + g_arguments->continueIfFail = NO_IF_FAILED; + } else if (0 == strcasecmp(continueIfFail->valuestring, "yes")) { + g_arguments->continueIfFail = YES_IF_FAILED; + } else if (0 == strcasecmp(continueIfFail->valuestring, "smart")) { + g_arguments->continueIfFail = SMART_IF_FAILED; + } else { + errorPrint("cointinue_if_fail has unknown mode %s\n", + continueIfFail->valuestring); + return -1; + } + } + + code = 0; + return code; +} + +static int getMetaFromInsertJsonFile(tools_cJSON *json) { + int32_t code = -1; + +#ifdef WEBSOCKET + tools_cJSON *dsn = tools_cJSON_GetObjectItem(json, "dsn"); + if (tools_cJSON_IsString(dsn)) { + g_arguments->dsn = dsn->valuestring; + g_arguments->websocket = true; + } +#endif + + // check after inserted + tools_cJSON *checkSql = tools_cJSON_GetObjectItem(json, "check_sql"); + if (tools_cJSON_IsString(checkSql)) { + if (0 == strcasecmp(checkSql->valuestring, "yes")) { + g_arguments->check_sql = true; + } + } + + tools_cJSON *resultfile = tools_cJSON_GetObjectItem(json, "result_file"); + if (resultfile && resultfile->type == tools_cJSON_String + && resultfile->valuestring != NULL) { + g_arguments->output_file = resultfile->valuestring; + } + + tools_cJSON *threads = tools_cJSON_GetObjectItem(json, "thread_count"); + if (threads && threads->type == tools_cJSON_Number) { + g_arguments->nthreads = (uint32_t)threads->valueint; + } + + tools_cJSON *keepTrying = tools_cJSON_GetObjectItem(json, "keep_trying"); + if (keepTrying && keepTrying->type == tools_cJSON_Number) { + g_arguments->keep_trying = (int32_t)keepTrying->valueint; + 
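/*
 * An illustrative "streams" entry for getStreamInfo() above (placeholder
 * names and values), only read when taosc_version == 3. "stream_name",
 * "stream_stb" and "source_sql" are mandatory; the remaining keys are
 * optional, and "drop" accepts only "yes" or "no".
 *
 *   "streams": [{
 *     "stream_name":  "stream_avg_current",
 *     "stream_stb":   "stream_avg_current_stb",
 *     "source_sql":   "select avg(current) from test.meters interval(10s)",
 *     "trigger_mode": "at_once",
 *     "watermark":    "10s",
 *     "fill_history": "1",
 *     "drop":         "yes"
 *   }]
 */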
} + + tools_cJSON *tryingInterval = + tools_cJSON_GetObjectItem(json, "trying_interval"); + if (tryingInterval && tryingInterval->type == tools_cJSON_Number) { + g_arguments->trying_interval = (uint32_t)tryingInterval->valueint; + } + + tools_cJSON *table_threads = + tools_cJSON_GetObjectItem(json, "create_table_thread_count"); + if (tools_cJSON_IsNumber(table_threads)) { + g_arguments->table_threads = (uint32_t)table_threads->valueint; + } + +#ifdef WEBSOCKET + if (!g_arguments->websocket) { +#endif +#ifdef LINUX + if (strlen(g_configDir)) { + wordexp_t full_path; + if (wordexp(g_configDir, &full_path, 0) != 0) { + errorPrint("Invalid path %s\n", g_configDir); + exit(EXIT_FAILURE); + } + taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); + wordfree(&full_path); + } +#endif +#ifdef WEBSOCKET + } +#endif + + tools_cJSON *numRecPerReq = + tools_cJSON_GetObjectItem(json, "num_of_records_per_req"); + if (numRecPerReq && numRecPerReq->type == tools_cJSON_Number) { + g_arguments->reqPerReq = (uint32_t)numRecPerReq->valueint; + if ((int32_t)g_arguments->reqPerReq <= 0) { + infoPrint("warning: num_of_records_per_req item in json config must be greater than zero, current = %d, now reset to default.\n", g_arguments->reqPerReq); + g_arguments->reqPerReq = DEFAULT_REQ_PER_REQ; + } + + if (g_arguments->reqPerReq > 32768) { + infoPrint("warning: num_of_records_per_req item in json config must not exceed 32768, current = %d, now reset to default.\n", g_arguments->reqPerReq); + g_arguments->reqPerReq = DEFAULT_REQ_PER_REQ; + } + + } + + tools_cJSON *prepareRand = + tools_cJSON_GetObjectItem(json, "prepared_rand"); + if (prepareRand && prepareRand->type == tools_cJSON_Number) { + g_arguments->prepared_rand = prepareRand->valueint; + } + + tools_cJSON *chineseOpt = + tools_cJSON_GetObjectItem(json, "chinese"); // yes, no, + if (chineseOpt && chineseOpt->type == tools_cJSON_String + && chineseOpt->valuestring != NULL) { + if (0 == strncasecmp(chineseOpt->valuestring, "yes", 3)) { + g_arguments->chinese = true; + } + } + + tools_cJSON *escapeChar = + tools_cJSON_GetObjectItem(json, "escape_character"); // yes, no, + if (escapeChar && escapeChar->type == tools_cJSON_String + && escapeChar->valuestring != NULL) { + if (0 == strncasecmp(escapeChar->valuestring, "yes", 3)) { + g_arguments->escape_character = true; + } + } + + tools_cJSON *top_insertInterval = + tools_cJSON_GetObjectItem(json, "insert_interval"); + if (top_insertInterval && top_insertInterval->type == tools_cJSON_Number) { + g_arguments->insert_interval = top_insertInterval->valueint; + } + + tools_cJSON *insert_mode = tools_cJSON_GetObjectItem(json, "insert_mode"); + if (insert_mode && insert_mode->type == tools_cJSON_String + && insert_mode->valuestring != NULL) { + if (0 == strcasecmp(insert_mode->valuestring, "rest")) { + g_arguments->iface = REST_IFACE; + } + } + + tools_cJSON *dbinfos = tools_cJSON_GetObjectItem(json, "databases"); + if (!tools_cJSON_IsArray(dbinfos)) { + errorPrint("%s", "Invalid databases format in json\n"); + return -1; + } + int dbSize = tools_cJSON_GetArraySize(dbinfos); + + for (int i = 0; i < dbSize; ++i) { + if (getDatabaseInfo(dbinfos, i)) { + goto PARSE_OVER; + } + if (getStableInfo(dbinfos, i)) { + goto PARSE_OVER; + } + } + + if (g_arguments->taosc_version == 3) { + if (getStreamInfo(json)) { + goto PARSE_OVER; + } + } + + code = 0; + +PARSE_OVER: + return code; +} + +static int getMetaFromQueryJsonFile(tools_cJSON *json) { + int32_t code = -1; + + tools_cJSON *telnet_tcp_port = + tools_cJSON_GetObjectItem(json,
"telnet_tcp_port"); + if (tools_cJSON_IsNumber(telnet_tcp_port)) { + g_arguments->telnet_tcp_port = (uint16_t)telnet_tcp_port->valueint; + } + + tools_cJSON *gQueryTimes = tools_cJSON_GetObjectItem(json, "query_times"); + if (tools_cJSON_IsNumber(gQueryTimes)) { + g_queryInfo.query_times = gQueryTimes->valueint; + } else { + g_queryInfo.query_times = 1; + } + + tools_cJSON *gKillSlowQueryThreshold = + tools_cJSON_GetObjectItem(json, "kill_slow_query_threshold"); + if (tools_cJSON_IsNumber(gKillSlowQueryThreshold)) { + g_queryInfo.killQueryThreshold = gKillSlowQueryThreshold->valueint; + } else { + g_queryInfo.killQueryThreshold = 0; + } + + tools_cJSON *gKillSlowQueryInterval = + tools_cJSON_GetObjectItem(json, "kill_slow_query_interval"); + if (tools_cJSON_IsNumber(gKillSlowQueryInterval)) { + g_queryInfo.killQueryInterval = gKillSlowQueryInterval ->valueint; + } else { + g_queryInfo.killQueryInterval = 1; /* by default, interval 1s */ + } + + tools_cJSON *resetCache = + tools_cJSON_GetObjectItem(json, "reset_query_cache"); + if (tools_cJSON_IsString(resetCache)) { + if (0 == strcasecmp(resetCache->valuestring, "yes")) { + g_queryInfo.reset_query_cache = true; + } + } else { + g_queryInfo.reset_query_cache = false; + } + + tools_cJSON *respBuffer = + tools_cJSON_GetObjectItem(json, "response_buffer"); + if (tools_cJSON_IsNumber(respBuffer)) { + g_queryInfo.response_buffer = respBuffer->valueint; + } else { + g_queryInfo.response_buffer = RESP_BUF_LEN; + } + + tools_cJSON *dbs = tools_cJSON_GetObjectItem(json, "databases"); + if (tools_cJSON_IsString(dbs)) { + g_queryInfo.dbName = dbs->valuestring; + } + + tools_cJSON *queryMode = tools_cJSON_GetObjectItem(json, "query_mode"); + if (tools_cJSON_IsString(queryMode)) { + if (0 == strcasecmp(queryMode->valuestring, "rest")) { + g_queryInfo.iface = REST_IFACE; + } else if (0 == strcasecmp(queryMode->valuestring, "taosc")) { + g_queryInfo.iface = TAOSC_IFACE; + } else { + errorPrint("Invalid query_mode value: %s\n", + queryMode->valuestring); + goto PARSE_OVER; + } + } + // init sqls + g_queryInfo.specifiedQueryInfo.sqls = benchArrayInit(1, sizeof(SSQL)); + + // specified_table_query + tools_cJSON *specifiedQuery = + tools_cJSON_GetObjectItem(json, "specified_table_query"); + g_queryInfo.specifiedQueryInfo.concurrent = 1; + if (tools_cJSON_IsObject(specifiedQuery)) { + tools_cJSON *queryInterval = + tools_cJSON_GetObjectItem(specifiedQuery, "query_interval"); + if (tools_cJSON_IsNumber(queryInterval)) { + g_queryInfo.specifiedQueryInfo.queryInterval = + queryInterval->valueint; + } else { + g_queryInfo.specifiedQueryInfo.queryInterval = 0; + } + + tools_cJSON *specifiedQueryTimes = + tools_cJSON_GetObjectItem(specifiedQuery, "query_times"); + if (tools_cJSON_IsNumber(specifiedQueryTimes)) { + g_queryInfo.specifiedQueryInfo.queryTimes = + specifiedQueryTimes->valueint; + } else { + g_queryInfo.specifiedQueryInfo.queryTimes = g_queryInfo.query_times; + } + + tools_cJSON *mixedQueryObj = + tools_cJSON_GetObjectItem(specifiedQuery, "mixed_query"); + if (tools_cJSON_IsString(mixedQueryObj)) { + if (0 == strcasecmp(mixedQueryObj->valuestring, "yes")) { + g_queryInfo.specifiedQueryInfo.mixed_query = true; + } else if (0 == strcasecmp(mixedQueryObj->valuestring, "no")) { + g_queryInfo.specifiedQueryInfo.mixed_query = false; + } else { + errorPrint("Invalid mixed_query value: %s\n", + mixedQueryObj->valuestring); + goto PARSE_OVER; + } + } + + tools_cJSON *concurrent = + tools_cJSON_GetObjectItem(specifiedQuery, "concurrent"); + if 
(tools_cJSON_IsNumber(concurrent)) { + g_queryInfo.specifiedQueryInfo.concurrent = + (uint32_t)concurrent->valueint; + } + + tools_cJSON *threads = + tools_cJSON_GetObjectItem(specifiedQuery, "threads"); + if (tools_cJSON_IsNumber(threads)) { + g_queryInfo.specifiedQueryInfo.concurrent = + (uint32_t)threads->valueint; + } + + tools_cJSON *specifiedAsyncMode = + tools_cJSON_GetObjectItem(specifiedQuery, "mode"); + if (tools_cJSON_IsString(specifiedAsyncMode)) { + if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; + } else { + g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; + } + } else { + g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; + } + + tools_cJSON *subscribe_interval = + tools_cJSON_GetObjectItem(specifiedQuery, "subscribe_interval"); + if (tools_cJSON_IsNumber(subscribe_interval)) { + g_queryInfo.specifiedQueryInfo.subscribeInterval = + subscribe_interval->valueint; + } else { + g_queryInfo.specifiedQueryInfo.subscribeInterval = + DEFAULT_SUB_INTERVAL; + } + + tools_cJSON *specifiedSubscribeTimes = + tools_cJSON_GetObjectItem(specifiedQuery, "subscribe_times"); + if (tools_cJSON_IsNumber(specifiedSubscribeTimes)) { + g_queryInfo.specifiedQueryInfo.subscribeTimes = + specifiedSubscribeTimes->valueint; + } else { + g_queryInfo.specifiedQueryInfo.subscribeTimes = + g_queryInfo.query_times; + } + + tools_cJSON *restart = + tools_cJSON_GetObjectItem(specifiedQuery, "restart"); + if (tools_cJSON_IsString(restart)) { + if (0 == strcmp("no", restart->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeRestart = false; + } else { + g_queryInfo.specifiedQueryInfo.subscribeRestart = true; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeRestart = true; + } + + tools_cJSON *keepProgress = + tools_cJSON_GetObjectItem(specifiedQuery, "keepProgress"); + if (tools_cJSON_IsString(keepProgress)) { + if (0 == strcmp("yes", keepProgress->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; + } else { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + } + + // read sqls from file + tools_cJSON *sqlFileObj = + tools_cJSON_GetObjectItem(specifiedQuery, "sql_file"); + if (tools_cJSON_IsString(sqlFileObj)) { + FILE * fp = fopen(sqlFileObj->valuestring, "r"); + if (fp == NULL) { + errorPrint("failed to open file: %s\n", + sqlFileObj->valuestring); + goto PARSE_OVER; + } + char *buf = benchCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN, true); + while (fgets(buf, TSDB_MAX_ALLOWED_SQL_LEN, fp)) { + SSQL * sql = benchCalloc(1, sizeof(SSQL), true); + benchArrayPush(g_queryInfo.specifiedQueryInfo.sqls, sql); + sql = benchArrayGet(g_queryInfo.specifiedQueryInfo.sqls, + g_queryInfo.specifiedQueryInfo.sqls->size - 1); + int bufLen = strlen(buf) + 1; + sql->command = benchCalloc(1, bufLen, true); + sql->delay_list = benchCalloc( + g_queryInfo.specifiedQueryInfo.queryTimes + * g_queryInfo.specifiedQueryInfo.concurrent, + sizeof(int64_t), true); + tstrncpy(sql->command, buf, bufLen - 1); + debugPrint("read file buffer: %s\n", sql->command); + memset(buf, 0, TSDB_MAX_ALLOWED_SQL_LEN); + } + free(buf); + fclose(fp); + } + // sqls + tools_cJSON *specifiedSqls = + tools_cJSON_GetObjectItem(specifiedQuery, "sqls"); + if (tools_cJSON_IsArray(specifiedSqls)) { + int specifiedSqlSize = tools_cJSON_GetArraySize(specifiedSqls); + for (int j = 0; j < specifiedSqlSize; ++j) { + tools_cJSON *sqlObj = + tools_cJSON_GetArrayItem(specifiedSqls, 
j); + if (tools_cJSON_IsObject(sqlObj)) { + SSQL * sql = benchCalloc(1, sizeof(SSQL), true); + benchArrayPush(g_queryInfo.specifiedQueryInfo.sqls, sql); + sql = benchArrayGet(g_queryInfo.specifiedQueryInfo.sqls, + g_queryInfo.specifiedQueryInfo.sqls->size -1); + sql->delay_list = benchCalloc( + g_queryInfo.specifiedQueryInfo.queryTimes + * g_queryInfo.specifiedQueryInfo.concurrent, + sizeof(int64_t), true); + + tools_cJSON *sqlStr = + tools_cJSON_GetObjectItem(sqlObj, "sql"); + if (tools_cJSON_IsString(sqlStr)) { + int strLen = strlen(sqlStr->valuestring) + 1; + sql->command = benchCalloc(1, strLen, true); + tstrncpy(sql->command, sqlStr->valuestring, strLen); + // default value is -1, which mean infinite loop + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; + tools_cJSON *endAfterConsume = + tools_cJSON_GetObjectItem(specifiedQuery, + "endAfterConsume"); + if (tools_cJSON_IsNumber(endAfterConsume)) { + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = + (int)endAfterConsume->valueint; + } + if (g_queryInfo.specifiedQueryInfo + .endAfterConsume[j] < -1) { + g_queryInfo.specifiedQueryInfo + .endAfterConsume[j] = -1; + } + + g_queryInfo.specifiedQueryInfo + .resubAfterConsume[j] = -1; + tools_cJSON *resubAfterConsume = + tools_cJSON_GetObjectItem( + specifiedQuery, "resubAfterConsume"); + if (tools_cJSON_IsNumber(resubAfterConsume)) { + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] + = (int)resubAfterConsume->valueint; + } + + if (g_queryInfo.specifiedQueryInfo + .resubAfterConsume[j] < -1) + g_queryInfo.specifiedQueryInfo + .resubAfterConsume[j] = -1; + + tools_cJSON *result = + tools_cJSON_GetObjectItem(sqlObj, "result"); + if (tools_cJSON_IsString(result)) { + tstrncpy(sql->result, result->valuestring, + MAX_FILE_NAME_LEN); + } else { + memset(sql->result, 0, MAX_FILE_NAME_LEN); + } + } else { + errorPrint("%s", "Invalid sql in json\n"); + goto PARSE_OVER; + } + } + } + } + } + + // super_table_query + tools_cJSON *superQuery = + tools_cJSON_GetObjectItem(json, "super_table_query"); + g_queryInfo.superQueryInfo.threadCnt = 1; + if (!superQuery || superQuery->type != tools_cJSON_Object) { + g_queryInfo.superQueryInfo.sqlCount = 0; + } else { + tools_cJSON *subrate = + tools_cJSON_GetObjectItem(superQuery, "query_interval"); + if (subrate && subrate->type == tools_cJSON_Number) { + g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; + } else { + g_queryInfo.superQueryInfo.queryInterval = 0; + } + + tools_cJSON *superQueryTimes = + tools_cJSON_GetObjectItem(superQuery, "query_times"); + if (superQueryTimes && superQueryTimes->type == tools_cJSON_Number) { + g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; + } else { + g_queryInfo.superQueryInfo.queryTimes = g_queryInfo.query_times; + } + + tools_cJSON *concurrent = + tools_cJSON_GetObjectItem(superQuery, "concurrent"); + if (concurrent && concurrent->type == tools_cJSON_Number) { + g_queryInfo.superQueryInfo.threadCnt = + (uint32_t)concurrent->valueint; + } + + tools_cJSON *threads = tools_cJSON_GetObjectItem(superQuery, "threads"); + if (threads && threads->type == tools_cJSON_Number) { + g_queryInfo.superQueryInfo.threadCnt = (uint32_t)threads->valueint; + } + + tools_cJSON *stblname = + tools_cJSON_GetObjectItem(superQuery, "stblname"); + if (stblname && stblname->type == tools_cJSON_String + && stblname->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.stbName, + stblname->valuestring, + TSDB_TABLE_NAME_LEN); + } + + tools_cJSON *superAsyncMode = + 
tools_cJSON_GetObjectItem(superQuery, "mode"); + if (superAsyncMode && superAsyncMode->type == tools_cJSON_String + && superAsyncMode->valuestring != NULL) { + if (0 == strcmp("async", superAsyncMode->valuestring)) { + g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; + } else { + g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; + } + } else { + g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; + } + + tools_cJSON *superInterval = + tools_cJSON_GetObjectItem(superQuery, "interval"); + if (superInterval && superInterval->type == tools_cJSON_Number) { + g_queryInfo.superQueryInfo.subscribeInterval = + superInterval->valueint; + } else { + g_queryInfo.superQueryInfo.subscribeInterval = + DEFAULT_QUERY_INTERVAL; + } + + tools_cJSON *subrestart = + tools_cJSON_GetObjectItem(superQuery, "restart"); + if (subrestart && subrestart->type == tools_cJSON_String + && subrestart->valuestring != NULL) { + if (0 == strcmp("no", subrestart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = false; + } else { + g_queryInfo.superQueryInfo.subscribeRestart = true; + } + } else { + g_queryInfo.superQueryInfo.subscribeRestart = true; + } + + tools_cJSON *superkeepProgress = + tools_cJSON_GetObjectItem(superQuery, "keepProgress"); + if (superkeepProgress && superkeepProgress->type == tools_cJSON_String + && superkeepProgress->valuestring != NULL) { + if (0 == strcmp("yes", superkeepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; + } else { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } + } else { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } + + // default value is -1, which means never stop consuming + g_queryInfo.superQueryInfo.endAfterConsume = -1; + tools_cJSON *superEndAfterConsume = + tools_cJSON_GetObjectItem(superQuery, "endAfterConsume"); + if (superEndAfterConsume && + superEndAfterConsume->type == tools_cJSON_Number) { + g_queryInfo.superQueryInfo.endAfterConsume = + (int)superEndAfterConsume->valueint; + } + if (g_queryInfo.superQueryInfo.endAfterConsume < -1) + g_queryInfo.superQueryInfo.endAfterConsume = -1; + + // default value is -1, which means do not resubscribe + g_queryInfo.superQueryInfo.resubAfterConsume = -1; + tools_cJSON *superResubAfterConsume = + tools_cJSON_GetObjectItem(superQuery, "resubAfterConsume"); + if ((superResubAfterConsume) && + (superResubAfterConsume->type == tools_cJSON_Number) && + (superResubAfterConsume->valueint >= 0)) { + g_queryInfo.superQueryInfo.resubAfterConsume = + (int)superResubAfterConsume->valueint; + } + if (g_queryInfo.superQueryInfo.resubAfterConsume < -1) + g_queryInfo.superQueryInfo.resubAfterConsume = -1; + + // super table sqls + tools_cJSON *superSqls = tools_cJSON_GetObjectItem(superQuery, "sqls"); + if (!superSqls || superSqls->type != tools_cJSON_Array) { + g_queryInfo.superQueryInfo.sqlCount = 0; + } else { + int superSqlSize = tools_cJSON_GetArraySize(superSqls); + if (superSqlSize > MAX_QUERY_SQL_COUNT) { + errorPrint( + "failed to read json, query sql size overflow, max is %d\n", + MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.superQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + tools_cJSON *sql = tools_cJSON_GetArrayItem(superSqls, j); + if (sql == NULL) continue; + + tools_cJSON *sqlStr = tools_cJSON_GetObjectItem(sql, "sql"); + if (sqlStr && sqlStr->type == tools_cJSON_String) { + tstrncpy(g_queryInfo.superQueryInfo.sql[j], + sqlStr->valuestring, TSDB_MAX_ALLOWED_SQL_LEN); + } + + tools_cJSON *result = tools_cJSON_GetObjectItem(sql,
"result"); + if (result != NULL && result->type == tools_cJSON_String + && result->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.result[j], + result->valuestring, MAX_FILE_NAME_LEN); + } else { + memset(g_queryInfo.superQueryInfo.result[j], 0, + MAX_FILE_NAME_LEN); + } + } + } + } + + code = 0; + +PARSE_OVER: + return code; +} + +#ifdef TD_VER_COMPATIBLE_3_0_0_0 +static int getMetaFromTmqJsonFile(tools_cJSON *json) { + int32_t code = -1; + + tools_cJSON *cfgdir = tools_cJSON_GetObjectItem(json, "cfgdir"); + if (tools_cJSON_IsString(cfgdir)) { + tstrncpy(g_configDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); + } + +#ifdef LINUX + if (strlen(g_configDir)) { + wordexp_t full_path; + if (wordexp(g_configDir, &full_path, 0) != 0) { + errorPrint("Invalid path %s\n", g_configDir); + exit(EXIT_FAILURE); + } + taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); + wordfree(&full_path); + } +#endif + + tools_cJSON *resultfile = tools_cJSON_GetObjectItem(json, "result_file"); + if (resultfile && resultfile->type == tools_cJSON_String + && resultfile->valuestring != NULL) { + g_arguments->output_file = resultfile->valuestring; + } + + tools_cJSON *answerPrompt = + tools_cJSON_GetObjectItem(json, + "confirm_parameter_prompt"); // yes, no, + if (tools_cJSON_IsString(answerPrompt)) { + if (0 == strcasecmp(answerPrompt->valuestring, "no")) { + g_arguments->answer_yes = true; + } + } + + // consumer info + tools_cJSON *tmqInfo = tools_cJSON_GetObjectItem(json, "tmq_info"); + g_tmqInfo.consumerInfo.concurrent = 1; + + tools_cJSON *concurrent = tools_cJSON_GetObjectItem(tmqInfo, "concurrent"); + if (tools_cJSON_IsNumber(concurrent)) { + g_tmqInfo.consumerInfo.concurrent = (uint32_t)concurrent->valueint; + } + + // sequential, parallel + tools_cJSON *createMode = tools_cJSON_GetObjectItem(tmqInfo, "create_mode"); + if (tools_cJSON_IsString(createMode)) { + g_tmqInfo.consumerInfo.createMode = createMode->valuestring; + } + + // share, independent + tools_cJSON *groupMode = tools_cJSON_GetObjectItem(tmqInfo, "group_mode"); + if (tools_cJSON_IsString(groupMode)) { + g_tmqInfo.consumerInfo.groupMode = groupMode->valuestring; + } + + + tools_cJSON *pollDelay = tools_cJSON_GetObjectItem(tmqInfo, "poll_delay"); + if (tools_cJSON_IsNumber(pollDelay)) { + g_tmqInfo.consumerInfo.pollDelay = (uint32_t)pollDelay->valueint; + } + + tools_cJSON *autoCommitInterval = tools_cJSON_GetObjectItem( + tmqInfo, "auto.commit.interval.ms"); + if (tools_cJSON_IsNumber(autoCommitInterval)) { + g_tmqInfo.consumerInfo.autoCommitIntervalMs = + (uint32_t)autoCommitInterval->valueint; + } + + tools_cJSON *groupId = tools_cJSON_GetObjectItem(tmqInfo, "group.id"); + if (tools_cJSON_IsString(groupId)) { + g_tmqInfo.consumerInfo.groupId = groupId->valuestring; + } + + tools_cJSON *clientId = tools_cJSON_GetObjectItem(tmqInfo, "client.id"); + if (tools_cJSON_IsString(clientId)) { + g_tmqInfo.consumerInfo.clientId = clientId->valuestring; + } + + tools_cJSON *autoOffsetReset = tools_cJSON_GetObjectItem( + tmqInfo, "auto.offset.reset"); + if (tools_cJSON_IsString(autoOffsetReset)) { + g_tmqInfo.consumerInfo.autoOffsetReset = autoOffsetReset->valuestring; + } + + tools_cJSON *enableAutoCommit = tools_cJSON_GetObjectItem( + tmqInfo, "enable.auto.commit"); + if (tools_cJSON_IsString(enableAutoCommit)) { + g_tmqInfo.consumerInfo.enableAutoCommit = enableAutoCommit->valuestring; + } + + tools_cJSON *enableManualCommit = tools_cJSON_GetObjectItem( + tmqInfo, "enable.manual.commit"); + if (tools_cJSON_IsString(enableManualCommit)) { 
+ g_tmqInfo.consumerInfo.enableManualCommit = + enableManualCommit->valuestring; + } + + tools_cJSON *enableHeartbeatBackground = tools_cJSON_GetObjectItem( + tmqInfo, "enable.heartbeat.background"); + if (tools_cJSON_IsString(enableHeartbeatBackground)) { + g_tmqInfo.consumerInfo.enableHeartbeatBackground = + enableHeartbeatBackground->valuestring; + } + + tools_cJSON *snapshotEnable = tools_cJSON_GetObjectItem( + tmqInfo, "experimental.snapshot.enable"); + if (tools_cJSON_IsString(snapshotEnable)) { + g_tmqInfo.consumerInfo.snapshotEnable = snapshotEnable->valuestring; + } + + tools_cJSON *msgWithTableName = tools_cJSON_GetObjectItem( + tmqInfo, "msg.with.table.name"); + if (tools_cJSON_IsString(msgWithTableName)) { + g_tmqInfo.consumerInfo.msgWithTableName = msgWithTableName->valuestring; + } + + tools_cJSON *rowsFile = tools_cJSON_GetObjectItem(tmqInfo, "rows_file"); + if (tools_cJSON_IsString(rowsFile)) { + g_tmqInfo.consumerInfo.rowsFile = rowsFile->valuestring; + } + + g_tmqInfo.consumerInfo.expectRows = -1; + tools_cJSON *expectRows = tools_cJSON_GetObjectItem(tmqInfo, "expect_rows"); + if (tools_cJSON_IsNumber(expectRows)) { + g_tmqInfo.consumerInfo.expectRows = (uint32_t)expectRows->valueint; + } + + tools_cJSON *topicList = tools_cJSON_GetObjectItem(tmqInfo, "topic_list"); + if (tools_cJSON_IsArray(topicList)) { + int topicCount = tools_cJSON_GetArraySize(topicList); + for (int j = 0; j < topicCount; ++j) { + tools_cJSON *topicObj = tools_cJSON_GetArrayItem(topicList, j); + if (tools_cJSON_IsObject(topicObj)) { + tools_cJSON *topicName = tools_cJSON_GetObjectItem( + topicObj, "name"); + if (tools_cJSON_IsString(topicName)) { + // int strLen = strlen(topicName->valuestring) + 1; + tstrncpy(g_tmqInfo.consumerInfo.topicName[ + g_tmqInfo.consumerInfo.topicCount], + topicName->valuestring, 255); + + } else { + errorPrint("%s", "Invalid topic name in json\n"); + goto TMQ_PARSE_OVER; + } + + tools_cJSON *sqlString = tools_cJSON_GetObjectItem( + topicObj, "sql"); + if (tools_cJSON_IsString(sqlString)) { + // int strLen = strlen(sqlString->valuestring) + 1; + tstrncpy(g_tmqInfo.consumerInfo.topicSql[ + g_tmqInfo.consumerInfo.topicCount], + sqlString->valuestring, 255); + + } else { + errorPrint("%s", "Invalid topic sql in json\n"); + goto TMQ_PARSE_OVER; + } + g_tmqInfo.consumerInfo.topicCount++; + } + } + } + code = 0; +TMQ_PARSE_OVER: + return code; +} +#endif + +int getInfoFromJsonFile() { + char * file = g_arguments->metaFile; + int32_t code = -1; + FILE * fp = fopen(file, "r"); + if (!fp) { + errorPrint("failed to read %s, reason:%s\n", file, + strerror(errno)); + return code; + } + + int maxLen = MAX_JSON_BUFF; + char *content = benchCalloc(1, maxLen + 1, false); + int len = (int)fread(content, 1, maxLen, fp); + if (len <= 0) { + errorPrint("failed to read %s, content is null", file); + goto PARSE_OVER; + } + + content[len] = 0; + root = tools_cJSON_Parse(content); + if (root == NULL) { + errorPrint("failed to cjson parse %s, invalid json format\n", + file); + goto PARSE_OVER; + } + + char *pstr = tools_cJSON_Print(root); + infoPrint("%s\n%s\n", file, pstr); + tmfree(pstr); + + tools_cJSON *filetype = tools_cJSON_GetObjectItem(root, "filetype"); + if (tools_cJSON_IsString(filetype)) { + if (0 == strcasecmp("insert", filetype->valuestring)) { + g_arguments->test_mode = INSERT_TEST; + } else if (0 == strcasecmp("query", filetype->valuestring)) { + g_arguments->test_mode = QUERY_TEST; + } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { + g_arguments->test_mode = 
SUBSCRIBE_TEST; + } else { + errorPrint("%s", + "failed to read json, filetype not support\n"); + goto PARSE_OVER; + } + } else { + g_arguments->test_mode = INSERT_TEST; + } + + // read common item + code = getMetaFromCommonJsonFile(root); + if (INSERT_TEST == g_arguments->test_mode) { + code = getMetaFromInsertJsonFile(root); +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + } else if (QUERY_TEST == g_arguments->test_mode) { +#else + } else { +#endif + memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); + code = getMetaFromQueryJsonFile(root); +#ifdef TD_VER_COMPATIBLE_3_0_0_0 + } else if (SUBSCRIBE_TEST == g_arguments->test_mode) { + memset(&g_tmqInfo, 0, sizeof(STmqMetaInfo)); + code = getMetaFromTmqJsonFile(root); +#endif + } +PARSE_OVER: + free(content); + fclose(fp); + return code; +} diff --git a/src/benchTmq.c b/src/benchTmq.c index 4de47719..861f2c8d 100644 --- a/src/benchTmq.c +++ b/src/benchTmq.c @@ -136,8 +136,86 @@ static int32_t data_msg_process(TAOS_RES* msg, tmqThreadInfo* pInfo, int32_t msg return totalRows; } +int buildConsumerAndSubscribe(tmqThreadInfo * pThreadInfo, char* groupId) { + int ret = 0; + char tmpBuff[128] = {0}; + + SConsumerInfo* pConsumerInfo = &g_tmqInfo.consumerInfo; + + tmq_list_t * topic_list = buildTopicList(); + + tmq_conf_t * conf = tmq_conf_new(); + + tmq_conf_set(conf, "td.connect.user", g_arguments->user); + tmq_conf_set(conf, "td.connect.pass", g_arguments->password); + tmq_conf_set(conf, "td.connect.ip", g_arguments->host); + + memset(tmpBuff, 0, sizeof(tmpBuff)); + snprintf(tmpBuff, 16, "%d", g_arguments->port); + tmq_conf_set(conf, "td.connect.port", tmpBuff); + + tmq_conf_set(conf, "group.id", groupId); + + memset(tmpBuff, 0, sizeof(tmpBuff)); + snprintf(tmpBuff, 16, "%s_%d", pConsumerInfo->clientId, pThreadInfo->id); + tmq_conf_set(conf, "client.id", tmpBuff); + + tmq_conf_set(conf, "auto.offset.reset", pConsumerInfo->autoOffsetReset); + tmq_conf_set(conf, "enable.auto.commit", pConsumerInfo->enableAutoCommit); + + memset(tmpBuff, 0, sizeof(tmpBuff)); + snprintf(tmpBuff, 16, "%d", pConsumerInfo->autoCommitIntervalMs); + tmq_conf_set(conf, "auto.commit.interval.ms", tmpBuff); + + tmq_conf_set(conf, "enable.heartbeat.background", pConsumerInfo->enableHeartbeatBackground); + tmq_conf_set(conf, "experimental.snapshot.enable", pConsumerInfo->snapshotEnable); + tmq_conf_set(conf, "msg.with.table.name", pConsumerInfo->msgWithTableName); + + pThreadInfo->tmq = tmq_consumer_new(conf, NULL, 0); + tmq_conf_destroy(conf); + if (pThreadInfo->tmq == NULL) { + errorPrint("%s", "failed to execute tmq_consumer_new\n"); + ret = -1; + tmq_list_destroy(topic_list); + return ret; + } + infoPrint("thread[%d]: successfully create consumer\n", pThreadInfo->id); + + int32_t code = tmq_subscribe(pThreadInfo->tmq, topic_list); + if (code) { + errorPrint("failed to execute tmq_subscribe, reason: %s\n", tmq_err2str(code)); + ret = -1; + tmq_list_destroy(topic_list); + return ret; + } + infoPrint("thread[%d]: successfully subscribe topics\n", pThreadInfo->id); + tmq_list_destroy(topic_list); + + return ret; +} + static void* tmqConsume(void* arg) { - tmqThreadInfo *pThreadInfo = (tmqThreadInfo*)arg; + tmqThreadInfo* pThreadInfo = (tmqThreadInfo*)arg; + SConsumerInfo* pConsumerInfo = &g_tmqInfo.consumerInfo; + + // "sequential" or "parallel" + if (0 != strncasecmp(pConsumerInfo->createMode, "sequential", 10)) { + + // "share" or "independent" + char groupId[16] = {0}; + if (0 != strncasecmp(pConsumerInfo->groupMode, "share", 5)) { + + if ((NULL == pConsumerInfo->groupId) || (0 == 
strlen(pConsumerInfo->groupId))) { + // rand string + memset(groupId, 0, sizeof(groupId)); + rand_string(groupId, sizeof(groupId) - 1, 0); + infoPrint("consumer id: %d generate rand group id: %s\n", pThreadInfo->id, groupId); + //pConsumerInfo->groupId = groupId; + } + } + + buildConsumerAndSubscribe(pThreadInfo, groupId); + } int64_t totalMsgs = 0; int64_t totalRows = 0; @@ -145,7 +223,7 @@ static void* tmqConsume(void* arg) { infoPrint("consumer id %d start to loop pull msg\n", pThreadInfo->id); - if ((NULL != g_tmqInfo.consumerInfo.enableManualCommit) && (0 == strncmp("true", g_tmqInfo.consumerInfo.enableManualCommit, 4))) { + if ((NULL != pConsumerInfo->enableManualCommit) && (0 == strncmp("true", pConsumerInfo->enableManualCommit, 4))) { manualCommit = 1; infoPrint("consumer id %d enable manual commit\n", pThreadInfo->id); } @@ -154,7 +232,7 @@ static void* tmqConsume(void* arg) { int64_t lastTotalRows = 0; uint64_t lastPrintTime = toolsGetTimestampMs(); - int32_t consumeDelay = g_tmqInfo.consumerInfo.pollDelay == -1 ? -1 : g_tmqInfo.consumerInfo.pollDelay; + int32_t consumeDelay = pConsumerInfo->pollDelay == -1 ? -1 : pConsumerInfo->pollDelay; while (running) { TAOS_RES* tmqMsg = tmq_consumer_poll(pThreadInfo->tmq, consumeDelay); if (tmqMsg) { @@ -184,8 +262,8 @@ static void* tmqConsume(void* arg) { lastTotalRows = totalRows; } - if ((g_tmqInfo.consumerInfo.expectRows > 0) && (totalRows > g_tmqInfo.consumerInfo.expectRows)) { - infoPrint("consumer id %d consumed rows: %" PRId64 " over than expect rows: %d, exit consume\n", pThreadInfo->id, totalRows, g_tmqInfo.consumerInfo.expectRows); + if ((pConsumerInfo->expectRows > 0) && (totalRows > pConsumerInfo->expectRows)) { + infoPrint("consumer id %d consumed rows: %" PRId64 " over than expect rows: %d, exit consume\n", pThreadInfo->id, totalRows, pConsumerInfo->expectRows); break; } } else { @@ -217,6 +295,7 @@ static void* tmqConsume(void* arg) { return NULL; } + int subscribeTestProcess() { printfTmqConfigIntoFile(); int ret = 0; @@ -227,17 +306,18 @@ int subscribeTestProcess() { } } - tmq_list_t * topic_list = buildTopicList(); - - char groupId[16] = {0}; - if ((NULL == pConsumerInfo->groupId) || (0 == strlen(pConsumerInfo->groupId))) { - // rand string - memset(groupId, 0, sizeof(groupId)); - rand_string(groupId, sizeof(groupId) - 1, 0); - infoPrint("rand generate group id: %s\n", groupId); - pConsumerInfo->groupId = groupId; - } - + // "share" or "independent" + if (0 == strncasecmp(pConsumerInfo->groupMode, "share", 5)) { + char groupId[16] = {0}; + if ((NULL == pConsumerInfo->groupId) || (0 == strlen(pConsumerInfo->groupId))) { + // rand string + memset(groupId, 0, sizeof(groupId)); + rand_string(groupId, sizeof(groupId) - 1, 0); + infoPrint("rand generate group id: %s\n", groupId); + pConsumerInfo->groupId = groupId; + } + } + pthread_t * pids = benchCalloc(pConsumerInfo->concurrent, sizeof(pthread_t), true); tmqThreadInfo *infos = benchCalloc(pConsumerInfo->concurrent, sizeof(tmqThreadInfo), true); @@ -259,48 +339,11 @@ int subscribeTestProcess() { goto tmq_over; } } - - tmq_conf_t * conf = tmq_conf_new(); - tmq_conf_set(conf, "td.connect.user", g_arguments->user); - tmq_conf_set(conf, "td.connect.pass", g_arguments->password); - tmq_conf_set(conf, "td.connect.ip", g_arguments->host); - - memset(tmpBuff, 0, sizeof(tmpBuff)); - snprintf(tmpBuff, 16, "%d", g_arguments->port); - tmq_conf_set(conf, "td.connect.port", tmpBuff); - - tmq_conf_set(conf, "group.id", pConsumerInfo->groupId); - - memset(tmpBuff, 0, sizeof(tmpBuff)); - 
snprintf(tmpBuff, 16, "%s_%d", pConsumerInfo->clientId, i); - tmq_conf_set(conf, "client.id", tmpBuff); - - tmq_conf_set(conf, "auto.offset.reset", pConsumerInfo->autoOffsetReset); - tmq_conf_set(conf, "enable.auto.commit", pConsumerInfo->enableAutoCommit); - - memset(tmpBuff, 0, sizeof(tmpBuff)); - snprintf(tmpBuff, 16, "%d", pConsumerInfo->autoCommitIntervalMs); - tmq_conf_set(conf, "auto.commit.interval.ms", tmpBuff); - - tmq_conf_set(conf, "enable.heartbeat.background", pConsumerInfo->enableHeartbeatBackground); - tmq_conf_set(conf, "experimental.snapshot.enable", pConsumerInfo->snapshotEnable); - tmq_conf_set(conf, "msg.with.table.name", pConsumerInfo->msgWithTableName); - - pThreadInfo->tmq = tmq_consumer_new(conf, NULL, 0); - tmq_conf_destroy(conf); - if (pThreadInfo->tmq == NULL) { - errorPrint("%s", "failed to execute tmq_consumer_new\n"); - ret = -1; - goto tmq_over; - } - infoPrint("thread[%d]: successfully create consumer\n", i); - int32_t code = tmq_subscribe(pThreadInfo->tmq, topic_list); - if (code) { - errorPrint("failed to execute tmq_subscribe, reason: %s\n", tmq_err2str(code)); - ret = -1; - goto tmq_over; - } - infoPrint("thread[%d]: successfully subscribe topics\n", i); + + // "sequential" or "parallel" + if (0 == strncasecmp(pConsumerInfo->createMode, "sequential", 10)) { + buildConsumerAndSubscribe(pThreadInfo, pConsumerInfo->groupId); + } pthread_create(pids + i, NULL, tmqConsume, pThreadInfo); } @@ -332,6 +375,5 @@ int subscribeTestProcess() { tmq_over: free(pids); free(infos); - tmq_list_destroy(topic_list); return ret; }
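/*
 * Illustrative only -- not part of the patch above. A minimal subscribe-mode
 * configuration that exercises the new create_mode / group_mode switches parsed
 * in getMetaFromTmqJsonFile() and used in benchTmq.c might look like the
 * following; every value is an assumed example (database, topic, and file names
 * included), and omitted tmq_info fields simply keep their defaults.
 *
 *   {
 *     "filetype": "subscribe",
 *     "result_file": "./tmq_result.txt",
 *     "tmq_info": {
 *       "concurrent": 2,
 *       "create_mode": "parallel",
 *       "group_mode": "independent",
 *       "group.id": "",
 *       "client.id": "tmq_client",
 *       "poll_delay": 100000,
 *       "auto.offset.reset": "earliest",
 *       "enable.auto.commit": "true",
 *       "auto.commit.interval.ms": 1000,
 *       "enable.manual.commit": "false",
 *       "enable.heartbeat.background": "true",
 *       "experimental.snapshot.enable": "false",
 *       "msg.with.table.name": "false",
 *       "rows_file": "./consumed_rows.txt",
 *       "expect_rows": 10000,
 *       "topic_list": [
 *         { "name": "topic_meters", "sql": "select * from test.meters" }
 *       ]
 *     }
 *   }
 *
 * With create_mode other than "sequential" each consumer thread builds and
 * subscribes its own consumer inside tmqConsume(), and with group_mode other
 * than "share" plus an empty group.id it generates a random per-thread group id,
 * matching the branches added above.
 */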