diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml new file mode 100644 index 00000000000..fbc662ffb21 --- /dev/null +++ b/.github/workflows/taoskeeper-ci.yml @@ -0,0 +1,58 @@ +name: TaosKeeper CI + +on: + push: + paths: + - tools/keeper/** + +jobs: + build: + runs-on: ubuntu-latest + name: Run unit tests + + steps: + - name: Checkout the repository + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: 1.18 + + - name: Install system dependencies + run: | + sudo apt update -y + sudo apt install -y build-essential cmake libgeos-dev + + - name: Install TDengine + run: | + mkdir debug + cd debug + cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true -DBUILD_DEPENDENCY_TESTS=false + make -j 4 + sudo make install + which taosd + which taosadapter + which taoskeeper + + - name: Start taosd + run: | + cp /etc/taos/taos.cfg ./ + sudo echo "supportVnodes 256" >> taos.cfg + nohup sudo taosd -c taos.cfg & + + - name: Start taosadapter + run: nohup sudo taosadapter & + + - name: Run tests with coverage + working-directory: tools/keeper + run: | + go mod tidy + sudo go test -v -ldflags="-X 'github.com/taosdata/taoskeeper/version.IsEnterprise=true'" -coverpkg=./... -coverprofile=coverage.out ./... 
+ go tool cover -func=coverage.out + + - name: Clean up + if: always() + run: | + if pgrep taosd; then sudo pkill taosd; fi + if pgrep taosadapter; then sudo pkill taosadapter; fi diff --git a/.gitignore b/.gitignore index b849df0d063..8f461f2b022 100644 --- a/.gitignore +++ b/.gitignore @@ -159,4 +159,6 @@ pcre2.h zconf.h version.h geos_c.h - +source/libs/parser/src/sql.c +include/common/ttokenauto.h +!packaging/smokeTest/pytest_require.txt diff --git a/README-CN.md b/README-CN.md index 06ac087859c..1f785eb4584 100644 --- a/README-CN.md +++ b/README-CN.md @@ -348,7 +348,7 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java # 成为社区贡献者 -点击 [这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。 +点击 [这里](https://www.taosdata.com/contributor),了解如何成为 TDengine 的贡献者。 # 加入技术交流群 diff --git a/cmake/cmake.define b/cmake/cmake.define index 8b762011a48..ff582261b30 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -1,6 +1,7 @@ cmake_minimum_required(VERSION 3.0) set(CMAKE_VERBOSE_MAKEFILE FALSE) set(TD_BUILD_TAOSA_INTERNAL FALSE) +set(TD_BUILD_KEEPER_INTERNAL FALSE) # set output directory SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib) @@ -57,6 +58,19 @@ IF(TD_BUILD_HTTP) ADD_DEFINITIONS(-DHTTP_EMBEDDED) ENDIF() +IF("${BUILD_KEEPER}" STREQUAL "") + SET(TD_BUILD_KEEPER FALSE) +ELSEIF(${BUILD_KEEPER} MATCHES "false") + SET(TD_BUILD_KEEPER FALSE) +ELSEIF(${BUILD_KEEPER} MATCHES "true") + SET(TD_BUILD_KEEPER TRUE) +ELSEIF(${BUILD_KEEPER} MATCHES "internal") + SET(TD_BUILD_KEEPER FALSE) + SET(TD_BUILD_KEEPER_INTERNAL TRUE) +ELSE() + SET(TD_BUILD_KEEPER FALSE) +ENDIF() + IF("${BUILD_TOOLS}" STREQUAL "") IF(TD_LINUX) IF(TD_ARM_32) @@ -177,48 +191,11 @@ ELSE() SET(COMPILER_SUPPORT_AVX512VL false) ELSE() CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA) + CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX) + CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2) CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F) CHECK_C_COMPILER_FLAG("-mavx512vbmi" 
COMPILER_SUPPORT_AVX512BMI) CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL) - - INCLUDE(CheckCSourceRuns) - SET(CMAKE_REQUIRED_FLAGS "-mavx") - check_c_source_runs(" - #include - int main() { - __m256d a, b, c; - double buf[4] = {0}; - a = _mm256_loadu_pd(buf); - b = _mm256_loadu_pd(buf); - c = _mm256_add_pd(a, b); - _mm256_storeu_pd(buf, c); - for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) { - IF (buf[i] != 0) { - return 1; - } - } - return 0; - } - " COMPILER_SUPPORT_AVX) - - SET(CMAKE_REQUIRED_FLAGS "-mavx2") - check_c_source_runs(" - #include - int main() { - __m256i a, b, c; - int buf[8] = {0}; - a = _mm256_loadu_si256((__m256i *)buf); - b = _mm256_loadu_si256((__m256i *)buf); - c = _mm256_and_si256(a, b); - _mm256_storeu_si256((__m256i *)buf, c); - for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) { - IF (buf[i] != 0) { - return 1; - } - } - return 0; - } - " COMPILER_SUPPORT_AVX2) ENDIF() IF(COMPILER_SUPPORT_SSE42) diff --git a/cmake/lemon_CMakeLists.txt.in b/cmake/lemon_CMakeLists.txt.in new file mode 100644 index 00000000000..26226d3eda8 --- /dev/null +++ b/cmake/lemon_CMakeLists.txt.in @@ -0,0 +1,11 @@ +# lemon + +ExternalProject_Add( + lemon + SOURCE_DIR ${TD_CONTRIB_DIR}/lemon + CONFIGURE_COMMAND "" + BUILD_COMMAND "${C_COMPILER_LEMON}" -o ${TD_CONTRIB_DIR}/lemon/lemon ${TD_CONTRIB_DIR}/lemon/lemon.c + INSTALL_COMMAND "" + BUILD_IN_SOURCE 1 + BUILD_ALWAYS 1 +) \ No newline at end of file diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index eae697560b6..7741ae4e14f 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -184,6 +184,17 @@ if(${BUILD_PCRE2}) cat("${TD_SUPPORT_DIR}/pcre2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() +find_program(C_COMPILER_LEMON NAMES gcc) +if(C_COMPILER_LEMON) + message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}") +else() + set(C_COMPILER_LEMON ${CMAKE_C_COMPILER}) + message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}") +endif() + +# lemon 
+cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . @@ -261,11 +272,19 @@ if(${TD_DARWIN}) endif(${TD_DARWIN}) add_subdirectory(zlib EXCLUDE_FROM_ALL) + +if(${TD_DARWIN}) + target_compile_options(zlibstatic PRIVATE -Wno-error=deprecated-non-prototype) +endif() target_include_directories( zlibstatic PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/zlib ) + +if(${TD_DARWIN}) + target_compile_options(zlib PRIVATE -Wno-error=deprecated-non-prototype) +endif() target_include_directories( zlib PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib diff --git a/contrib/lemon/lemon.c b/contrib/lemon/lemon.c new file mode 100644 index 00000000000..d92df2a1a7b --- /dev/null +++ b/contrib/lemon/lemon.c @@ -0,0 +1,6038 @@ +/* +** This file contains all sources (including headers) to the LEMON +** LALR(1) parser generator. The sources have been combined into a +** single file to make it easy to include LEMON in the source tree +** and Makefile of another program. +** +** The author of this program disclaims copyright. 
+*/ +#include +#include +#include +#include +#include +#include + +#define ISSPACE(X) isspace((unsigned char)(X)) +#define ISDIGIT(X) isdigit((unsigned char)(X)) +#define ISALNUM(X) isalnum((unsigned char)(X)) +#define ISALPHA(X) isalpha((unsigned char)(X)) +#define ISUPPER(X) isupper((unsigned char)(X)) +#define ISLOWER(X) islower((unsigned char)(X)) + + +#ifndef __WIN32__ +# if defined(_WIN32) || defined(WIN32) +# define __WIN32__ +# endif +#endif + +#ifdef __WIN32__ +#ifdef __cplusplus +extern "C" { +#endif +extern int access(const char *path, int mode); +#ifdef __cplusplus +} +#endif +#else +#include +#endif + +/* #define PRIVATE static */ +#define PRIVATE + +#ifdef TEST +#define MAXRHS 5 /* Set low to exercise exception code */ +#else +#define MAXRHS 1000 +#endif + +extern void memory_error(); +static int showPrecedenceConflict = 0; +static char *msort(char*,char**,int(*)(const char*,const char*)); + +/* +** Compilers are getting increasingly pedantic about type conversions +** as C evolves ever closer to Ada.... To work around the latest problems +** we have to define the following variant of strlen(). +*/ +#define lemonStrlen(X) ((int)strlen(X)) + +/* +** Header on the linked list of memory allocations. +*/ +typedef struct MemChunk MemChunk; +struct MemChunk { + MemChunk *pNext; + size_t sz; + /* Actually memory follows */ +}; + +/* +** Global linked list of all memory allocations. +*/ +static MemChunk *memChunkList = 0; + +/* +** Wrappers around malloc(), calloc(), realloc() and free(). +** +** All memory allocations are kept on a doubly-linked list. The +** lemon_free_all() function can be called prior to exit to clean +** up any memory leaks. +** +** This is not necessary. But compilers and getting increasingly +** fussy about memory leaks, even in command-line programs like Lemon +** where they do not matter. So this code is provided to hush the +** warnings. 
+*/ +static void *lemon_malloc(size_t nByte){ + MemChunk *p; + if( nByte<0 ) return 0; + p = malloc( nByte + sizeof(MemChunk) ); + if( p==0 ){ + fprintf(stderr, "Out of memory. Failed to allocate %lld bytes.\n", + (long long int)nByte); + exit(1); + } + p->pNext = memChunkList; + p->sz = nByte; + memChunkList = p; + return (void*)&p[1]; +} +static void *lemon_calloc(size_t nElem, size_t sz){ + void *p = lemon_malloc(nElem*sz); + memset(p, 0, nElem*sz); + return p; +} +static void lemon_free(void *pOld){ + if( pOld ){ + MemChunk *p = (MemChunk*)pOld; + p--; + memset(pOld, 0, p->sz); + } +} +static void *lemon_realloc(void *pOld, size_t nNew){ + void *pNew; + MemChunk *p; + if( pOld==0 ) return lemon_malloc(nNew); + p = (MemChunk*)pOld; + p--; + if( p->sz>=nNew ) return pOld; + pNew = lemon_malloc( nNew ); + memcpy(pNew, pOld, p->sz); + return pNew; +} + +/* Free all outstanding memory allocations. +** Do this right before exiting. +*/ +static void lemon_free_all(void){ + while( memChunkList ){ + MemChunk *pNext = memChunkList->pNext; + free( memChunkList ); + memChunkList = pNext; + } +} + +/* +** Compilers are starting to complain about the use of sprintf() and strcpy(), +** saying they are unsafe. So we define our own versions of those routines too. +** +** There are three routines here: lemon_sprintf(), lemon_vsprintf(), and +** lemon_addtext(). The first two are replacements for sprintf() and vsprintf(). +** The third is a helper routine for vsnprintf() that adds texts to the end of a +** buffer, making sure the buffer is always zero-terminated. +** +** The string formatter is a minimal subset of stdlib sprintf() supporting only +** a few simply conversions: +** +** %d +** %s +** %.*s +** +*/ +static void lemon_addtext( + char *zBuf, /* The buffer to which text is added */ + int *pnUsed, /* Slots of the buffer used so far */ + const char *zIn, /* Text to add */ + int nIn, /* Bytes of text to add. -1 to use strlen() */ + int iWidth /* Field width. 
Negative to left justify */ +){ + if( nIn<0 ) for(nIn=0; zIn[nIn]; nIn++){} + while( iWidth>nIn ){ zBuf[(*pnUsed)++] = ' '; iWidth--; } + if( nIn==0 ) return; + memcpy(&zBuf[*pnUsed], zIn, nIn); + *pnUsed += nIn; + while( (-iWidth)>nIn ){ zBuf[(*pnUsed)++] = ' '; iWidth++; } + zBuf[*pnUsed] = 0; +} +static int lemon_vsprintf(char *str, const char *zFormat, va_list ap){ + int i, j, k, c; + int nUsed = 0; + const char *z; + char zTemp[50]; + str[0] = 0; + for(i=j=0; (c = zFormat[i])!=0; i++){ + if( c=='%' ){ + int iWidth = 0; + lemon_addtext(str, &nUsed, &zFormat[j], i-j, 0); + c = zFormat[++i]; + if( ISDIGIT(c) || (c=='-' && ISDIGIT(zFormat[i+1])) ){ + if( c=='-' ) i++; + while( ISDIGIT(zFormat[i]) ) iWidth = iWidth*10 + zFormat[i++] - '0'; + if( c=='-' ) iWidth = -iWidth; + c = zFormat[i]; + } + if( c=='d' ){ + int v = va_arg(ap, int); + if( v<0 ){ + lemon_addtext(str, &nUsed, "-", 1, iWidth); + v = -v; + }else if( v==0 ){ + lemon_addtext(str, &nUsed, "0", 1, iWidth); + } + k = 0; + while( v>0 ){ + k++; + zTemp[sizeof(zTemp)-k] = (v%10) + '0'; + v /= 10; + } + lemon_addtext(str, &nUsed, &zTemp[sizeof(zTemp)-k], k, iWidth); + }else if( c=='s' ){ + z = va_arg(ap, const char*); + lemon_addtext(str, &nUsed, z, -1, iWidth); + }else if( c=='.' 
&& memcmp(&zFormat[i], ".*s", 3)==0 ){ + i += 2; + k = va_arg(ap, int); + z = va_arg(ap, const char*); + lemon_addtext(str, &nUsed, z, k, iWidth); + }else if( c=='%' ){ + lemon_addtext(str, &nUsed, "%", 1, 0); + }else{ + fprintf(stderr, "illegal format\n"); + exit(1); + } + j = i+1; + } + } + lemon_addtext(str, &nUsed, &zFormat[j], i-j, 0); + return nUsed; +} +static int lemon_sprintf(char *str, const char *format, ...){ + va_list ap; + int rc; + va_start(ap, format); + rc = lemon_vsprintf(str, format, ap); + va_end(ap); + return rc; +} +static void lemon_strcpy(char *dest, const char *src){ + while( (*(dest++) = *(src++))!=0 ){} +} +static void lemon_strcat(char *dest, const char *src){ + while( *dest ) dest++; + lemon_strcpy(dest, src); +} + + +/* a few forward declarations... */ +struct rule; +struct lemon; +struct action; + +static struct action *Action_new(void); +static struct action *Action_sort(struct action *); + +/********** From the file "build.h" ************************************/ +void FindRulePrecedences(struct lemon*); +void FindFirstSets(struct lemon*); +void FindStates(struct lemon*); +void FindLinks(struct lemon*); +void FindFollowSets(struct lemon*); +void FindActions(struct lemon*); + +/********* From the file "configlist.h" *********************************/ +void Configlist_init(void); +struct config *Configlist_add(struct rule *, int); +struct config *Configlist_addbasis(struct rule *, int); +void Configlist_closure(struct lemon *); +void Configlist_sort(void); +void Configlist_sortbasis(void); +struct config *Configlist_return(void); +struct config *Configlist_basis(void); +void Configlist_eat(struct config *); +void Configlist_reset(void); + +/********* From the file "error.h" ***************************************/ +void ErrorMsg(const char *, int,const char *, ...); + +/****** From the file "option.h" ******************************************/ +enum option_type { OPT_FLAG=1, OPT_INT, OPT_DBL, OPT_STR, + OPT_FFLAG, OPT_FINT, OPT_FDBL, 
OPT_FSTR}; +struct s_options { + enum option_type type; + const char *label; + char *arg; + const char *message; +}; +int OptInit(char**,struct s_options*,FILE*); +int OptNArgs(void); +char *OptArg(int); +void OptErr(int); +void OptPrint(void); + +/******** From the file "parse.h" *****************************************/ +void Parse(struct lemon *lemp); + +/********* From the file "plink.h" ***************************************/ +struct plink *Plink_new(void); +void Plink_add(struct plink **, struct config *); +void Plink_copy(struct plink **, struct plink *); +void Plink_delete(struct plink *); + +/********** From the file "report.h" *************************************/ +void Reprint(struct lemon *); +void ReportOutput(struct lemon *); +void ReportTable(struct lemon *, int, int); +void ReportHeader(struct lemon *); +void CompressTables(struct lemon *); +void ResortStates(struct lemon *); + +/********** From the file "set.h" ****************************************/ +void SetSize(int); /* All sets will be of size N */ +char *SetNew(void); /* A new set for element 0..N */ +void SetFree(char*); /* Deallocate a set */ +int SetAdd(char*,int); /* Add element to a set */ +int SetUnion(char *,char *); /* A <- A U B, thru element N */ +#define SetFind(X,Y) (X[Y]) /* True if Y is in set X */ + +/********** From the file "struct.h" *************************************/ +/* +** Principal data structures for the LEMON parser generator. 
+*/ + +typedef enum {LEMON_FALSE=0, LEMON_TRUE} Boolean; + +/* Symbols (terminals and nonterminals) of the grammar are stored +** in the following: */ +enum symbol_type { + TERMINAL, + NONTERMINAL, + MULTITERMINAL +}; +enum e_assoc { + LEFT, + RIGHT, + NONE, + UNK +}; +struct symbol { + const char *name; /* Name of the symbol */ + int index; /* Index number for this symbol */ + enum symbol_type type; /* Symbols are all either TERMINALS or NTs */ + struct rule *rule; /* Linked list of rules of this (if an NT) */ + struct symbol *fallback; /* fallback token in case this token doesn't parse */ + int prec; /* Precedence if defined (-1 otherwise) */ + enum e_assoc assoc; /* Associativity if precedence is defined */ + char *firstset; /* First-set for all rules of this symbol */ + Boolean lambda; /* True if NT and can generate an empty string */ + int useCnt; /* Number of times used */ + char *destructor; /* Code which executes whenever this symbol is + ** popped from the stack during error processing */ + int destLineno; /* Line number for start of destructor. Set to + ** -1 for duplicate destructors. */ + char *datatype; /* The data type of information held by this + ** object. Only used if type==NONTERMINAL */ + int dtnum; /* The data type number. In the parser, the value + ** stack is a union. The .yy%d element of this + ** union is the correct data type for this object */ + int bContent; /* True if this symbol ever carries content - if + ** it is ever more than just syntax */ + /* The following fields are used by MULTITERMINALs only */ + int nsubsym; /* Number of constituent symbols in the MULTI */ + struct symbol **subsym; /* Array of constituent symbols */ +}; + +/* Each production rule in the grammar is stored in the following +** structure. 
*/ +struct rule { + struct symbol *lhs; /* Left-hand side of the rule */ + const char *lhsalias; /* Alias for the LHS (NULL if none) */ + int lhsStart; /* True if left-hand side is the start symbol */ + int ruleline; /* Line number for the rule */ + int nrhs; /* Number of RHS symbols */ + struct symbol **rhs; /* The RHS symbols */ + const char **rhsalias; /* An alias for each RHS symbol (NULL if none) */ + int line; /* Line number at which code begins */ + const char *code; /* The code executed when this rule is reduced */ + const char *codePrefix; /* Setup code before code[] above */ + const char *codeSuffix; /* Breakdown code after code[] above */ + struct symbol *precsym; /* Precedence symbol for this rule */ + int index; /* An index number for this rule */ + int iRule; /* Rule number as used in the generated tables */ + Boolean noCode; /* True if this rule has no associated C code */ + Boolean codeEmitted; /* True if the code has been emitted already */ + Boolean canReduce; /* True if this rule is ever reduced */ + Boolean doesReduce; /* Reduce actions occur after optimization */ + Boolean neverReduce; /* Reduce is theoretically possible, but prevented + ** by actions or other outside implementation */ + struct rule *nextlhs; /* Next rule with the same LHS */ + struct rule *next; /* Next rule in the global list */ +}; + +/* A configuration is a production rule of the grammar together with +** a mark (dot) showing how much of that rule has been processed so far. +** Configurations also contain a follow-set which is a list of terminal +** symbols which are allowed to immediately follow the end of the rule. 
+** Every configuration is recorded as an instance of the following: */ +enum cfgstatus { + COMPLETE, + INCOMPLETE +}; +struct config { + struct rule *rp; /* The rule upon which the configuration is based */ + int dot; /* The parse point */ + char *fws; /* Follow-set for this configuration only */ + struct plink *fplp; /* Follow-set forward propagation links */ + struct plink *bplp; /* Follow-set backwards propagation links */ + struct state *stp; /* Pointer to state which contains this */ + enum cfgstatus status; /* used during followset and shift computations */ + struct config *next; /* Next configuration in the state */ + struct config *bp; /* The next basis configuration */ +}; + +enum e_action { + SHIFT, + ACCEPT, + REDUCE, + ERROR, + SSCONFLICT, /* A shift/shift conflict */ + SRCONFLICT, /* Was a reduce, but part of a conflict */ + RRCONFLICT, /* Was a reduce, but part of a conflict */ + SH_RESOLVED, /* Was a shift. Precedence resolved conflict */ + RD_RESOLVED, /* Was reduce. Precedence resolved conflict */ + NOT_USED, /* Deleted by compression */ + SHIFTREDUCE /* Shift first, then reduce */ +}; + +/* Every shift or reduce operation is stored as one of the following */ +struct action { + struct symbol *sp; /* The look-ahead symbol */ + enum e_action type; + union { + struct state *stp; /* The new state, if a shift */ + struct rule *rp; /* The rule, if a reduce */ + } x; + struct symbol *spOpt; /* SHIFTREDUCE optimization to this symbol */ + struct action *next; /* Next action for this state */ + struct action *collide; /* Next action with the same hash */ +}; + +/* Each state of the generated parser's finite state machine +** is encoded as an instance of the following structure. 
*/ +struct state { + struct config *bp; /* The basis configurations for this state */ + struct config *cfp; /* All configurations in this set */ + int statenum; /* Sequential number for this state */ + struct action *ap; /* List of actions for this state */ + int nTknAct, nNtAct; /* Number of actions on terminals and nonterminals */ + int iTknOfst, iNtOfst; /* yy_action[] offset for terminals and nonterms */ + int iDfltReduce; /* Default action is to REDUCE by this rule */ + struct rule *pDfltReduce;/* The default REDUCE rule. */ + int autoReduce; /* True if this is an auto-reduce state */ +}; +#define NO_OFFSET (-2147483647) + +/* A followset propagation link indicates that the contents of one +** configuration followset should be propagated to another whenever +** the first changes. */ +struct plink { + struct config *cfp; /* The configuration to which linked */ + struct plink *next; /* The next propagate link */ +}; + +/* The state vector for the entire parser generator is recorded as +** follows. (LEMON uses no global variables and makes little use of +** static variables. Fields in the following structure can be thought +** of as begin global variables in the program.) 
*/ +struct lemon { + struct state **sorted; /* Table of states sorted by state number */ + struct rule *rule; /* List of all rules */ + struct rule *startRule; /* First rule */ + int nstate; /* Number of states */ + int nxstate; /* nstate with tail degenerate states removed */ + int nrule; /* Number of rules */ + int nruleWithAction; /* Number of rules with actions */ + int nsymbol; /* Number of terminal and nonterminal symbols */ + int nterminal; /* Number of terminal symbols */ + int minShiftReduce; /* Minimum shift-reduce action value */ + int errAction; /* Error action value */ + int accAction; /* Accept action value */ + int noAction; /* No-op action value */ + int minReduce; /* Minimum reduce action */ + int maxAction; /* Maximum action value of any kind */ + struct symbol **symbols; /* Sorted array of pointers to symbols */ + int errorcnt; /* Number of errors */ + struct symbol *errsym; /* The error symbol */ + struct symbol *wildcard; /* Token that matches anything */ + char *name; /* Name of the generated parser */ + char *arg; /* Declaration of the 3rd argument to parser */ + char *ctx; /* Declaration of 2nd argument to constructor */ + char *tokentype; /* Type of terminal symbols in the parser stack */ + char *vartype; /* The default type of non-terminal symbols */ + char *start; /* Name of the start symbol for the grammar */ + char *stacksize; /* Size of the parser stack */ + char *include; /* Code to put at the start of the C file */ + char *error; /* Code to execute when an error is seen */ + char *overflow; /* Code to execute on a stack overflow */ + char *failure; /* Code to execute on parser failure */ + char *accept; /* Code to execute when the parser excepts */ + char *extracode; /* Code appended to the generated file */ + char *tokendest; /* Code to execute to destroy token data */ + char *vardest; /* Code for the default non-terminal destructor */ + char *filename; /* Name of the input file */ + char *outname; /* Name of the current output file 
*/ + char *tokenprefix; /* A prefix added to token names in the .h file */ + char *reallocFunc; /* Function to use to allocate stack space */ + char *freeFunc; /* Function to use to free stack space */ + int nconflict; /* Number of parsing conflicts */ + int nactiontab; /* Number of entries in the yy_action[] table */ + int nlookaheadtab; /* Number of entries in yy_lookahead[] */ + int tablesize; /* Total table size of all tables in bytes */ + int basisflag; /* Print only basis configurations */ + int printPreprocessed; /* Show preprocessor output on stdout */ + int has_fallback; /* True if any %fallback is seen in the grammar */ + int nolinenosflag; /* True if #line statements should not be printed */ + int argc; /* Number of command-line arguments */ + char **argv; /* Command-line arguments */ +}; + +#define MemoryCheck(X) if((X)==0){ \ + extern void memory_error(); \ + memory_error(); \ +} + +/**************** From the file "table.h" *********************************/ +/* +** All code in this file has been automatically generated +** from a specification in the file +** "table.q" +** by the associative array code building program "aagen". +** Do not edit this file! Instead, edit the specification +** file, then rerun aagen. +*/ +/* +** Code for processing tables in the LEMON parser generator. 
+*/ +/* Routines for handling a strings */ + +const char *Strsafe(const char *); + +void Strsafe_init(void); +int Strsafe_insert(const char *); +const char *Strsafe_find(const char *); + +/* Routines for handling symbols of the grammar */ + +struct symbol *Symbol_new(const char *); +int Symbolcmpp(const void *, const void *); +void Symbol_init(void); +int Symbol_insert(struct symbol *, const char *); +struct symbol *Symbol_find(const char *); +struct symbol *Symbol_Nth(int); +int Symbol_count(void); +struct symbol **Symbol_arrayof(void); + +/* Routines to manage the state table */ + +int Configcmp(const char *, const char *); +struct state *State_new(void); +void State_init(void); +int State_insert(struct state *, struct config *); +struct state *State_find(struct config *); +struct state **State_arrayof(void); + +/* Routines used for efficiency in Configlist_add */ + +void Configtable_init(void); +int Configtable_insert(struct config *); +struct config *Configtable_find(struct config *); +void Configtable_clear(int(*)(struct config *)); + +/****************** From the file "action.c" *******************************/ +/* +** Routines processing parser actions in the LEMON parser generator. +*/ + +/* Allocate a new parser action */ +static struct action *Action_new(void){ + static struct action *actionfreelist = 0; + struct action *newaction; + + if( actionfreelist==0 ){ + int i; + int amt = 100; + actionfreelist = (struct action *)lemon_calloc(amt, sizeof(struct action)); + if( actionfreelist==0 ){ + fprintf(stderr,"Unable to allocate memory for a new parser action."); + exit(1); + } + for(i=0; inext; + return newaction; +} + +/* Compare two actions for sorting purposes. 
Return negative, zero, or +** positive if the first action is less than, equal to, or greater than +** the first +*/ +static int actioncmp( + struct action *ap1, + struct action *ap2 +){ + int rc; + rc = ap1->sp->index - ap2->sp->index; + if( rc==0 ){ + rc = (int)ap1->type - (int)ap2->type; + } + if( rc==0 && (ap1->type==REDUCE || ap1->type==SHIFTREDUCE) ){ + rc = ap1->x.rp->index - ap2->x.rp->index; + } + if( rc==0 ){ + rc = (int) (ap2 - ap1); + } + return rc; +} + +/* Sort parser actions */ +static struct action *Action_sort( + struct action *ap +){ + ap = (struct action *)msort((char *)ap,(char **)&ap->next, + (int(*)(const char*,const char*))actioncmp); + return ap; +} + +void Action_add( + struct action **app, + enum e_action type, + struct symbol *sp, + char *arg +){ + struct action *newaction; + newaction = Action_new(); + newaction->next = *app; + *app = newaction; + newaction->type = type; + newaction->sp = sp; + newaction->spOpt = 0; + if( type==SHIFT ){ + newaction->x.stp = (struct state *)arg; + }else{ + newaction->x.rp = (struct rule *)arg; + } +} +/********************** New code to implement the "acttab" module ***********/ +/* +** This module implements routines use to construct the yy_action[] table. +*/ + +/* +** The state of the yy_action table under construction is an instance of +** the following structure. +** +** The yy_action table maps the pair (state_number, lookahead) into an +** action_number. The table is an array of integers pairs. The state_number +** determines an initial offset into the yy_action array. The lookahead +** value is then added to this initial offset to get an index X into the +** yy_action array. If the aAction[X].lookahead equals the value of the +** of the lookahead input, then the value of the action_number output is +** aAction[X].action. If the lookaheads do not match then the +** default action for the state_number is returned. 
+** +** All actions associated with a single state_number are first entered +** into aLookahead[] using multiple calls to acttab_action(). Then the +** actions for that single state_number are placed into the aAction[] +** array with a single call to acttab_insert(). The acttab_insert() call +** also resets the aLookahead[] array in preparation for the next +** state number. +*/ +struct lookahead_action { + int lookahead; /* Value of the lookahead token */ + int action; /* Action to take on the given lookahead */ +}; +typedef struct acttab acttab; +struct acttab { + int nAction; /* Number of used slots in aAction[] */ + int nActionAlloc; /* Slots allocated for aAction[] */ + struct lookahead_action + *aAction, /* The yy_action[] table under construction */ + *aLookahead; /* A single new transaction set */ + int mnLookahead; /* Minimum aLookahead[].lookahead */ + int mnAction; /* Action associated with mnLookahead */ + int mxLookahead; /* Maximum aLookahead[].lookahead */ + int nLookahead; /* Used slots in aLookahead[] */ + int nLookaheadAlloc; /* Slots allocated in aLookahead[] */ + int nterminal; /* Number of terminal symbols */ + int nsymbol; /* total number of symbols */ +}; + +/* Return the number of entries in the yy_action table */ +#define acttab_lookahead_size(X) ((X)->nAction) + +/* The value for the N-th entry in yy_action */ +#define acttab_yyaction(X,N) ((X)->aAction[N].action) + +/* The value for the N-th entry in yy_lookahead */ +#define acttab_yylookahead(X,N) ((X)->aAction[N].lookahead) + +/* Free all memory associated with the given acttab */ +void acttab_free(acttab *p){ + lemon_free( p->aAction ); + lemon_free( p->aLookahead ); + lemon_free( p ); +} + +/* Allocate a new acttab structure */ +acttab *acttab_alloc(int nsymbol, int nterminal){ + acttab *p = (acttab *) lemon_calloc( 1, sizeof(*p) ); + if( p==0 ){ + fprintf(stderr,"Unable to allocate memory for a new acttab."); + exit(1); + } + memset(p, 0, sizeof(*p)); + p->nsymbol = nsymbol; + 
p->nterminal = nterminal; + return p; +} + +/* Add a new action to the current transaction set. +** +** This routine is called once for each lookahead for a particular +** state. +*/ +void acttab_action(acttab *p, int lookahead, int action){ + if( p->nLookahead>=p->nLookaheadAlloc ){ + p->nLookaheadAlloc += 25; + p->aLookahead = (struct lookahead_action *) lemon_realloc( p->aLookahead, + sizeof(p->aLookahead[0])*p->nLookaheadAlloc ); + if( p->aLookahead==0 ){ + fprintf(stderr,"malloc failed\n"); + exit(1); + } + } + if( p->nLookahead==0 ){ + p->mxLookahead = lookahead; + p->mnLookahead = lookahead; + p->mnAction = action; + }else{ + if( p->mxLookaheadmxLookahead = lookahead; + if( p->mnLookahead>lookahead ){ + p->mnLookahead = lookahead; + p->mnAction = action; + } + } + p->aLookahead[p->nLookahead].lookahead = lookahead; + p->aLookahead[p->nLookahead].action = action; + p->nLookahead++; +} + +/* +** Add the transaction set built up with prior calls to acttab_action() +** into the current action table. Then reset the transaction set back +** to an empty set in preparation for a new round of acttab_action() calls. +** +** Return the offset into the action table of the new transaction. +** +** If the makeItSafe parameter is true, then the offset is chosen so that +** it is impossible to overread the yy_lookaside[] table regardless of +** the lookaside token. This is done for the terminal symbols, as they +** come from external inputs and can contain syntax errors. When makeItSafe +** is false, there is more flexibility in selecting offsets, resulting in +** a smaller table. For non-terminal symbols, which are never syntax errors, +** makeItSafe can be false. +*/ +int acttab_insert(acttab *p, int makeItSafe){ + int i, j, k, n, end; + assert( p->nLookahead>0 ); + + /* Make sure we have enough space to hold the expanded action table + ** in the worst case. 
The worst case occurs if the transaction set + ** must be appended to the current action table + */ + n = p->nsymbol + 1; + if( p->nAction + n >= p->nActionAlloc ){ + int oldAlloc = p->nActionAlloc; + p->nActionAlloc = p->nAction + n + p->nActionAlloc + 20; + p->aAction = (struct lookahead_action *) lemon_realloc( p->aAction, + sizeof(p->aAction[0])*p->nActionAlloc); + if( p->aAction==0 ){ + fprintf(stderr,"malloc failed\n"); + exit(1); + } + for(i=oldAlloc; inActionAlloc; i++){ + p->aAction[i].lookahead = -1; + p->aAction[i].action = -1; + } + } + + /* Scan the existing action table looking for an offset that is a + ** duplicate of the current transaction set. Fall out of the loop + ** if and when the duplicate is found. + ** + ** i is the index in p->aAction[] where p->mnLookahead is inserted. + */ + end = makeItSafe ? p->mnLookahead : 0; + for(i=p->nAction-1; i>=end; i--){ + if( p->aAction[i].lookahead==p->mnLookahead ){ + /* All lookaheads and actions in the aLookahead[] transaction + ** must match against the candidate aAction[i] entry. */ + if( p->aAction[i].action!=p->mnAction ) continue; + for(j=0; jnLookahead; j++){ + k = p->aLookahead[j].lookahead - p->mnLookahead + i; + if( k<0 || k>=p->nAction ) break; + if( p->aLookahead[j].lookahead!=p->aAction[k].lookahead ) break; + if( p->aLookahead[j].action!=p->aAction[k].action ) break; + } + if( jnLookahead ) continue; + + /* No possible lookahead value that is not in the aLookahead[] + ** transaction is allowed to match aAction[i] */ + n = 0; + for(j=0; jnAction; j++){ + if( p->aAction[j].lookahead<0 ) continue; + if( p->aAction[j].lookahead==j+p->mnLookahead-i ) n++; + } + if( n==p->nLookahead ){ + break; /* An exact match is found at offset i */ + } + } + } + + /* If no existing offsets exactly match the current transaction, find an + ** an empty offset in the aAction[] table in which we can add the + ** aLookahead[] transaction. + */ + if( inAction, which means the + ** transaction will be appended. 
*/ + i = makeItSafe ? p->mnLookahead : 0; + for(; inActionAlloc - p->mxLookahead; i++){ + if( p->aAction[i].lookahead<0 ){ + for(j=0; jnLookahead; j++){ + k = p->aLookahead[j].lookahead - p->mnLookahead + i; + if( k<0 ) break; + if( p->aAction[k].lookahead>=0 ) break; + } + if( jnLookahead ) continue; + for(j=0; jnAction; j++){ + if( p->aAction[j].lookahead==j+p->mnLookahead-i ) break; + } + if( j==p->nAction ){ + break; /* Fits in empty slots */ + } + } + } + } + /* Insert transaction set at index i. */ +#if 0 + printf("Acttab:"); + for(j=0; jnLookahead; j++){ + printf(" %d", p->aLookahead[j].lookahead); + } + printf(" inserted at %d\n", i); +#endif + for(j=0; jnLookahead; j++){ + k = p->aLookahead[j].lookahead - p->mnLookahead + i; + p->aAction[k] = p->aLookahead[j]; + if( k>=p->nAction ) p->nAction = k+1; + } + if( makeItSafe && i+p->nterminal>=p->nAction ) p->nAction = i+p->nterminal+1; + p->nLookahead = 0; + + /* Return the offset that is added to the lookahead in order to get the + ** index into yy_action of the action */ + return i - p->mnLookahead; +} + +/* +** Return the size of the action table without the trailing syntax error +** entries. +*/ +int acttab_action_size(acttab *p){ + int n = p->nAction; + while( n>0 && p->aAction[n-1].lookahead<0 ){ n--; } + return n; +} + +/********************** From the file "build.c" *****************************/ +/* +** Routines to construction the finite state machine for the LEMON +** parser generator. +*/ + +/* Find a precedence symbol of every rule in the grammar. +** +** Those rules which have a precedence symbol coded in the input +** grammar using the "[symbol]" construct will already have the +** rp->precsym field filled. Other rules take as their precedence +** symbol the first RHS symbol with a defined precedence. If there +** are not RHS symbols with a defined precedence, the precedence +** symbol field is left blank. 
+*/ +void FindRulePrecedences(struct lemon *xp) +{ + struct rule *rp; + for(rp=xp->rule; rp; rp=rp->next){ + if( rp->precsym==0 ){ + int i, j; + for(i=0; inrhs && rp->precsym==0; i++){ + struct symbol *sp = rp->rhs[i]; + if( sp->type==MULTITERMINAL ){ + for(j=0; jnsubsym; j++){ + if( sp->subsym[j]->prec>=0 ){ + rp->precsym = sp->subsym[j]; + break; + } + } + }else if( sp->prec>=0 ){ + rp->precsym = rp->rhs[i]; + } + } + } + } + return; +} + +/* Find all nonterminals which will generate the empty string. +** Then go back and compute the first sets of every nonterminal. +** The first set is the set of all terminal symbols which can begin +** a string generated by that nonterminal. +*/ +void FindFirstSets(struct lemon *lemp) +{ + int i, j; + struct rule *rp; + int progress; + + for(i=0; insymbol; i++){ + lemp->symbols[i]->lambda = LEMON_FALSE; + } + for(i=lemp->nterminal; insymbol; i++){ + lemp->symbols[i]->firstset = SetNew(); + } + + /* First compute all lambdas */ + do{ + progress = 0; + for(rp=lemp->rule; rp; rp=rp->next){ + if( rp->lhs->lambda ) continue; + for(i=0; inrhs; i++){ + struct symbol *sp = rp->rhs[i]; + assert( sp->type==NONTERMINAL || sp->lambda==LEMON_FALSE ); + if( sp->lambda==LEMON_FALSE ) break; + } + if( i==rp->nrhs ){ + rp->lhs->lambda = LEMON_TRUE; + progress = 1; + } + } + }while( progress ); + + /* Now compute all first sets */ + do{ + struct symbol *s1, *s2; + progress = 0; + for(rp=lemp->rule; rp; rp=rp->next){ + s1 = rp->lhs; + for(i=0; inrhs; i++){ + s2 = rp->rhs[i]; + if( s2->type==TERMINAL ){ + progress += SetAdd(s1->firstset,s2->index); + break; + }else if( s2->type==MULTITERMINAL ){ + for(j=0; jnsubsym; j++){ + progress += SetAdd(s1->firstset,s2->subsym[j]->index); + } + break; + }else if( s1==s2 ){ + if( s1->lambda==LEMON_FALSE ) break; + }else{ + progress += SetUnion(s1->firstset,s2->firstset); + if( s2->lambda==LEMON_FALSE ) break; + } + } + } + }while( progress ); + return; +} + +/* Compute all LR(0) states for the grammar. 
Links +** are added to between some states so that the LR(1) follow sets +** can be computed later. +*/ +PRIVATE struct state *getstate(struct lemon *); /* forward reference */ +void FindStates(struct lemon *lemp) +{ + struct symbol *sp; + struct rule *rp; + + Configlist_init(); + + /* Find the start symbol */ + if( lemp->start ){ + sp = Symbol_find(lemp->start); + if( sp==0 ){ + ErrorMsg(lemp->filename,0, + "The specified start symbol \"%s\" is not " + "in a nonterminal of the grammar. \"%s\" will be used as the start " + "symbol instead.",lemp->start,lemp->startRule->lhs->name); + lemp->errorcnt++; + sp = lemp->startRule->lhs; + } + }else if( lemp->startRule ){ + sp = lemp->startRule->lhs; + }else{ + ErrorMsg(lemp->filename,0,"Internal error - no start rule\n"); + exit(1); + } + + /* Make sure the start symbol doesn't occur on the right-hand side of + ** any rule. Report an error if it does. (YACC would generate a new + ** start symbol in this case.) */ + for(rp=lemp->rule; rp; rp=rp->next){ + int i; + for(i=0; inrhs; i++){ + if( rp->rhs[i]==sp ){ /* FIX ME: Deal with multiterminals */ + ErrorMsg(lemp->filename,0, + "The start symbol \"%s\" occurs on the " + "right-hand side of a rule. This will result in a parser which " + "does not work properly.",sp->name); + lemp->errorcnt++; + } + } + } + + /* The basis configuration set for the first state + ** is all rules which have the start symbol as their + ** left-hand side */ + for(rp=sp->rule; rp; rp=rp->nextlhs){ + struct config *newcfp; + rp->lhsStart = 1; + newcfp = Configlist_addbasis(rp,0); + SetAdd(newcfp->fws,0); + } + + /* Compute the first state. All other states will be + ** computed automatically during the computation of the first one. + ** The returned pointer to the first state is not used. */ + (void)getstate(lemp); + return; +} + +/* Return a pointer to a state which is described by the configuration +** list which has been built from calls to Configlist_add. 
+*/ +PRIVATE void buildshifts(struct lemon *, struct state *); /* Forwd ref */ +PRIVATE struct state *getstate(struct lemon *lemp) +{ + struct config *cfp, *bp; + struct state *stp; + + /* Extract the sorted basis of the new state. The basis was constructed + ** by prior calls to "Configlist_addbasis()". */ + Configlist_sortbasis(); + bp = Configlist_basis(); + + /* Get a state with the same basis */ + stp = State_find(bp); + if( stp ){ + /* A state with the same basis already exists! Copy all the follow-set + ** propagation links from the state under construction into the + ** preexisting state, then return a pointer to the preexisting state */ + struct config *x, *y; + for(x=bp, y=stp->bp; x && y; x=x->bp, y=y->bp){ + Plink_copy(&y->bplp,x->bplp); + Plink_delete(x->fplp); + x->fplp = x->bplp = 0; + } + cfp = Configlist_return(); + Configlist_eat(cfp); + }else{ + /* This really is a new state. Construct all the details */ + Configlist_closure(lemp); /* Compute the configuration closure */ + Configlist_sort(); /* Sort the configuration closure */ + cfp = Configlist_return(); /* Get a pointer to the config list */ + stp = State_new(); /* A new state structure */ + MemoryCheck(stp); + stp->bp = bp; /* Remember the configuration basis */ + stp->cfp = cfp; /* Remember the configuration closure */ + stp->statenum = lemp->nstate++; /* Every state gets a sequence number */ + stp->ap = 0; /* No actions, yet. */ + State_insert(stp,stp->bp); /* Add to the state table */ + buildshifts(lemp,stp); /* Recursively compute successor states */ + } + return stp; +} + +/* +** Return true if two symbols are the same. +*/ +int same_symbol(struct symbol *a, struct symbol *b) +{ + int i; + if( a==b ) return 1; + if( a->type!=MULTITERMINAL ) return 0; + if( b->type!=MULTITERMINAL ) return 0; + if( a->nsubsym!=b->nsubsym ) return 0; + for(i=0; insubsym; i++){ + if( a->subsym[i]!=b->subsym[i] ) return 0; + } + return 1; +} + +/* Construct all successor states to the given state. 
A "successor" +** state is any state which can be reached by a shift action. +*/ +PRIVATE void buildshifts(struct lemon *lemp, struct state *stp) +{ + struct config *cfp; /* For looping thru the config closure of "stp" */ + struct config *bcfp; /* For the inner loop on config closure of "stp" */ + struct config *newcfg; /* */ + struct symbol *sp; /* Symbol following the dot in configuration "cfp" */ + struct symbol *bsp; /* Symbol following the dot in configuration "bcfp" */ + struct state *newstp; /* A pointer to a successor state */ + + /* Each configuration becomes complete after it contributes to a successor + ** state. Initially, all configurations are incomplete */ + for(cfp=stp->cfp; cfp; cfp=cfp->next) cfp->status = INCOMPLETE; + + /* Loop through all configurations of the state "stp" */ + for(cfp=stp->cfp; cfp; cfp=cfp->next){ + if( cfp->status==COMPLETE ) continue; /* Already used by inner loop */ + if( cfp->dot>=cfp->rp->nrhs ) continue; /* Can't shift this config */ + Configlist_reset(); /* Reset the new config set */ + sp = cfp->rp->rhs[cfp->dot]; /* Symbol after the dot */ + + /* For every configuration in the state "stp" which has the symbol "sp" + ** following its dot, add the same configuration to the basis set under + ** construction but with the dot shifted one symbol to the right. 
*/ + for(bcfp=cfp; bcfp; bcfp=bcfp->next){ + if( bcfp->status==COMPLETE ) continue; /* Already used */ + if( bcfp->dot>=bcfp->rp->nrhs ) continue; /* Can't shift this one */ + bsp = bcfp->rp->rhs[bcfp->dot]; /* Get symbol after dot */ + if( !same_symbol(bsp,sp) ) continue; /* Must be same as for "cfp" */ + bcfp->status = COMPLETE; /* Mark this config as used */ + newcfg = Configlist_addbasis(bcfp->rp,bcfp->dot+1); + Plink_add(&newcfg->bplp,bcfp); + } + + /* Get a pointer to the state described by the basis configuration set + ** constructed in the preceding loop */ + newstp = getstate(lemp); + + /* The state "newstp" is reached from the state "stp" by a shift action + ** on the symbol "sp" */ + if( sp->type==MULTITERMINAL ){ + int i; + for(i=0; insubsym; i++){ + Action_add(&stp->ap,SHIFT,sp->subsym[i],(char*)newstp); + } + }else{ + Action_add(&stp->ap,SHIFT,sp,(char *)newstp); + } + } +} + +/* +** Construct the propagation links +*/ +void FindLinks(struct lemon *lemp) +{ + int i; + struct config *cfp, *other; + struct state *stp; + struct plink *plp; + + /* Housekeeping detail: + ** Add to every propagate link a pointer back to the state to + ** which the link is attached. */ + for(i=0; instate; i++){ + stp = lemp->sorted[i]; + for(cfp=stp?stp->cfp:0; cfp; cfp=cfp->next){ + cfp->stp = stp; + } + } + + /* Convert all backlinks into forward links. Only the forward + ** links are used in the follow-set computation. */ + for(i=0; instate; i++){ + stp = lemp->sorted[i]; + for(cfp=stp?stp->cfp:0; cfp; cfp=cfp->next){ + for(plp=cfp->bplp; plp; plp=plp->next){ + other = plp->cfp; + Plink_add(&other->fplp,cfp); + } + } + } +} + +/* Compute all followsets. +** +** A followset is the set of all symbols which can come immediately +** after a configuration. 
+*/ +void FindFollowSets(struct lemon *lemp) +{ + int i; + struct config *cfp; + struct plink *plp; + int progress; + int change; + + for(i=0; instate; i++){ + assert( lemp->sorted[i]!=0 ); + for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){ + cfp->status = INCOMPLETE; + } + } + + do{ + progress = 0; + for(i=0; instate; i++){ + assert( lemp->sorted[i]!=0 ); + for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){ + if( cfp->status==COMPLETE ) continue; + for(plp=cfp->fplp; plp; plp=plp->next){ + change = SetUnion(plp->cfp->fws,cfp->fws); + if( change ){ + plp->cfp->status = INCOMPLETE; + progress = 1; + } + } + cfp->status = COMPLETE; + } + } + }while( progress ); +} + +static int resolve_conflict(struct action *,struct action *); + +/* Compute the reduce actions, and resolve conflicts. +*/ +void FindActions(struct lemon *lemp) +{ + int i,j; + struct config *cfp; + struct state *stp; + struct symbol *sp; + struct rule *rp; + + /* Add all of the reduce actions + ** A reduce action is added for each element of the followset of + ** a configuration which has its dot at the extreme right. + */ + for(i=0; instate; i++){ /* Loop over all states */ + stp = lemp->sorted[i]; + for(cfp=stp->cfp; cfp; cfp=cfp->next){ /* Loop over all configurations */ + if( cfp->rp->nrhs==cfp->dot ){ /* Is dot at extreme right? 
*/ + for(j=0; jnterminal; j++){ + if( SetFind(cfp->fws,j) ){ + /* Add a reduce action to the state "stp" which will reduce by the + ** rule "cfp->rp" if the lookahead symbol is "lemp->symbols[j]" */ + Action_add(&stp->ap,REDUCE,lemp->symbols[j],(char *)cfp->rp); + } + } + } + } + } + + /* Add the accepting token */ + if( lemp->start ){ + sp = Symbol_find(lemp->start); + if( sp==0 ){ + if( lemp->startRule==0 ){ + fprintf(stderr, "internal error on source line %d: no start rule\n", + __LINE__); + exit(1); + } + sp = lemp->startRule->lhs; + } + }else{ + sp = lemp->startRule->lhs; + } + /* Add to the first state (which is always the starting state of the + ** finite state machine) an action to ACCEPT if the lookahead is the + ** start nonterminal. */ + Action_add(&lemp->sorted[0]->ap,ACCEPT,sp,0); + + /* Resolve conflicts */ + for(i=0; instate; i++){ + struct action *ap, *nap; + stp = lemp->sorted[i]; + /* assert( stp->ap ); */ + stp->ap = Action_sort(stp->ap); + for(ap=stp->ap; ap && ap->next; ap=ap->next){ + for(nap=ap->next; nap && nap->sp==ap->sp; nap=nap->next){ + /* The two actions "ap" and "nap" have the same lookahead. + ** Figure out which one should be used */ + lemp->nconflict += resolve_conflict(ap,nap); + } + } + } + + /* Report an error for each rule that can never be reduced. */ + for(rp=lemp->rule; rp; rp=rp->next) rp->canReduce = LEMON_FALSE; + for(i=0; instate; i++){ + struct action *ap; + for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){ + if( ap->type==REDUCE ) ap->x.rp->canReduce = LEMON_TRUE; + } + } + for(rp=lemp->rule; rp; rp=rp->next){ + if( rp->canReduce ) continue; + ErrorMsg(lemp->filename,rp->ruleline,"This rule can not be reduced.\n"); + lemp->errorcnt++; + } +} + +/* Resolve a conflict between the two given actions. If the +** conflict can't be resolved, return non-zero. +** +** NO LONGER TRUE: +** To resolve a conflict, first look to see if either action +** is on an error rule. 
In that case, take the action which +** is not associated with the error rule. If neither or both +** actions are associated with an error rule, then try to +** use precedence to resolve the conflict. +** +** If either action is a SHIFT, then it must be apx. This +** function won't work if apx->type==REDUCE and apy->type==SHIFT. +*/ +static int resolve_conflict( + struct action *apx, + struct action *apy +){ + struct symbol *spx, *spy; + int errcnt = 0; + assert( apx->sp==apy->sp ); /* Otherwise there would be no conflict */ + if( apx->type==SHIFT && apy->type==SHIFT ){ + apy->type = SSCONFLICT; + errcnt++; + } + if( apx->type==SHIFT && apy->type==REDUCE ){ + spx = apx->sp; + spy = apy->x.rp->precsym; + if( spy==0 || spx->prec<0 || spy->prec<0 ){ + /* Not enough precedence information. */ + apy->type = SRCONFLICT; + errcnt++; + }else if( spx->prec>spy->prec ){ /* higher precedence wins */ + apy->type = RD_RESOLVED; + }else if( spx->precprec ){ + apx->type = SH_RESOLVED; + }else if( spx->prec==spy->prec && spx->assoc==RIGHT ){ /* Use operator */ + apy->type = RD_RESOLVED; /* associativity */ + }else if( spx->prec==spy->prec && spx->assoc==LEFT ){ /* to break tie */ + apx->type = SH_RESOLVED; + }else{ + assert( spx->prec==spy->prec && spx->assoc==NONE ); + apx->type = ERROR; + } + }else if( apx->type==REDUCE && apy->type==REDUCE ){ + spx = apx->x.rp->precsym; + spy = apy->x.rp->precsym; + if( spx==0 || spy==0 || spx->prec<0 || + spy->prec<0 || spx->prec==spy->prec ){ + apy->type = RRCONFLICT; + errcnt++; + }else if( spx->prec>spy->prec ){ + apy->type = RD_RESOLVED; + }else if( spx->precprec ){ + apx->type = RD_RESOLVED; + } + }else{ + assert( + apx->type==SH_RESOLVED || + apx->type==RD_RESOLVED || + apx->type==SSCONFLICT || + apx->type==SRCONFLICT || + apx->type==RRCONFLICT || + apy->type==SH_RESOLVED || + apy->type==RD_RESOLVED || + apy->type==SSCONFLICT || + apy->type==SRCONFLICT || + apy->type==RRCONFLICT + ); + /* The REDUCE/SHIFT case cannot happen because 
SHIFTs come before + ** REDUCEs on the list. If we reach this point it must be because + ** the parser conflict had already been resolved. */ + } + return errcnt; +} +/********************* From the file "configlist.c" *************************/ +/* +** Routines to processing a configuration list and building a state +** in the LEMON parser generator. +*/ + +static struct config *freelist = 0; /* List of free configurations */ +static struct config *current = 0; /* Top of list of configurations */ +static struct config **currentend = 0; /* Last on list of configs */ +static struct config *basis = 0; /* Top of list of basis configs */ +static struct config **basisend = 0; /* End of list of basis configs */ + +/* Return a pointer to a new configuration */ +PRIVATE struct config *newconfig(void){ + return (struct config*)lemon_calloc(1, sizeof(struct config)); +} + +/* The configuration "old" is no longer used */ +PRIVATE void deleteconfig(struct config *old) +{ + old->next = freelist; + freelist = old; +} + +/* Initialized the configuration list builder */ +void Configlist_init(void){ + current = 0; + currentend = ¤t; + basis = 0; + basisend = &basis; + Configtable_init(); + return; +} + +/* Initialized the configuration list builder */ +void Configlist_reset(void){ + current = 0; + currentend = ¤t; + basis = 0; + basisend = &basis; + Configtable_clear(0); + return; +} + +/* Add another configuration to the configuration list */ +struct config *Configlist_add( + struct rule *rp, /* The rule */ + int dot /* Index into the RHS of the rule where the dot goes */ +){ + struct config *cfp, model; + + assert( currentend!=0 ); + model.rp = rp; + model.dot = dot; + cfp = Configtable_find(&model); + if( cfp==0 ){ + cfp = newconfig(); + cfp->rp = rp; + cfp->dot = dot; + cfp->fws = SetNew(); + cfp->stp = 0; + cfp->fplp = cfp->bplp = 0; + cfp->next = 0; + cfp->bp = 0; + *currentend = cfp; + currentend = &cfp->next; + Configtable_insert(cfp); + } + return cfp; +} + +/* Add a basis 
configuration to the configuration list */ +struct config *Configlist_addbasis(struct rule *rp, int dot) +{ + struct config *cfp, model; + + assert( basisend!=0 ); + assert( currentend!=0 ); + model.rp = rp; + model.dot = dot; + cfp = Configtable_find(&model); + if( cfp==0 ){ + cfp = newconfig(); + cfp->rp = rp; + cfp->dot = dot; + cfp->fws = SetNew(); + cfp->stp = 0; + cfp->fplp = cfp->bplp = 0; + cfp->next = 0; + cfp->bp = 0; + *currentend = cfp; + currentend = &cfp->next; + *basisend = cfp; + basisend = &cfp->bp; + Configtable_insert(cfp); + } + return cfp; +} + +/* Compute the closure of the configuration list */ +void Configlist_closure(struct lemon *lemp) +{ + struct config *cfp, *newcfp; + struct rule *rp, *newrp; + struct symbol *sp, *xsp; + int i, dot; + + assert( currentend!=0 ); + for(cfp=current; cfp; cfp=cfp->next){ + rp = cfp->rp; + dot = cfp->dot; + if( dot>=rp->nrhs ) continue; + sp = rp->rhs[dot]; + if( sp->type==NONTERMINAL ){ + if( sp->rule==0 && sp!=lemp->errsym ){ + ErrorMsg(lemp->filename,rp->line,"Nonterminal \"%s\" has no rules.", + sp->name); + lemp->errorcnt++; + } + for(newrp=sp->rule; newrp; newrp=newrp->nextlhs){ + newcfp = Configlist_add(newrp,0); + for(i=dot+1; inrhs; i++){ + xsp = rp->rhs[i]; + if( xsp->type==TERMINAL ){ + SetAdd(newcfp->fws,xsp->index); + break; + }else if( xsp->type==MULTITERMINAL ){ + int k; + for(k=0; knsubsym; k++){ + SetAdd(newcfp->fws, xsp->subsym[k]->index); + } + break; + }else{ + SetUnion(newcfp->fws,xsp->firstset); + if( xsp->lambda==LEMON_FALSE ) break; + } + } + if( i==rp->nrhs ) Plink_add(&cfp->fplp,newcfp); + } + } + } + return; +} + +/* Sort the configuration list */ +void Configlist_sort(void){ + current = (struct config*)msort((char*)current,(char**)&(current->next), + Configcmp); + currentend = 0; + return; +} + +/* Sort the basis configuration list */ +void Configlist_sortbasis(void){ + basis = (struct config*)msort((char*)current,(char**)&(current->bp), + Configcmp); + basisend = 0; + return; +} 
+ +/* Return a pointer to the head of the configuration list and +** reset the list */ +struct config *Configlist_return(void){ + struct config *old; + old = current; + current = 0; + currentend = 0; + return old; +} + +/* Return a pointer to the head of the configuration list and +** reset the list */ +struct config *Configlist_basis(void){ + struct config *old; + old = basis; + basis = 0; + basisend = 0; + return old; +} + +/* Free all elements of the given configuration list */ +void Configlist_eat(struct config *cfp) +{ + struct config *nextcfp; + for(; cfp; cfp=nextcfp){ + nextcfp = cfp->next; + assert( cfp->fplp==0 ); + assert( cfp->bplp==0 ); + if( cfp->fws ) SetFree(cfp->fws); + deleteconfig(cfp); + } + return; +} +/***************** From the file "error.c" *********************************/ +/* +** Code for printing error message. +*/ + +void ErrorMsg(const char *filename, int lineno, const char *format, ...){ + va_list ap; + fprintf(stderr, "%s:%d: ", filename, lineno); + va_start(ap, format); + vfprintf(stderr,format,ap); + va_end(ap); + fprintf(stderr, "\n"); +} +/**************** From the file "main.c" ************************************/ +/* +** Main program file for the LEMON parser generator. +*/ + +/* Report an out-of-memory condition and abort. This function +** is used mostly by the "MemoryCheck" macro in struct.h +*/ +void memory_error(void){ + fprintf(stderr,"Out of memory. Aborting...\n"); + exit(1); +} + +static int nDefine = 0; /* Number of -D options on the command line */ +static int nDefineUsed = 0; /* Number of -D options actually used */ +static char **azDefine = 0; /* Name of the -D macros */ +static char *bDefineUsed = 0; /* True for every -D macro actually used */ + +/* This routine is called with the argument to each -D command-line option. +** Add the macro defined to the azDefine array. 
+*/ +static void handle_D_option(char *z){ + char **paz; + nDefine++; + azDefine = (char **) lemon_realloc(azDefine, sizeof(azDefine[0])*nDefine); + if( azDefine==0 ){ + fprintf(stderr,"out of memory\n"); + exit(1); + } + bDefineUsed = (char*)lemon_realloc(bDefineUsed, nDefine); + if( bDefineUsed==0 ){ + fprintf(stderr,"out of memory\n"); + exit(1); + } + bDefineUsed[nDefine-1] = 0; + paz = &azDefine[nDefine-1]; + *paz = (char *) lemon_malloc( lemonStrlen(z)+1 ); + if( *paz==0 ){ + fprintf(stderr,"out of memory\n"); + exit(1); + } + lemon_strcpy(*paz, z); + for(z=*paz; *z && *z!='='; z++){} + *z = 0; +} + +/* Rember the name of the output directory +*/ +static char *outputDir = NULL; +static void handle_d_option(char *z){ + outputDir = (char *) lemon_malloc( lemonStrlen(z)+1 ); + if( outputDir==0 ){ + fprintf(stderr,"out of memory\n"); + exit(1); + } + lemon_strcpy(outputDir, z); +} + +static char *user_templatename = NULL; +static void handle_T_option(char *z){ + user_templatename = (char *) lemon_malloc( lemonStrlen(z)+1 ); + if( user_templatename==0 ){ + memory_error(); + } + lemon_strcpy(user_templatename, z); +} + +/* Merge together to lists of rules ordered by rule.iRule */ +static struct rule *Rule_merge(struct rule *pA, struct rule *pB){ + struct rule *pFirst = 0; + struct rule **ppPrev = &pFirst; + while( pA && pB ){ + if( pA->iRuleiRule ){ + *ppPrev = pA; + ppPrev = &pA->next; + pA = pA->next; + }else{ + *ppPrev = pB; + ppPrev = &pB->next; + pB = pB->next; + } + } + if( pA ){ + *ppPrev = pA; + }else{ + *ppPrev = pB; + } + return pFirst; +} + +/* +** Sort a list of rules in order of increasing iRule value +*/ +static struct rule *Rule_sort(struct rule *rp){ + unsigned int i; + struct rule *pNext; + struct rule *x[32]; + memset(x, 0, sizeof(x)); + while( rp ){ + pNext = rp->next; + rp->next = 0; + for(i=0; iindex = i; + qsort(lem.symbols,lem.nsymbol,sizeof(struct symbol*), Symbolcmpp); + for(i=0; iindex = i; + while( lem.symbols[i-1]->type==MULTITERMINAL ){ 
i--; } + assert( strcmp(lem.symbols[i-1]->name,"{default}")==0 ); + lem.nsymbol = i - 1; + for(i=1; ISUPPER(lem.symbols[i]->name[0]); i++); + lem.nterminal = i; + + /* Assign sequential rule numbers. Start with 0. Put rules that have no + ** reduce action C-code associated with them last, so that the switch() + ** statement that selects reduction actions will have a smaller jump table. + */ + for(i=0, rp=lem.rule; rp; rp=rp->next){ + rp->iRule = rp->code ? i++ : -1; + } + lem.nruleWithAction = i; + for(rp=lem.rule; rp; rp=rp->next){ + if( rp->iRule<0 ) rp->iRule = i++; + } + lem.startRule = lem.rule; + lem.rule = Rule_sort(lem.rule); + + /* Generate a reprint of the grammar, if requested on the command line */ + if( rpflag ){ + Reprint(&lem); + }else{ + /* Initialize the size for all follow and first sets */ + SetSize(lem.nterminal+1); + + /* Find the precedence for every production rule (that has one) */ + FindRulePrecedences(&lem); + + /* Compute the lambda-nonterminals and the first-sets for every + ** nonterminal */ + FindFirstSets(&lem); + + /* Compute all LR(0) states. Also record follow-set propagation + ** links so that the follow-set can be computed later */ + lem.nstate = 0; + FindStates(&lem); + lem.sorted = State_arrayof(); + + /* Tie up loose ends on the propagation links */ + FindLinks(&lem); + + /* Compute the follow set of every reducible configuration */ + FindFollowSets(&lem); + + /* Compute the action tables */ + FindActions(&lem); + + /* Compress the action tables */ + if( compress==0 ) CompressTables(&lem); + + /* Reorder and renumber the states so that states with fewer choices + ** occur at the end. This is an optimization that helps make the + ** generated parser tables smaller. */ + if( noResort==0 ) ResortStates(&lem); + + /* Generate a report of the parser generated. 
(the "y.output" file) */ + if( !quiet ) ReportOutput(&lem); + + /* Generate the source code for the parser */ + ReportTable(&lem, mhflag, sqlFlag); + + /* Produce a header file for use by the scanner. (This step is + ** omitted if the "-m" option is used because makeheaders will + ** generate the file for us.) */ + if( !mhflag ) ReportHeader(&lem); + } + if( statistics ){ + printf("Parser statistics:\n"); + stats_line("terminal symbols", lem.nterminal); + stats_line("non-terminal symbols", lem.nsymbol - lem.nterminal); + stats_line("total symbols", lem.nsymbol); + stats_line("rules", lem.nrule); + stats_line("states", lem.nxstate); + stats_line("conflicts", lem.nconflict); + stats_line("action table entries", lem.nactiontab); + stats_line("lookahead table entries", lem.nlookaheadtab); + stats_line("total table size (bytes)", lem.tablesize); + } + if( lem.nconflict > 0 ){ + fprintf(stderr,"%d parsing conflicts.\n",lem.nconflict); + } + + /* return 0 on success, 1 on failure. */ + exitcode = ((lem.errorcnt > 0) || (lem.nconflict > 0)) ? 1 : 0; + lemon_free_all(); + exit(exitcode); + return (exitcode); +} +/******************** From the file "msort.c" *******************************/ +/* +** A generic merge-sort program. +** +** USAGE: +** Let "ptr" be a pointer to some structure which is at the head of +** a null-terminated list. Then to sort the list call: +** +** ptr = msort(ptr,&(ptr->next),cmpfnc); +** +** In the above, "cmpfnc" is a pointer to a function which compares +** two instances of the structure and returns an integer, as in +** strcmp. The second argument is a pointer to the pointer to the +** second element of the linked list. This address is used to compute +** the offset to the "next" field within the structure. The offset to +** the "next" field must be constant for all structures in the list. +** +** The function returns a new pointer which is the head of the list +** after sorting. +** +** ALGORITHM: +** Merge-sort. 
+*/ + +/* +** Return a pointer to the next structure in the linked list. +*/ +#define NEXT(A) (*(char**)(((char*)A)+offset)) + +/* +** Inputs: +** a: A sorted, null-terminated linked list. (May be null). +** b: A sorted, null-terminated linked list. (May be null). +** cmp: A pointer to the comparison function. +** offset: Offset in the structure to the "next" field. +** +** Return Value: +** A pointer to the head of a sorted list containing the elements +** of both a and b. +** +** Side effects: +** The "next" pointers for elements in the lists a and b are +** changed. +*/ +static char *merge( + char *a, + char *b, + int (*cmp)(const char*,const char*), + int offset +){ + char *ptr, *head; + + if( a==0 ){ + head = b; + }else if( b==0 ){ + head = a; + }else{ + if( (*cmp)(a,b)<=0 ){ + ptr = a; + a = NEXT(a); + }else{ + ptr = b; + b = NEXT(b); + } + head = ptr; + while( a && b ){ + if( (*cmp)(a,b)<=0 ){ + NEXT(ptr) = a; + ptr = a; + a = NEXT(a); + }else{ + NEXT(ptr) = b; + ptr = b; + b = NEXT(b); + } + } + if( a ) NEXT(ptr) = a; + else NEXT(ptr) = b; + } + return head; +} + +/* +** Inputs: +** list: Pointer to a singly-linked list of structures. +** next: Pointer to pointer to the second element of the list. +** cmp: A comparison function. +** +** Return Value: +** A pointer to the head of a sorted list containing the elements +** originally in list. +** +** Side effects: +** The "next" pointers for elements in list are changed. 
+*/ +#define LISTSIZE 30 +static char *msort( + char *list, + char **next, + int (*cmp)(const char*,const char*) +){ + unsigned long offset; + char *ep; + char *set[LISTSIZE]; + int i; + offset = (unsigned long)((char*)next - (char*)list); + for(i=0; istate = WAITING_FOR_DECL_KEYWORD; + }else if( ISLOWER(x[0]) ){ + psp->lhs = Symbol_new(x); + psp->nrhs = 0; + psp->lhsalias = 0; + psp->state = WAITING_FOR_ARROW; + }else if( x[0]=='{' ){ + if( psp->prevrule==0 ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "There is no prior rule upon which to attach the code " + "fragment which begins on this line."); + psp->errorcnt++; + }else if( psp->prevrule->code!=0 ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Code fragment beginning on this line is not the first " + "to follow the previous rule."); + psp->errorcnt++; + }else if( strcmp(x, "{NEVER-REDUCE")==0 ){ + psp->prevrule->neverReduce = 1; + }else{ + psp->prevrule->line = psp->tokenlineno; + psp->prevrule->code = &x[1]; + psp->prevrule->noCode = 0; + } + }else if( x[0]=='[' ){ + psp->state = PRECEDENCE_MARK_1; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Token \"%s\" should be either \"%%\" or a nonterminal name.", + x); + psp->errorcnt++; + } + break; + case PRECEDENCE_MARK_1: + if( !ISUPPER(x[0]) ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "The precedence symbol must be a terminal."); + psp->errorcnt++; + }else if( psp->prevrule==0 ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "There is no prior rule to assign precedence \"[%s]\".",x); + psp->errorcnt++; + }else if( psp->prevrule->precsym!=0 ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Precedence mark on this line is not the first " + "to follow the previous rule."); + psp->errorcnt++; + }else{ + psp->prevrule->precsym = Symbol_new(x); + } + psp->state = PRECEDENCE_MARK_2; + break; + case PRECEDENCE_MARK_2: + if( x[0]!=']' ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Missing \"]\" on precedence mark."); + psp->errorcnt++; + } + psp->state = 
WAITING_FOR_DECL_OR_RULE; + break; + case WAITING_FOR_ARROW: + if( x[0]==':' && x[1]==':' && x[2]=='=' ){ + psp->state = IN_RHS; + }else if( x[0]=='(' ){ + psp->state = LHS_ALIAS_1; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Expected to see a \":\" following the LHS symbol \"%s\".", + psp->lhs->name); + psp->errorcnt++; + psp->state = RESYNC_AFTER_RULE_ERROR; + } + break; + case LHS_ALIAS_1: + if( ISALPHA(x[0]) ){ + psp->lhsalias = x; + psp->state = LHS_ALIAS_2; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "\"%s\" is not a valid alias for the LHS \"%s\"\n", + x,psp->lhs->name); + psp->errorcnt++; + psp->state = RESYNC_AFTER_RULE_ERROR; + } + break; + case LHS_ALIAS_2: + if( x[0]==')' ){ + psp->state = LHS_ALIAS_3; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias); + psp->errorcnt++; + psp->state = RESYNC_AFTER_RULE_ERROR; + } + break; + case LHS_ALIAS_3: + if( x[0]==':' && x[1]==':' && x[2]=='=' ){ + psp->state = IN_RHS; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Missing \"->\" following: \"%s(%s)\".", + psp->lhs->name,psp->lhsalias); + psp->errorcnt++; + psp->state = RESYNC_AFTER_RULE_ERROR; + } + break; + case IN_RHS: + if( x[0]=='.' 
){ + struct rule *rp; + rp = (struct rule *)lemon_calloc( sizeof(struct rule) + + sizeof(struct symbol*)*psp->nrhs + sizeof(char*)*psp->nrhs, 1); + if( rp==0 ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Can't allocate enough memory for this rule."); + psp->errorcnt++; + psp->prevrule = 0; + }else{ + int i; + rp->ruleline = psp->tokenlineno; + rp->rhs = (struct symbol**)&rp[1]; + rp->rhsalias = (const char**)&(rp->rhs[psp->nrhs]); + for(i=0; inrhs; i++){ + rp->rhs[i] = psp->rhs[i]; + rp->rhsalias[i] = psp->alias[i]; + if( rp->rhsalias[i]!=0 ){ rp->rhs[i]->bContent = 1; } + } + rp->lhs = psp->lhs; + rp->lhsalias = psp->lhsalias; + rp->nrhs = psp->nrhs; + rp->code = 0; + rp->noCode = 1; + rp->precsym = 0; + rp->index = psp->gp->nrule++; + rp->nextlhs = rp->lhs->rule; + rp->lhs->rule = rp; + rp->next = 0; + if( psp->firstrule==0 ){ + psp->firstrule = psp->lastrule = rp; + }else{ + psp->lastrule->next = rp; + psp->lastrule = rp; + } + psp->prevrule = rp; + } + psp->state = WAITING_FOR_DECL_OR_RULE; + }else if( ISALPHA(x[0]) ){ + if( psp->nrhs>=MAXRHS ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Too many symbols on RHS of rule beginning at \"%s\".", + x); + psp->errorcnt++; + psp->state = RESYNC_AFTER_RULE_ERROR; + }else{ + psp->rhs[psp->nrhs] = Symbol_new(x); + psp->alias[psp->nrhs] = 0; + psp->nrhs++; + } + }else if( (x[0]=='|' || x[0]=='/') && psp->nrhs>0 && ISUPPER(x[1]) ){ + struct symbol *msp = psp->rhs[psp->nrhs-1]; + if( msp->type!=MULTITERMINAL ){ + struct symbol *origsp = msp; + msp = (struct symbol *) lemon_calloc(1,sizeof(*msp)); + memset(msp, 0, sizeof(*msp)); + msp->type = MULTITERMINAL; + msp->nsubsym = 1; + msp->subsym = (struct symbol**)lemon_calloc(1,sizeof(struct symbol*)); + msp->subsym[0] = origsp; + msp->name = origsp->name; + psp->rhs[psp->nrhs-1] = msp; + } + msp->nsubsym++; + msp->subsym = (struct symbol **) lemon_realloc(msp->subsym, + sizeof(struct symbol*)*msp->nsubsym); + msp->subsym[msp->nsubsym-1] = Symbol_new(&x[1]); + if( 
ISLOWER(x[1]) || ISLOWER(msp->subsym[0]->name[0]) ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Cannot form a compound containing a non-terminal"); + psp->errorcnt++; + } + }else if( x[0]=='(' && psp->nrhs>0 ){ + psp->state = RHS_ALIAS_1; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Illegal character on RHS of rule: \"%s\".",x); + psp->errorcnt++; + psp->state = RESYNC_AFTER_RULE_ERROR; + } + break; + case RHS_ALIAS_1: + if( ISALPHA(x[0]) ){ + psp->alias[psp->nrhs-1] = x; + psp->state = RHS_ALIAS_2; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "\"%s\" is not a valid alias for the RHS symbol \"%s\"\n", + x,psp->rhs[psp->nrhs-1]->name); + psp->errorcnt++; + psp->state = RESYNC_AFTER_RULE_ERROR; + } + break; + case RHS_ALIAS_2: + if( x[0]==')' ){ + psp->state = IN_RHS; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Missing \")\" following LHS alias name \"%s\".",psp->lhsalias); + psp->errorcnt++; + psp->state = RESYNC_AFTER_RULE_ERROR; + } + break; + case WAITING_FOR_DECL_KEYWORD: + if( ISALPHA(x[0]) ){ + psp->declkeyword = x; + psp->declargslot = 0; + psp->decllinenoslot = 0; + psp->insertLineMacro = 1; + psp->state = WAITING_FOR_DECL_ARG; + if( strcmp(x,"name")==0 ){ + psp->declargslot = &(psp->gp->name); + psp->insertLineMacro = 0; + }else if( strcmp(x,"include")==0 ){ + psp->declargslot = &(psp->gp->include); + }else if( strcmp(x,"code")==0 ){ + psp->declargslot = &(psp->gp->extracode); + }else if( strcmp(x,"token_destructor")==0 ){ + psp->declargslot = &psp->gp->tokendest; + }else if( strcmp(x,"default_destructor")==0 ){ + psp->declargslot = &psp->gp->vardest; + }else if( strcmp(x,"token_prefix")==0 ){ + psp->declargslot = &psp->gp->tokenprefix; + psp->insertLineMacro = 0; + }else if( strcmp(x,"syntax_error")==0 ){ + psp->declargslot = &(psp->gp->error); + }else if( strcmp(x,"parse_accept")==0 ){ + psp->declargslot = &(psp->gp->accept); + }else if( strcmp(x,"parse_failure")==0 ){ + psp->declargslot = &(psp->gp->failure); + }else if( 
strcmp(x,"stack_overflow")==0 ){ + psp->declargslot = &(psp->gp->overflow); + }else if( strcmp(x,"extra_argument")==0 ){ + psp->declargslot = &(psp->gp->arg); + psp->insertLineMacro = 0; + }else if( strcmp(x,"extra_context")==0 ){ + psp->declargslot = &(psp->gp->ctx); + psp->insertLineMacro = 0; + }else if( strcmp(x,"token_type")==0 ){ + psp->declargslot = &(psp->gp->tokentype); + psp->insertLineMacro = 0; + }else if( strcmp(x,"default_type")==0 ){ + psp->declargslot = &(psp->gp->vartype); + psp->insertLineMacro = 0; + }else if( strcmp(x,"realloc")==0 ){ + psp->declargslot = &(psp->gp->reallocFunc); + psp->insertLineMacro = 0; + }else if( strcmp(x,"free")==0 ){ + psp->declargslot = &(psp->gp->freeFunc); + psp->insertLineMacro = 0; + }else if( strcmp(x,"stack_size")==0 ){ + psp->declargslot = &(psp->gp->stacksize); + psp->insertLineMacro = 0; + }else if( strcmp(x,"start_symbol")==0 ){ + psp->declargslot = &(psp->gp->start); + psp->insertLineMacro = 0; + }else if( strcmp(x,"left")==0 ){ + psp->preccounter++; + psp->declassoc = LEFT; + psp->state = WAITING_FOR_PRECEDENCE_SYMBOL; + }else if( strcmp(x,"right")==0 ){ + psp->preccounter++; + psp->declassoc = RIGHT; + psp->state = WAITING_FOR_PRECEDENCE_SYMBOL; + }else if( strcmp(x,"nonassoc")==0 ){ + psp->preccounter++; + psp->declassoc = NONE; + psp->state = WAITING_FOR_PRECEDENCE_SYMBOL; + }else if( strcmp(x,"destructor")==0 ){ + psp->state = WAITING_FOR_DESTRUCTOR_SYMBOL; + }else if( strcmp(x,"type")==0 ){ + psp->state = WAITING_FOR_DATATYPE_SYMBOL; + }else if( strcmp(x,"fallback")==0 ){ + psp->fallback = 0; + psp->state = WAITING_FOR_FALLBACK_ID; + }else if( strcmp(x,"token")==0 ){ + psp->state = WAITING_FOR_TOKEN_NAME; + }else if( strcmp(x,"wildcard")==0 ){ + psp->state = WAITING_FOR_WILDCARD_ID; + }else if( strcmp(x,"token_class")==0 ){ + psp->state = WAITING_FOR_CLASS_ID; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Unknown declaration keyword: \"%%%s\".",x); + psp->errorcnt++; + psp->state = 
RESYNC_AFTER_DECL_ERROR; + } + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Illegal declaration keyword: \"%s\".",x); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + } + break; + case WAITING_FOR_DESTRUCTOR_SYMBOL: + if( !ISALPHA(x[0]) ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Symbol name missing after %%destructor keyword"); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + }else{ + struct symbol *sp = Symbol_new(x); + psp->declargslot = &sp->destructor; + psp->decllinenoslot = &sp->destLineno; + psp->insertLineMacro = 1; + psp->state = WAITING_FOR_DECL_ARG; + } + break; + case WAITING_FOR_DATATYPE_SYMBOL: + if( !ISALPHA(x[0]) ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Symbol name missing after %%type keyword"); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + }else{ + struct symbol *sp = Symbol_find(x); + if((sp) && (sp->datatype)){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Symbol %%type \"%s\" already defined", x); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + }else{ + if (!sp){ + sp = Symbol_new(x); + } + psp->declargslot = &sp->datatype; + psp->insertLineMacro = 0; + psp->state = WAITING_FOR_DECL_ARG; + } + } + break; + case WAITING_FOR_PRECEDENCE_SYMBOL: + if( x[0]=='.' 
){ + psp->state = WAITING_FOR_DECL_OR_RULE; + }else if( ISUPPER(x[0]) ){ + struct symbol *sp; + sp = Symbol_new(x); + if( sp->prec>=0 ){ + ErrorMsg(psp->filename,psp->tokenlineno, + "Symbol \"%s\" has already be given a precedence.",x); + psp->errorcnt++; + }else{ + sp->prec = psp->preccounter; + sp->assoc = psp->declassoc; + } + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Can't assign a precedence to \"%s\".",x); + psp->errorcnt++; + } + break; + case WAITING_FOR_DECL_ARG: + if( x[0]=='{' || x[0]=='\"' || ISALNUM(x[0]) ){ + const char *zOld, *zNew; + char *zBuf, *z; + int nOld, n, nLine = 0, nNew, nBack; + int addLineMacro; + char zLine[50]; + zNew = x; + if( zNew[0]=='"' || zNew[0]=='{' ) zNew++; + nNew = lemonStrlen(zNew); + if( *psp->declargslot ){ + zOld = *psp->declargslot; + }else{ + zOld = ""; + } + nOld = lemonStrlen(zOld); + n = nOld + nNew + 20; + addLineMacro = !psp->gp->nolinenosflag + && psp->insertLineMacro + && psp->tokenlineno>1 + && (psp->decllinenoslot==0 || psp->decllinenoslot[0]!=0); + if( addLineMacro ){ + for(z=psp->filename, nBack=0; *z; z++){ + if( *z=='\\' ) nBack++; + } + lemon_sprintf(zLine, "#line %d ", psp->tokenlineno); + nLine = lemonStrlen(zLine); + n += nLine + lemonStrlen(psp->filename) + nBack; + } + *psp->declargslot = (char *) lemon_realloc(*psp->declargslot, n); + zBuf = *psp->declargslot + nOld; + if( addLineMacro ){ + if( nOld && zBuf[-1]!='\n' ){ + *(zBuf++) = '\n'; + } + memcpy(zBuf, zLine, nLine); + zBuf += nLine; + *(zBuf++) = '"'; + for(z=psp->filename; *z; z++){ + if( *z=='\\' ){ + *(zBuf++) = '\\'; + } + *(zBuf++) = *z; + } + *(zBuf++) = '"'; + *(zBuf++) = '\n'; + } + if( psp->decllinenoslot && psp->decllinenoslot[0]==0 ){ + psp->decllinenoslot[0] = psp->tokenlineno; + } + memcpy(zBuf, zNew, nNew); + zBuf += nNew; + *zBuf = 0; + psp->state = WAITING_FOR_DECL_OR_RULE; + }else{ + ErrorMsg(psp->filename,psp->tokenlineno, + "Illegal argument to %%%s: %s",psp->declkeyword,x); + psp->errorcnt++; + psp->state = 
RESYNC_AFTER_DECL_ERROR; + } + break; + case WAITING_FOR_FALLBACK_ID: + if( x[0]=='.' ){ + psp->state = WAITING_FOR_DECL_OR_RULE; + }else if( !ISUPPER(x[0]) ){ + ErrorMsg(psp->filename, psp->tokenlineno, + "%%fallback argument \"%s\" should be a token", x); + psp->errorcnt++; + }else{ + struct symbol *sp = Symbol_new(x); + if( psp->fallback==0 ){ + psp->fallback = sp; + }else if( sp->fallback ){ + ErrorMsg(psp->filename, psp->tokenlineno, + "More than one fallback assigned to token %s", x); + psp->errorcnt++; + }else{ + sp->fallback = psp->fallback; + psp->gp->has_fallback = 1; + } + } + break; + case WAITING_FOR_TOKEN_NAME: + /* Tokens do not have to be declared before use. But they can be + ** in order to control their assigned integer number. The number for + ** each token is assigned when it is first seen. So by including + ** + ** %token ONE TWO THREE. + ** + ** early in the grammar file, that assigns small consecutive values + ** to each of the tokens ONE TWO and THREE. + */ + if( x[0]=='.' ){ + psp->state = WAITING_FOR_DECL_OR_RULE; + }else if( !ISUPPER(x[0]) ){ + ErrorMsg(psp->filename, psp->tokenlineno, + "%%token argument \"%s\" should be a token", x); + psp->errorcnt++; + }else{ + (void)Symbol_new(x); + } + break; + case WAITING_FOR_WILDCARD_ID: + if( x[0]=='.' 
){ + psp->state = WAITING_FOR_DECL_OR_RULE; + }else if( !ISUPPER(x[0]) ){ + ErrorMsg(psp->filename, psp->tokenlineno, + "%%wildcard argument \"%s\" should be a token", x); + psp->errorcnt++; + }else{ + struct symbol *sp = Symbol_new(x); + if( psp->gp->wildcard==0 ){ + psp->gp->wildcard = sp; + }else{ + ErrorMsg(psp->filename, psp->tokenlineno, + "Extra wildcard to token: %s", x); + psp->errorcnt++; + } + } + break; + case WAITING_FOR_CLASS_ID: + if( !ISLOWER(x[0]) ){ + ErrorMsg(psp->filename, psp->tokenlineno, + "%%token_class must be followed by an identifier: %s", x); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + }else if( Symbol_find(x) ){ + ErrorMsg(psp->filename, psp->tokenlineno, + "Symbol \"%s\" already used", x); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + }else{ + psp->tkclass = Symbol_new(x); + psp->tkclass->type = MULTITERMINAL; + psp->state = WAITING_FOR_CLASS_TOKEN; + } + break; + case WAITING_FOR_CLASS_TOKEN: + if( x[0]=='.' ){ + psp->state = WAITING_FOR_DECL_OR_RULE; + }else if( ISUPPER(x[0]) || ((x[0]=='|' || x[0]=='/') && ISUPPER(x[1])) ){ + struct symbol *msp = psp->tkclass; + msp->nsubsym++; + msp->subsym = (struct symbol **) lemon_realloc(msp->subsym, + sizeof(struct symbol*)*msp->nsubsym); + if( !ISUPPER(x[0]) ) x++; + msp->subsym[msp->nsubsym-1] = Symbol_new(x); + }else{ + ErrorMsg(psp->filename, psp->tokenlineno, + "%%token_class argument \"%s\" should be a token", x); + psp->errorcnt++; + psp->state = RESYNC_AFTER_DECL_ERROR; + } + break; + case RESYNC_AFTER_RULE_ERROR: +/* if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE; +** break; */ + case RESYNC_AFTER_DECL_ERROR: + if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE; + if( x[0]=='%' ) psp->state = WAITING_FOR_DECL_KEYWORD; + break; + } +} + +/* The text in the input is part of the argument to an %ifdef or %ifndef. +** Evaluate the text as a boolean expression. Return true or false. 
+*/ +static int eval_preprocessor_boolean(char *z, int lineno){ + int neg = 0; + int res = 0; + int okTerm = 1; + int i; + for(i=0; z[i]!=0; i++){ + if( ISSPACE(z[i]) ) continue; + if( z[i]=='!' ){ + if( !okTerm ) goto pp_syntax_error; + neg = !neg; + continue; + } + if( z[i]=='|' && z[i+1]=='|' ){ + if( okTerm ) goto pp_syntax_error; + if( res ) return 1; + i++; + okTerm = 1; + continue; + } + if( z[i]=='&' && z[i+1]=='&' ){ + if( okTerm ) goto pp_syntax_error; + if( !res ) return 0; + i++; + okTerm = 1; + continue; + } + if( z[i]=='(' ){ + int k; + int n = 1; + if( !okTerm ) goto pp_syntax_error; + for(k=i+1; z[k]; k++){ + if( z[k]==')' ){ + n--; + if( n==0 ){ + z[k] = 0; + res = eval_preprocessor_boolean(&z[i+1], -1); + z[k] = ')'; + if( res<0 ){ + i = i-res; + goto pp_syntax_error; + } + i = k; + break; + } + }else if( z[k]=='(' ){ + n++; + }else if( z[k]==0 ){ + i = k; + goto pp_syntax_error; + } + } + if( neg ){ + res = !res; + neg = 0; + } + okTerm = 0; + continue; + } + if( ISALPHA(z[i]) ){ + int j, k, n; + if( !okTerm ) goto pp_syntax_error; + for(k=i+1; ISALNUM(z[k]) || z[k]=='_'; k++){} + n = k - i; + res = 0; + for(j=0; j0 ){ + fprintf(stderr, "%%if syntax error on line %d.\n", lineno); + fprintf(stderr, " %.*s <-- syntax error here\n", i+1, z); + exit(1); + }else{ + return -(i+1); + } +} + +/* Run the preprocessor over the input file text. The global variables +** azDefine[0] through azDefine[nDefine-1] contains the names of all defined +** macros. This routine looks for "%ifdef" and "%ifndef" and "%endif" and +** comments them out. Text in between is also commented out as appropriate. 
+*/ +static void preprocess_input(char *z){ + int i, j, k; + int exclude = 0; + int start = 0; + int lineno = 1; + int start_lineno = 1; + for(i=0; z[i]; i++){ + if( z[i]=='\n' ) lineno++; + if( z[i]!='%' || (i>0 && z[i-1]!='\n') ) continue; + if( strncmp(&z[i],"%endif",6)==0 && ISSPACE(z[i+6]) ){ + if( exclude ){ + exclude--; + if( exclude==0 ){ + for(j=start; jfilename; + ps.errorcnt = 0; + ps.state = INITIALIZE; + + /* Begin by reading the input file */ + fp = fopen(ps.filename,"rb"); + if( fp==0 ){ + ErrorMsg(ps.filename,0,"Can't open this file for reading."); + gp->errorcnt++; + return; + } + fseek(fp,0,2); + filesize = ftell(fp); + rewind(fp); + filebuf = (char *)lemon_malloc( filesize+1 ); + if( filesize>100000000 || filebuf==0 ){ + ErrorMsg(ps.filename,0,"Input file too large."); + lemon_free(filebuf); + gp->errorcnt++; + fclose(fp); + return; + } + if( fread(filebuf,1,filesize,fp)!=filesize ){ + ErrorMsg(ps.filename,0,"Can't read in all %d bytes of this file.", + filesize); + lemon_free(filebuf); + gp->errorcnt++; + fclose(fp); + return; + } + fclose(fp); + filebuf[filesize] = 0; + + /* Make an initial pass through the file to handle %ifdef and %ifndef */ + preprocess_input(filebuf); + if( gp->printPreprocessed ){ + printf("%s\n", filebuf); + return; + } + + /* Now scan the text of the input file */ + lineno = 1; + for(cp=filebuf; (c= *cp)!=0; ){ + if( c=='\n' ) lineno++; /* Keep track of the line number */ + if( ISSPACE(c) ){ cp++; continue; } /* Skip all white space */ + if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments */ + cp+=2; + while( (c= *cp)!=0 && c!='\n' ) cp++; + continue; + } + if( c=='/' && cp[1]=='*' ){ /* Skip C style comments */ + cp+=2; + if( (*cp)=='/' ) cp++; + while( (c= *cp)!=0 && (c!='/' || cp[-1]!='*') ){ + if( c=='\n' ) lineno++; + cp++; + } + if( c ) cp++; + continue; + } + ps.tokenstart = cp; /* Mark the beginning of the token */ + ps.tokenlineno = lineno; /* Linenumber on which token begins */ + if( c=='\"' ){ /* String 
literals */ + cp++; + while( (c= *cp)!=0 && c!='\"' ){ + if( c=='\n' ) lineno++; + cp++; + } + if( c==0 ){ + ErrorMsg(ps.filename,startline, + "String starting on this line is not terminated before " + "the end of the file."); + ps.errorcnt++; + nextcp = cp; + }else{ + nextcp = cp+1; + } + }else if( c=='{' ){ /* A block of C code */ + int level; + cp++; + for(level=1; (c= *cp)!=0 && (level>1 || c!='}'); cp++){ + if( c=='\n' ) lineno++; + else if( c=='{' ) level++; + else if( c=='}' ) level--; + else if( c=='/' && cp[1]=='*' ){ /* Skip comments */ + int prevc; + cp = &cp[2]; + prevc = 0; + while( (c= *cp)!=0 && (c!='/' || prevc!='*') ){ + if( c=='\n' ) lineno++; + prevc = c; + cp++; + } + }else if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments too */ + cp = &cp[2]; + while( (c= *cp)!=0 && c!='\n' ) cp++; + if( c ) lineno++; + }else if( c=='\'' || c=='\"' ){ /* String a character literals */ + int startchar, prevc; + startchar = c; + prevc = 0; + for(cp++; (c= *cp)!=0 && (c!=startchar || prevc=='\\'); cp++){ + if( c=='\n' ) lineno++; + if( prevc=='\\' ) prevc = 0; + else prevc = c; + } + } + } + if( c==0 ){ + ErrorMsg(ps.filename,ps.tokenlineno, + "C code starting on this line is not terminated before " + "the end of the file."); + ps.errorcnt++; + nextcp = cp; + }else{ + nextcp = cp+1; + } + }else if( ISALNUM(c) ){ /* Identifiers */ + while( (c= *cp)!=0 && (ISALNUM(c) || c=='_') ) cp++; + nextcp = cp; + }else if( c==':' && cp[1]==':' && cp[2]=='=' ){ /* The operator "::=" */ + cp += 3; + nextcp = cp; + }else if( (c=='/' || c=='|') && ISALPHA(cp[1]) ){ + cp += 2; + while( (c = *cp)!=0 && (ISALNUM(c) || c=='_') ) cp++; + nextcp = cp; + }else{ /* All other (one character) operators */ + cp++; + nextcp = cp; + } + c = *cp; + *cp = 0; /* Null terminate the token */ + parseonetoken(&ps); /* Parse the token */ + *cp = (char)c; /* Restore the buffer */ + cp = nextcp; + } + lemon_free(filebuf); /* Release the buffer after parsing */ + gp->rule = ps.firstrule; + 
gp->errorcnt = ps.errorcnt; +} +/*************************** From the file "plink.c" *********************/ +/* +** Routines processing configuration follow-set propagation links +** in the LEMON parser generator. +*/ +static struct plink *plink_freelist = 0; + +/* Allocate a new plink */ +struct plink *Plink_new(void){ + struct plink *newlink; + + if( plink_freelist==0 ){ + int i; + int amt = 100; + plink_freelist = (struct plink *)lemon_calloc( amt, sizeof(struct plink) ); + if( plink_freelist==0 ){ + fprintf(stderr, + "Unable to allocate memory for a new follow-set propagation link.\n"); + exit(1); + } + for(i=0; inext; + return newlink; +} + +/* Add a plink to a plink list */ +void Plink_add(struct plink **plpp, struct config *cfp) +{ + struct plink *newlink; + newlink = Plink_new(); + newlink->next = *plpp; + *plpp = newlink; + newlink->cfp = cfp; +} + +/* Transfer every plink on the list "from" to the list "to" */ +void Plink_copy(struct plink **to, struct plink *from) +{ + struct plink *nextpl; + while( from ){ + nextpl = from->next; + from->next = *to; + *to = from; + from = nextpl; + } +} + +/* Delete every plink on the list */ +void Plink_delete(struct plink *plp) +{ + struct plink *nextpl; + + while( plp ){ + nextpl = plp->next; + plp->next = plink_freelist; + plink_freelist = plp; + plp = nextpl; + } +} +/*********************** From the file "report.c" **************************/ +/* +** Procedures for generating reports and tables in the LEMON parser generator. +*/ + +/* Generate a filename with the given suffix. 
+*/ +PRIVATE char *file_makename(struct lemon *lemp, const char *suffix) +{ + char *name; + char *cp; + char *filename = lemp->filename; + int sz; + + if( outputDir ){ + cp = strrchr(filename, '/'); + if( cp ) filename = cp + 1; + } + sz = lemonStrlen(filename); + sz += lemonStrlen(suffix); + if( outputDir ) sz += lemonStrlen(outputDir) + 1; + sz += 5; + name = (char*)lemon_malloc( sz ); + if( name==0 ){ + fprintf(stderr,"Can't allocate space for a filename.\n"); + exit(1); + } + name[0] = 0; + if( outputDir ){ + lemon_strcpy(name, outputDir); + lemon_strcat(name, "/"); + } + lemon_strcat(name,filename); + cp = strrchr(name,'.'); + if( cp ) *cp = 0; + lemon_strcat(name,suffix); + return name; +} + +/* Open a file with a name based on the name of the input file, +** but with a different (specified) suffix, and return a pointer +** to the stream */ +PRIVATE FILE *file_open( + struct lemon *lemp, + const char *suffix, + const char *mode +){ + FILE *fp; + + if( lemp->outname ) lemon_free(lemp->outname); + lemp->outname = file_makename(lemp, suffix); + fp = fopen(lemp->outname,mode); + if( fp==0 && *mode=='w' ){ + fprintf(stderr,"Can't open file \"%s\".\n",lemp->outname); + lemp->errorcnt++; + return 0; + } + return fp; +} + +/* Print the text of a rule +*/ +void rule_print(FILE *out, struct rule *rp){ + int i, j; + fprintf(out, "%s",rp->lhs->name); + /* if( rp->lhsalias ) fprintf(out,"(%s)",rp->lhsalias); */ + fprintf(out," ::="); + for(i=0; inrhs; i++){ + struct symbol *sp = rp->rhs[i]; + if( sp->type==MULTITERMINAL ){ + fprintf(out," %s", sp->subsym[0]->name); + for(j=1; jnsubsym; j++){ + fprintf(out,"|%s", sp->subsym[j]->name); + } + }else{ + fprintf(out," %s", sp->name); + } + /* if( rp->rhsalias[i] ) fprintf(out,"(%s)",rp->rhsalias[i]); */ + } +} + +/* Duplicate the input file without comments and without actions +** on rules */ +void Reprint(struct lemon *lemp) +{ + struct rule *rp; + struct symbol *sp; + int i, j, maxlen, len, ncolumns, skip; + printf("// 
Reprint of input file \"%s\".\n// Symbols:\n",lemp->filename); + maxlen = 10; + for(i=0; insymbol; i++){ + sp = lemp->symbols[i]; + len = lemonStrlen(sp->name); + if( len>maxlen ) maxlen = len; + } + ncolumns = 76/(maxlen+5); + if( ncolumns<1 ) ncolumns = 1; + skip = (lemp->nsymbol + ncolumns - 1)/ncolumns; + for(i=0; insymbol; j+=skip){ + sp = lemp->symbols[j]; + assert( sp->index==j ); + printf(" %3d %-*.*s",j,maxlen,maxlen,sp->name); + } + printf("\n"); + } + for(rp=lemp->rule; rp; rp=rp->next){ + rule_print(stdout, rp); + printf("."); + if( rp->precsym ) printf(" [%s]",rp->precsym->name); + /* if( rp->code ) printf("\n %s",rp->code); */ + printf("\n"); + } +} + +/* Print a single rule. +*/ +void RulePrint(FILE *fp, struct rule *rp, int iCursor){ + struct symbol *sp; + int i, j; + fprintf(fp,"%s ::=",rp->lhs->name); + for(i=0; i<=rp->nrhs; i++){ + if( i==iCursor ) fprintf(fp," *"); + if( i==rp->nrhs ) break; + sp = rp->rhs[i]; + if( sp->type==MULTITERMINAL ){ + fprintf(fp," %s", sp->subsym[0]->name); + for(j=1; jnsubsym; j++){ + fprintf(fp,"|%s",sp->subsym[j]->name); + } + }else{ + fprintf(fp," %s", sp->name); + } + } +} + +/* Print the rule for a configuration. +*/ +void ConfigPrint(FILE *fp, struct config *cfp){ + RulePrint(fp, cfp->rp, cfp->dot); +} + +/* #define TEST */ +#if 0 +/* Print a set */ +PRIVATE void SetPrint(out,set,lemp) +FILE *out; +char *set; +struct lemon *lemp; +{ + int i; + char *spacer; + spacer = ""; + fprintf(out,"%12s[",""); + for(i=0; interminal; i++){ + if( SetFind(set,i) ){ + fprintf(out,"%s%s",spacer,lemp->symbols[i]->name); + spacer = " "; + } + } + fprintf(out,"]\n"); +} + +/* Print a plink chain */ +PRIVATE void PlinkPrint(out,plp,tag) +FILE *out; +struct plink *plp; +char *tag; +{ + while( plp ){ + fprintf(out,"%12s%s (state %2d) ","",tag,plp->cfp->stp->statenum); + ConfigPrint(out,plp->cfp); + fprintf(out,"\n"); + plp = plp->next; + } +} +#endif + +/* Print an action to the given file descriptor. 
Return FALSE if +** nothing was actually printed. +*/ +int PrintAction( + struct action *ap, /* The action to print */ + FILE *fp, /* Print the action here */ + int indent /* Indent by this amount */ +){ + int result = 1; + switch( ap->type ){ + case SHIFT: { + struct state *stp = ap->x.stp; + fprintf(fp,"%*s shift %-7d",indent,ap->sp->name,stp->statenum); + break; + } + case REDUCE: { + struct rule *rp = ap->x.rp; + fprintf(fp,"%*s reduce %-7d",indent,ap->sp->name,rp->iRule); + RulePrint(fp, rp, -1); + break; + } + case SHIFTREDUCE: { + struct rule *rp = ap->x.rp; + fprintf(fp,"%*s shift-reduce %-7d",indent,ap->sp->name,rp->iRule); + RulePrint(fp, rp, -1); + break; + } + case ACCEPT: + fprintf(fp,"%*s accept",indent,ap->sp->name); + break; + case ERROR: + fprintf(fp,"%*s error",indent,ap->sp->name); + break; + case SRCONFLICT: + case RRCONFLICT: + fprintf(fp,"%*s reduce %-7d ** Parsing conflict **", + indent,ap->sp->name,ap->x.rp->iRule); + break; + case SSCONFLICT: + fprintf(fp,"%*s shift %-7d ** Parsing conflict **", + indent,ap->sp->name,ap->x.stp->statenum); + break; + case SH_RESOLVED: + if( showPrecedenceConflict ){ + fprintf(fp,"%*s shift %-7d -- dropped by precedence", + indent,ap->sp->name,ap->x.stp->statenum); + }else{ + result = 0; + } + break; + case RD_RESOLVED: + if( showPrecedenceConflict ){ + fprintf(fp,"%*s reduce %-7d -- dropped by precedence", + indent,ap->sp->name,ap->x.rp->iRule); + }else{ + result = 0; + } + break; + case NOT_USED: + result = 0; + break; + } + if( result && ap->spOpt ){ + fprintf(fp," /* because %s==%s */", ap->sp->name, ap->spOpt->name); + } + return result; +} + +/* Generate the "*.out" log file */ +void ReportOutput(struct lemon *lemp) +{ + int i, n; + struct state *stp; + struct config *cfp; + struct action *ap; + struct rule *rp; + FILE *fp; + + fp = file_open(lemp,".out","wb"); + if( fp==0 ) return; + for(i=0; inxstate; i++){ + stp = lemp->sorted[i]; + fprintf(fp,"State %d:\n",stp->statenum); + if( lemp->basisflag ) 
cfp=stp->bp; + else cfp=stp->cfp; + while( cfp ){ + char buf[20]; + if( cfp->dot==cfp->rp->nrhs ){ + lemon_sprintf(buf,"(%d)",cfp->rp->iRule); + fprintf(fp," %5s ",buf); + }else{ + fprintf(fp," "); + } + ConfigPrint(fp,cfp); + fprintf(fp,"\n"); +#if 0 + SetPrint(fp,cfp->fws,lemp); + PlinkPrint(fp,cfp->fplp,"To "); + PlinkPrint(fp,cfp->bplp,"From"); +#endif + if( lemp->basisflag ) cfp=cfp->bp; + else cfp=cfp->next; + } + fprintf(fp,"\n"); + for(ap=stp->ap; ap; ap=ap->next){ + if( PrintAction(ap,fp,30) ) fprintf(fp,"\n"); + } + fprintf(fp,"\n"); + } + fprintf(fp, "----------------------------------------------------\n"); + fprintf(fp, "Symbols:\n"); + fprintf(fp, "The first-set of non-terminals is shown after the name.\n\n"); + for(i=0; insymbol; i++){ + int j; + struct symbol *sp; + + sp = lemp->symbols[i]; + fprintf(fp, " %3d: %s", i, sp->name); + if( sp->type==NONTERMINAL ){ + fprintf(fp, ":"); + if( sp->lambda ){ + fprintf(fp, " "); + } + for(j=0; jnterminal; j++){ + if( sp->firstset && SetFind(sp->firstset, j) ){ + fprintf(fp, " %s", lemp->symbols[j]->name); + } + } + } + if( sp->prec>=0 ) fprintf(fp," (precedence=%d)", sp->prec); + fprintf(fp, "\n"); + } + fprintf(fp, "----------------------------------------------------\n"); + fprintf(fp, "Syntax-only Symbols:\n"); + fprintf(fp, "The following symbols never carry semantic content.\n\n"); + for(i=n=0; insymbol; i++){ + int w; + struct symbol *sp = lemp->symbols[i]; + if( sp->bContent ) continue; + w = (int)strlen(sp->name); + if( n>0 && n+w>75 ){ + fprintf(fp,"\n"); + n = 0; + } + if( n>0 ){ + fprintf(fp, " "); + n++; + } + fprintf(fp, "%s", sp->name); + n += w; + } + if( n>0 ) fprintf(fp, "\n"); + fprintf(fp, "----------------------------------------------------\n"); + fprintf(fp, "Rules:\n"); + for(rp=lemp->rule; rp; rp=rp->next){ + fprintf(fp, "%4d: ", rp->iRule); + rule_print(fp, rp); + fprintf(fp,"."); + if( rp->precsym ){ + fprintf(fp," [%s precedence=%d]", + rp->precsym->name, rp->precsym->prec); + } + 
fprintf(fp,"\n"); + } + fclose(fp); + return; +} + +/* Search for the file "name" which is in the same directory as +** the executable */ +PRIVATE char *pathsearch(char *argv0, char *name, int modemask) +{ + const char *pathlist; + char *pathbufptr = 0; + char *pathbuf = 0; + char *path,*cp; + char c; + +#ifdef __WIN32__ + cp = strrchr(argv0,'\\'); +#else + cp = strrchr(argv0,'/'); +#endif + if( cp ){ + c = *cp; + *cp = 0; + path = (char *)lemon_malloc( lemonStrlen(argv0) + lemonStrlen(name) + 2 ); + if( path ) lemon_sprintf(path,"%s/%s",argv0,name); + *cp = c; + }else{ + pathlist = getenv("PATH"); + if( pathlist==0 ) pathlist = ".:/bin:/usr/bin"; + pathbuf = (char *) lemon_malloc( lemonStrlen(pathlist) + 1 ); + path = (char *)lemon_malloc( lemonStrlen(pathlist)+lemonStrlen(name)+2 ); + if( (pathbuf != 0) && (path!=0) ){ + pathbufptr = pathbuf; + lemon_strcpy(pathbuf, pathlist); + while( *pathbuf ){ + cp = strchr(pathbuf,':'); + if( cp==0 ) cp = &pathbuf[lemonStrlen(pathbuf)]; + c = *cp; + *cp = 0; + lemon_sprintf(path,"%s/%s",pathbuf,name); + *cp = c; + if( c==0 ) pathbuf[0] = 0; + else pathbuf = &cp[1]; + if( access(path,modemask)==0 ) break; + } + } + lemon_free(pathbufptr); + } + return path; +} + +/* Given an action, compute the integer value for that action +** which is to be put in the action table of the generated machine. +** Return negative if no action should be generated. 
+*/ +PRIVATE int compute_action(struct lemon *lemp, struct action *ap) +{ + int act; + switch( ap->type ){ + case SHIFT: act = ap->x.stp->statenum; break; + case SHIFTREDUCE: { + /* Since a SHIFT is inherient after a prior REDUCE, convert any + ** SHIFTREDUCE action with a nonterminal on the LHS into a simple + ** REDUCE action: */ + if( ap->sp->index>=lemp->nterminal + && (lemp->errsym==0 || ap->sp->index!=lemp->errsym->index) + ){ + act = lemp->minReduce + ap->x.rp->iRule; + }else{ + act = lemp->minShiftReduce + ap->x.rp->iRule; + } + break; + } + case REDUCE: act = lemp->minReduce + ap->x.rp->iRule; break; + case ERROR: act = lemp->errAction; break; + case ACCEPT: act = lemp->accAction; break; + default: act = -1; break; + } + return act; +} + +#define LINESIZE 1000 +/* The next cluster of routines are for reading the template file +** and writing the results to the generated parser */ +/* The first function transfers data from "in" to "out" until +** a line is seen which begins with "%%". The line number is +** tracked. +** +** if name!=0, then any word that begin with "Parse" is changed to +** begin with *name instead. 
+*/ +PRIVATE void tplt_xfer(char *name, FILE *in, FILE *out, int *lineno) +{ + int i, iStart; + char line[LINESIZE]; + while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){ + (*lineno)++; + iStart = 0; + if( name ){ + for(i=0; line[i]; i++){ + if( line[i]=='P' && strncmp(&line[i],"Parse",5)==0 + && (i==0 || !ISALPHA(line[i-1])) + ){ + if( i>iStart ) fprintf(out,"%.*s",i-iStart,&line[iStart]); + fprintf(out,"%s",name); + i += 4; + iStart = i+1; + } + } + } + fprintf(out,"%s",&line[iStart]); + } +} + +/* Skip forward past the header of the template file to the first "%%" +*/ +PRIVATE void tplt_skip_header(FILE *in, int *lineno) +{ + char line[LINESIZE]; + while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){ + (*lineno)++; + } +} + +/* The next function finds the template file and opens it, returning +** a pointer to the opened file. */ +PRIVATE FILE *tplt_open(struct lemon *lemp) +{ + static char templatename[] = "lempar.c"; + char buf[1000]; + FILE *in; + char *tpltname; + char *toFree = 0; + char *cp; + + /* first, see if user specified a template filename on the command line. 
*/ + if (user_templatename != 0) { + if( access(user_templatename,004)==-1 ){ + fprintf(stderr,"Can't find the parser driver template file \"%s\".\n", + user_templatename); + lemp->errorcnt++; + return 0; + } + in = fopen(user_templatename,"rb"); + if( in==0 ){ + fprintf(stderr,"Can't open the template file \"%s\".\n", + user_templatename); + lemp->errorcnt++; + return 0; + } + return in; + } + + cp = strrchr(lemp->filename,'.'); + if( cp ){ + lemon_sprintf(buf,"%.*s.lt",(int)(cp-lemp->filename),lemp->filename); + }else{ + lemon_sprintf(buf,"%s.lt",lemp->filename); + } + if( access(buf,004)==0 ){ + tpltname = buf; + }else if( access(templatename,004)==0 ){ + tpltname = templatename; + }else{ + toFree = tpltname = pathsearch(lemp->argv[0],templatename,0); + } + if( tpltname==0 ){ + fprintf(stderr,"Can't find the parser driver template file \"%s\".\n", + templatename); + lemp->errorcnt++; + return 0; + } + in = fopen(tpltname,"rb"); + if( in==0 ){ + fprintf(stderr,"Can't open the template file \"%s\".\n",tpltname); + lemp->errorcnt++; + } + lemon_free(toFree); + return in; +} + +/* Print a #line directive line to the output file. 
*/ +PRIVATE void tplt_linedir(FILE *out, int lineno, char *filename) +{ + fprintf(out,"#line %d \"",lineno); + while( *filename ){ + if( *filename == '\\' ) putc('\\',out); + putc(*filename,out); + filename++; + } + fprintf(out,"\"\n"); +} + +/* Print a string to the file and keep the linenumber up to date */ +PRIVATE void tplt_print(FILE *out, struct lemon *lemp, char *str, int *lineno) +{ + if( str==0 ) return; + while( *str ){ + putc(*str,out); + if( *str=='\n' ) (*lineno)++; + str++; + } + if( str[-1]!='\n' ){ + putc('\n',out); + (*lineno)++; + } + if (!lemp->nolinenosflag) { + (*lineno)++; tplt_linedir(out,*lineno,lemp->outname); + } + return; +} + +/* +** The following routine emits code for the destructor for the +** symbol sp +*/ +void emit_destructor_code( + FILE *out, + struct symbol *sp, + struct lemon *lemp, + int *lineno +){ + char *cp = 0; + + if( sp->type==TERMINAL ){ + cp = lemp->tokendest; + if( cp==0 ) return; + fprintf(out,"{\n"); (*lineno)++; + }else if( sp->destructor ){ + cp = sp->destructor; + fprintf(out,"{\n"); (*lineno)++; + if( !lemp->nolinenosflag ){ + (*lineno)++; + tplt_linedir(out,sp->destLineno,lemp->filename); + } + }else if( lemp->vardest ){ + cp = lemp->vardest; + if( cp==0 ) return; + fprintf(out,"{\n"); (*lineno)++; + }else{ + assert( 0 ); /* Cannot happen */ + } + for(; *cp; cp++){ + if( *cp=='$' && cp[1]=='$' ){ + fprintf(out,"(yypminor->yy%d)",sp->dtnum); + cp++; + continue; + } + if( *cp=='\n' ) (*lineno)++; + fputc(*cp,out); + } + fprintf(out,"\n"); (*lineno)++; + if (!lemp->nolinenosflag) { + (*lineno)++; tplt_linedir(out,*lineno,lemp->outname); + } + fprintf(out,"}\n"); (*lineno)++; + return; +} + +/* +** Return TRUE (non-zero) if the given symbol has a destructor. 
+*/ +int has_destructor(struct symbol *sp, struct lemon *lemp) +{ + int ret; + if( sp->type==TERMINAL ){ + ret = lemp->tokendest!=0; + }else{ + ret = lemp->vardest!=0 || sp->destructor!=0; + } + return ret; +} + +/* +** Append text to a dynamically allocated string. If zText is 0 then +** reset the string to be empty again. Always return the complete text +** of the string (which is overwritten with each call). +** +** n bytes of zText are stored. If n==0 then all of zText up to the first +** \000 terminator is stored. zText can contain up to two instances of +** %d. The values of p1 and p2 are written into the first and second +** %d. +** +** If n==-1, then the previous character is overwritten. +*/ +PRIVATE char *append_str(const char *zText, int n, int p1, int p2){ + static char empty[1] = { 0 }; + static char *z = 0; + static int alloced = 0; + static int used = 0; + int c; + char zInt[40]; + if( zText==0 ){ + if( used==0 && z!=0 ) z[0] = 0; + used = 0; + return z; + } + if( n<=0 ){ + if( n<0 ){ + used += n; + assert( used>=0 ); + } + n = lemonStrlen(zText); + } + if( (int) (n+sizeof(zInt)*2+used) >= alloced ){ + alloced = n + sizeof(zInt)*2 + used + 200; + z = (char *) lemon_realloc(z, alloced); + } + if( z==0 ) return empty; + while( n-- > 0 ){ + c = *(zText++); + if( c=='%' && n>0 && zText[0]=='d' ){ + lemon_sprintf(zInt, "%d", p1); + p1 = p2; + lemon_strcpy(&z[used], zInt); + used += lemonStrlen(&z[used]); + zText++; + n--; + }else{ + z[used++] = (char)c; + } + } + z[used] = 0; + return z; +} + +/* +** Write and transform the rp->code string so that symbols are expanded. +** Populate the rp->codePrefix and rp->codeSuffix strings, as appropriate. +** +** Return 1 if the expanded code requires that "yylhsminor" local variable +** to be defined. 
+*/ +PRIVATE int translate_code(struct lemon *lemp, struct rule *rp){ + char *cp, *xp; + int i; + int rc = 0; /* True if yylhsminor is used */ + int dontUseRhs0 = 0; /* If true, use of left-most RHS label is illegal */ + const char *zSkip = 0; /* The zOvwrt comment within rp->code, or NULL */ + char lhsused = 0; /* True if the LHS element has been used */ + char lhsdirect; /* True if LHS writes directly into stack */ + char used[MAXRHS]; /* True for each RHS element which is used */ + char zLhs[50]; /* Convert the LHS symbol into this string */ + char zOvwrt[900]; /* Comment that to allow LHS to overwrite RHS */ + + for(i=0; inrhs; i++) used[i] = 0; + lhsused = 0; + + if( rp->code==0 ){ + static char newlinestr[2] = { '\n', '\0' }; + rp->code = newlinestr; + rp->line = rp->ruleline; + rp->noCode = 1; + }else{ + rp->noCode = 0; + } + + + if( rp->nrhs==0 ){ + /* If there are no RHS symbols, then writing directly to the LHS is ok */ + lhsdirect = 1; + }else if( rp->rhsalias[0]==0 ){ + /* The left-most RHS symbol has no value. LHS direct is ok. But + ** we have to call the destructor on the RHS symbol first. */ + lhsdirect = 1; + if( has_destructor(rp->rhs[0],lemp) ){ + append_str(0,0,0,0); + append_str(" yy_destructor(yypParser,%d,&yymsp[%d].minor);\n", 0, + rp->rhs[0]->index,1-rp->nrhs); + rp->codePrefix = Strsafe(append_str(0,0,0,0)); + rp->noCode = 0; + } + }else if( rp->lhsalias==0 ){ + /* There is no LHS value symbol. 
*/ + lhsdirect = 1; + }else if( strcmp(rp->lhsalias,rp->rhsalias[0])==0 ){ + /* The LHS symbol and the left-most RHS symbol are the same, so + ** direct writing is allowed */ + lhsdirect = 1; + lhsused = 1; + used[0] = 1; + if( rp->lhs->dtnum!=rp->rhs[0]->dtnum ){ + ErrorMsg(lemp->filename,rp->ruleline, + "%s(%s) and %s(%s) share the same label but have " + "different datatypes.", + rp->lhs->name, rp->lhsalias, rp->rhs[0]->name, rp->rhsalias[0]); + lemp->errorcnt++; + } + }else{ + lemon_sprintf(zOvwrt, "/*%s-overwrites-%s*/", + rp->lhsalias, rp->rhsalias[0]); + zSkip = strstr(rp->code, zOvwrt); + if( zSkip!=0 ){ + /* The code contains a special comment that indicates that it is safe + ** for the LHS label to overwrite left-most RHS label. */ + lhsdirect = 1; + }else{ + lhsdirect = 0; + } + } + if( lhsdirect ){ + sprintf(zLhs, "yymsp[%d].minor.yy%d",1-rp->nrhs,rp->lhs->dtnum); + }else{ + rc = 1; + sprintf(zLhs, "yylhsminor.yy%d",rp->lhs->dtnum); + } + + append_str(0,0,0,0); + + /* This const cast is wrong but harmless, if we're careful. 
*/ + for(cp=(char *)rp->code; *cp; cp++){ + if( cp==zSkip ){ + append_str(zOvwrt,0,0,0); + cp += lemonStrlen(zOvwrt)-1; + dontUseRhs0 = 1; + continue; + } + if( ISALPHA(*cp) && (cp==rp->code || (!ISALNUM(cp[-1]) && cp[-1]!='_')) ){ + char saved; + for(xp= &cp[1]; ISALNUM(*xp) || *xp=='_'; xp++); + saved = *xp; + *xp = 0; + if( rp->lhsalias && strcmp(cp,rp->lhsalias)==0 ){ + append_str(zLhs,0,0,0); + cp = xp; + lhsused = 1; + }else{ + for(i=0; inrhs; i++){ + if( rp->rhsalias[i] && strcmp(cp,rp->rhsalias[i])==0 ){ + if( i==0 && dontUseRhs0 ){ + ErrorMsg(lemp->filename,rp->ruleline, + "Label %s used after '%s'.", + rp->rhsalias[0], zOvwrt); + lemp->errorcnt++; + }else if( cp!=rp->code && cp[-1]=='@' ){ + /* If the argument is of the form @X then substituted + ** the token number of X, not the value of X */ + append_str("yymsp[%d].major",-1,i-rp->nrhs+1,0); + }else{ + struct symbol *sp = rp->rhs[i]; + int dtnum; + if( sp->type==MULTITERMINAL ){ + dtnum = sp->subsym[0]->dtnum; + }else{ + dtnum = sp->dtnum; + } + append_str("yymsp[%d].minor.yy%d",0,i-rp->nrhs+1, dtnum); + } + cp = xp; + used[i] = 1; + break; + } + } + } + *xp = saved; + } + append_str(cp, 1, 0, 0); + } /* End loop */ + + /* Main code generation completed */ + cp = append_str(0,0,0,0); + if( cp && cp[0] ) rp->code = Strsafe(cp); + append_str(0,0,0,0); + + /* Check to make sure the LHS has been used */ + if( rp->lhsalias && !lhsused ){ + ErrorMsg(lemp->filename,rp->ruleline, + "Label \"%s\" for \"%s(%s)\" is never used.", + rp->lhsalias,rp->lhs->name,rp->lhsalias); + lemp->errorcnt++; + } + + /* Generate destructor code for RHS minor values which are not referenced. + ** Generate error messages for unused labels and duplicate labels. 
+ */ + for(i=0; inrhs; i++){ + if( rp->rhsalias[i] ){ + if( i>0 ){ + int j; + if( rp->lhsalias && strcmp(rp->lhsalias,rp->rhsalias[i])==0 ){ + ErrorMsg(lemp->filename,rp->ruleline, + "%s(%s) has the same label as the LHS but is not the left-most " + "symbol on the RHS.", + rp->rhs[i]->name, rp->rhsalias[i]); + lemp->errorcnt++; + } + for(j=0; jrhsalias[j] && strcmp(rp->rhsalias[j],rp->rhsalias[i])==0 ){ + ErrorMsg(lemp->filename,rp->ruleline, + "Label %s used for multiple symbols on the RHS of a rule.", + rp->rhsalias[i]); + lemp->errorcnt++; + break; + } + } + } + if( !used[i] ){ + ErrorMsg(lemp->filename,rp->ruleline, + "Label %s for \"%s(%s)\" is never used.", + rp->rhsalias[i],rp->rhs[i]->name,rp->rhsalias[i]); + lemp->errorcnt++; + } + }else if( i>0 && has_destructor(rp->rhs[i],lemp) ){ + append_str(" yy_destructor(yypParser,%d,&yymsp[%d].minor);\n", 0, + rp->rhs[i]->index,i-rp->nrhs+1); + } + } + + /* If unable to write LHS values directly into the stack, write the + ** saved LHS value now. */ + if( lhsdirect==0 ){ + append_str(" yymsp[%d].minor.yy%d = ", 0, 1-rp->nrhs, rp->lhs->dtnum); + append_str(zLhs, 0, 0, 0); + append_str(";\n", 0, 0, 0); + } + + /* Suffix code generation complete */ + cp = append_str(0,0,0,0); + if( cp && cp[0] ){ + rp->codeSuffix = Strsafe(cp); + rp->noCode = 0; + } + + return rc; +} + +/* +** Generate code which executes when the rule "rp" is reduced. Write +** the code to "out". Make sure lineno stays up-to-date. 
+*/ +PRIVATE void emit_code( + FILE *out, + struct rule *rp, + struct lemon *lemp, + int *lineno +){ + const char *cp; + + /* Setup code prior to the #line directive */ + if( rp->codePrefix && rp->codePrefix[0] ){ + fprintf(out, "{%s", rp->codePrefix); + for(cp=rp->codePrefix; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; } + } + + /* Generate code to do the reduce action */ + if( rp->code ){ + if( !lemp->nolinenosflag ){ + (*lineno)++; + tplt_linedir(out,rp->line,lemp->filename); + } + fprintf(out,"{%s",rp->code); + for(cp=rp->code; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; } + fprintf(out,"}\n"); (*lineno)++; + if( !lemp->nolinenosflag ){ + (*lineno)++; + tplt_linedir(out,*lineno,lemp->outname); + } + } + + /* Generate breakdown code that occurs after the #line directive */ + if( rp->codeSuffix && rp->codeSuffix[0] ){ + fprintf(out, "%s", rp->codeSuffix); + for(cp=rp->codeSuffix; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; } + } + + if( rp->codePrefix ){ + fprintf(out, "}\n"); (*lineno)++; + } + + return; +} + +/* +** Print the definition of the union used for the parser's data stack. +** This union contains fields for every possible data type for tokens +** and nonterminals. In the process of computing and printing this +** union, also set the ".dtnum" field of every terminal and nonterminal +** symbol. +*/ +void print_stack_union( + FILE *out, /* The output stream */ + struct lemon *lemp, /* The main info structure for this parser */ + int *plineno, /* Pointer to the line number */ + int mhflag /* True if generating makeheaders output */ +){ + int lineno; /* The line number of the output */ + char **types; /* A hash table of datatypes */ + int arraysize; /* Size of the "types" array */ + int maxdtlength; /* Maximum length of any ".datatype" field. 
*/ + char *stddt; /* Standardized name for a datatype */ + int i,j; /* Loop counters */ + unsigned hash; /* For hashing the name of a type */ + const char *name; /* Name of the parser */ + + /* Allocate and initialize types[] and allocate stddt[] */ + arraysize = lemp->nsymbol * 2; + types = (char**)lemon_calloc( arraysize, sizeof(char*) ); + if( types==0 ){ + fprintf(stderr,"Out of memory.\n"); + exit(1); + } + for(i=0; ivartype ){ + maxdtlength = lemonStrlen(lemp->vartype); + } + for(i=0; insymbol; i++){ + int len; + struct symbol *sp = lemp->symbols[i]; + if( sp->datatype==0 ) continue; + len = lemonStrlen(sp->datatype); + if( len>maxdtlength ) maxdtlength = len; + } + stddt = (char*)lemon_malloc( maxdtlength*2 + 1 ); + if( stddt==0 ){ + fprintf(stderr,"Out of memory.\n"); + exit(1); + } + + /* Build a hash table of datatypes. The ".dtnum" field of each symbol + ** is filled in with the hash index plus 1. A ".dtnum" value of 0 is + ** used for terminal symbols. If there is no %default_type defined then + ** 0 is also used as the .dtnum value for nonterminals which do not specify + ** a datatype using the %type directive. 
+ */ + for(i=0; insymbol; i++){ + struct symbol *sp = lemp->symbols[i]; + char *cp; + if( sp==lemp->errsym ){ + sp->dtnum = arraysize+1; + continue; + } + if( sp->type!=NONTERMINAL || (sp->datatype==0 && lemp->vartype==0) ){ + sp->dtnum = 0; + continue; + } + cp = sp->datatype; + if( cp==0 ) cp = lemp->vartype; + j = 0; + while( ISSPACE(*cp) ) cp++; + while( *cp ) stddt[j++] = *cp++; + while( j>0 && ISSPACE(stddt[j-1]) ) j--; + stddt[j] = 0; + if( lemp->tokentype && strcmp(stddt, lemp->tokentype)==0 ){ + sp->dtnum = 0; + continue; + } + hash = 0; + for(j=0; stddt[j]; j++){ + hash = hash*53 + stddt[j]; + } + hash = (hash & 0x7fffffff)%arraysize; + while( types[hash] ){ + if( strcmp(types[hash],stddt)==0 ){ + sp->dtnum = hash + 1; + break; + } + hash++; + if( hash>=(unsigned)arraysize ) hash = 0; + } + if( types[hash]==0 ){ + sp->dtnum = hash + 1; + types[hash] = (char*)lemon_malloc( lemonStrlen(stddt)+1 ); + if( types[hash]==0 ){ + fprintf(stderr,"Out of memory.\n"); + exit(1); + } + lemon_strcpy(types[hash],stddt); + } + } + + /* Print out the definition of YYTOKENTYPE and YYMINORTYPE */ + name = lemp->name ? lemp->name : "Parse"; + lineno = *plineno; + if( mhflag ){ fprintf(out,"#if INTERFACE\n"); lineno++; } + fprintf(out,"#define %sTOKENTYPE %s\n",name, + lemp->tokentype?lemp->tokentype:"void*"); lineno++; + if( mhflag ){ fprintf(out,"#endif\n"); lineno++; } + fprintf(out,"typedef union {\n"); lineno++; + fprintf(out," int yyinit;\n"); lineno++; + fprintf(out," %sTOKENTYPE yy0;\n",name); lineno++; + for(i=0; ierrsym && lemp->errsym->useCnt ){ + fprintf(out," int yy%d;\n",lemp->errsym->dtnum); lineno++; + } + lemon_free(stddt); + lemon_free(types); + fprintf(out,"} YYMINORTYPE;\n"); lineno++; + *plineno = lineno; +} + +/* +** Return the name of a C datatype able to represent values between +** lwr and upr, inclusive. If pnByte!=NULL then also write the sizeof +** for that type (1, 2, or 4) into *pnByte. 
+*/ +static const char *minimum_size_type(int lwr, int upr, int *pnByte){ + const char *zType = "int"; + int nByte = 4; + if( lwr>=0 ){ + if( upr<=255 ){ + zType = "unsigned char"; + nByte = 1; + }else if( upr<65535 ){ + zType = "unsigned short int"; + nByte = 2; + }else{ + zType = "unsigned int"; + nByte = 4; + } + }else if( lwr>=-127 && upr<=127 ){ + zType = "signed char"; + nByte = 1; + }else if( lwr>=-32767 && upr<32767 ){ + zType = "short"; + nByte = 2; + } + if( pnByte ) *pnByte = nByte; + return zType; +} + +/* +** Each state contains a set of token transaction and a set of +** nonterminal transactions. Each of these sets makes an instance +** of the following structure. An array of these structures is used +** to order the creation of entries in the yy_action[] table. +*/ +struct axset { + struct state *stp; /* A pointer to a state */ + int isTkn; /* True to use tokens. False for non-terminals */ + int nAction; /* Number of actions */ + int iOrder; /* Original order of action sets */ +}; + +/* +** Compare to axset structures for sorting purposes +*/ +static int axset_compare(const void *a, const void *b){ + struct axset *p1 = (struct axset*)a; + struct axset *p2 = (struct axset*)b; + int c; + c = p2->nAction - p1->nAction; + if( c==0 ){ + c = p1->iOrder - p2->iOrder; + } + assert( c!=0 || p1==p2 ); + return c; +} + +/* +** Write text on "out" that describes the rule "rp". 
+*/ +static void writeRuleText(FILE *out, struct rule *rp){ + int j; + fprintf(out,"%s ::=", rp->lhs->name); + for(j=0; jnrhs; j++){ + struct symbol *sp = rp->rhs[j]; + if( sp->type!=MULTITERMINAL ){ + fprintf(out," %s", sp->name); + }else{ + int k; + fprintf(out," %s", sp->subsym[0]->name); + for(k=1; knsubsym; k++){ + fprintf(out,"|%s",sp->subsym[k]->name); + } + } + } +} + + +/* Generate C source code for the parser */ +void ReportTable( + struct lemon *lemp, + int mhflag, /* Output in makeheaders format if true */ + int sqlFlag /* Generate the *.sql file too */ +){ + FILE *out, *in, *sql; + int lineno; + struct state *stp; + struct action *ap; + struct rule *rp; + struct acttab *pActtab; + int i, j, n, sz, mn, mx; + int nLookAhead; + int szActionType; /* sizeof(YYACTIONTYPE) */ + int szCodeType; /* sizeof(YYCODETYPE) */ + const char *name; + int mnTknOfst, mxTknOfst; + int mnNtOfst, mxNtOfst; + struct axset *ax; + char *prefix; + + lemp->minShiftReduce = lemp->nstate; + lemp->errAction = lemp->minShiftReduce + lemp->nrule; + lemp->accAction = lemp->errAction + 1; + lemp->noAction = lemp->accAction + 1; + lemp->minReduce = lemp->noAction + 1; + lemp->maxAction = lemp->minReduce + lemp->nrule; + + in = tplt_open(lemp); + if( in==0 ) return; + out = file_open(lemp,".c","wb"); + if( out==0 ){ + fclose(in); + return; + } + if( sqlFlag==0 ){ + sql = 0; + }else{ + sql = file_open(lemp, ".sql", "wb"); + if( sql==0 ){ + fclose(in); + fclose(out); + return; + } + fprintf(sql, + "BEGIN;\n" + "CREATE TABLE symbol(\n" + " id INTEGER PRIMARY KEY,\n" + " name TEXT NOT NULL,\n" + " isTerminal BOOLEAN NOT NULL,\n" + " fallback INTEGER REFERENCES symbol" + " DEFERRABLE INITIALLY DEFERRED\n" + ");\n" + ); + for(i=0; insymbol; i++){ + fprintf(sql, + "INSERT INTO symbol(id,name,isTerminal,fallback)" + "VALUES(%d,'%s',%s", + i, lemp->symbols[i]->name, + interminal ? 
"TRUE" : "FALSE" + ); + if( lemp->symbols[i]->fallback ){ + fprintf(sql, ",%d);\n", lemp->symbols[i]->fallback->index); + }else{ + fprintf(sql, ",NULL);\n"); + } + } + fprintf(sql, + "CREATE TABLE rule(\n" + " ruleid INTEGER PRIMARY KEY,\n" + " lhs INTEGER REFERENCES symbol(id),\n" + " txt TEXT\n" + ");\n" + "CREATE TABLE rulerhs(\n" + " ruleid INTEGER REFERENCES rule(ruleid),\n" + " pos INTEGER,\n" + " sym INTEGER REFERENCES symbol(id)\n" + ");\n" + ); + for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){ + assert( i==rp->iRule ); + fprintf(sql, + "INSERT INTO rule(ruleid,lhs,txt)VALUES(%d,%d,'", + rp->iRule, rp->lhs->index + ); + writeRuleText(sql, rp); + fprintf(sql,"');\n"); + for(j=0; jnrhs; j++){ + struct symbol *sp = rp->rhs[j]; + if( sp->type!=MULTITERMINAL ){ + fprintf(sql, + "INSERT INTO rulerhs(ruleid,pos,sym)VALUES(%d,%d,%d);\n", + i,j,sp->index + ); + }else{ + int k; + for(k=0; knsubsym; k++){ + fprintf(sql, + "INSERT INTO rulerhs(ruleid,pos,sym)VALUES(%d,%d,%d);\n", + i,j,sp->subsym[k]->index + ); + } + } + } + } + fprintf(sql, "COMMIT;\n"); + } + lineno = 1; + + fprintf(out, + "/* This file is automatically generated by Lemon from input grammar\n" + "** source file \"%s\"", lemp->filename); lineno++; + if( nDefineUsed==0 ){ + fprintf(out, ".\n*/\n"); lineno += 2; + }else{ + fprintf(out, " with these options:\n**\n"); lineno += 2; + for(i=0; iinclude==0 ) lemp->include = ""; + for(i=0; ISSPACE(lemp->include[i]); i++){ + if( lemp->include[i]=='\n' ){ + lemp->include += i+1; + i = -1; + } + } + if( lemp->include[0]=='/' ){ + tplt_skip_header(in,&lineno); + }else{ + tplt_xfer(lemp->name,in,out,&lineno); + } + + /* Generate the include code, if any */ + tplt_print(out,lemp,lemp->include,&lineno); + if( mhflag ){ + char *incName = file_makename(lemp, ".h"); + fprintf(out,"#include \"%s\"\n", incName); lineno++; + lemon_free(incName); + } + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate #defines for all tokens */ + if( lemp->tokenprefix ) prefix = 
lemp->tokenprefix; + else prefix = ""; + if( mhflag ){ + fprintf(out,"#if INTERFACE\n"); lineno++; + }else{ + fprintf(out,"#ifndef %s%s\n", prefix, lemp->symbols[1]->name); + } + for(i=1; interminal; i++){ + fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i); + lineno++; + } + fprintf(out,"#endif\n"); lineno++; + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate the defines */ + fprintf(out,"#define YYCODETYPE %s\n", + minimum_size_type(0, lemp->nsymbol, &szCodeType)); lineno++; + fprintf(out,"#define YYNOCODE %d\n",lemp->nsymbol); lineno++; + fprintf(out,"#define YYACTIONTYPE %s\n", + minimum_size_type(0,lemp->maxAction,&szActionType)); lineno++; + if( lemp->wildcard ){ + fprintf(out,"#define YYWILDCARD %d\n", + lemp->wildcard->index); lineno++; + } + print_stack_union(out,lemp,&lineno,mhflag); + fprintf(out, "#ifndef YYSTACKDEPTH\n"); lineno++; + if( lemp->stacksize ){ + fprintf(out,"#define YYSTACKDEPTH %s\n",lemp->stacksize); lineno++; + }else{ + fprintf(out,"#define YYSTACKDEPTH 100\n"); lineno++; + } + fprintf(out, "#endif\n"); lineno++; + if( mhflag ){ + fprintf(out,"#if INTERFACE\n"); lineno++; + } + name = lemp->name ? 
lemp->name : "Parse"; + if( lemp->arg && lemp->arg[0] ){ + i = lemonStrlen(lemp->arg); + while( i>=1 && ISSPACE(lemp->arg[i-1]) ) i--; + while( i>=1 && (ISALNUM(lemp->arg[i-1]) || lemp->arg[i-1]=='_') ) i--; + fprintf(out,"#define %sARG_SDECL %s;\n",name,lemp->arg); lineno++; + fprintf(out,"#define %sARG_PDECL ,%s\n",name,lemp->arg); lineno++; + fprintf(out,"#define %sARG_PARAM ,%s\n",name,&lemp->arg[i]); lineno++; + fprintf(out,"#define %sARG_FETCH %s=yypParser->%s;\n", + name,lemp->arg,&lemp->arg[i]); lineno++; + fprintf(out,"#define %sARG_STORE yypParser->%s=%s;\n", + name,&lemp->arg[i],&lemp->arg[i]); lineno++; + }else{ + fprintf(out,"#define %sARG_SDECL\n",name); lineno++; + fprintf(out,"#define %sARG_PDECL\n",name); lineno++; + fprintf(out,"#define %sARG_PARAM\n",name); lineno++; + fprintf(out,"#define %sARG_FETCH\n",name); lineno++; + fprintf(out,"#define %sARG_STORE\n",name); lineno++; + } + if( lemp->reallocFunc ){ + fprintf(out,"#define YYREALLOC %s\n", lemp->reallocFunc); lineno++; + }else{ + fprintf(out,"#define YYREALLOC realloc\n"); lineno++; + } + if( lemp->freeFunc ){ + fprintf(out,"#define YYFREE %s\n", lemp->freeFunc); lineno++; + }else{ + fprintf(out,"#define YYFREE free\n"); lineno++; + } + if( lemp->reallocFunc && lemp->freeFunc ){ + fprintf(out,"#define YYDYNSTACK 1\n"); lineno++; + }else{ + fprintf(out,"#define YYDYNSTACK 0\n"); lineno++; + } + if( lemp->ctx && lemp->ctx[0] ){ + i = lemonStrlen(lemp->ctx); + while( i>=1 && ISSPACE(lemp->ctx[i-1]) ) i--; + while( i>=1 && (ISALNUM(lemp->ctx[i-1]) || lemp->ctx[i-1]=='_') ) i--; + fprintf(out,"#define %sCTX_SDECL %s;\n",name,lemp->ctx); lineno++; + fprintf(out,"#define %sCTX_PDECL ,%s\n",name,lemp->ctx); lineno++; + fprintf(out,"#define %sCTX_PARAM ,%s\n",name,&lemp->ctx[i]); lineno++; + fprintf(out,"#define %sCTX_FETCH %s=yypParser->%s;\n", + name,lemp->ctx,&lemp->ctx[i]); lineno++; + fprintf(out,"#define %sCTX_STORE yypParser->%s=%s;\n", + name,&lemp->ctx[i],&lemp->ctx[i]); lineno++; + }else{ + 
fprintf(out,"#define %sCTX_SDECL\n",name); lineno++; + fprintf(out,"#define %sCTX_PDECL\n",name); lineno++; + fprintf(out,"#define %sCTX_PARAM\n",name); lineno++; + fprintf(out,"#define %sCTX_FETCH\n",name); lineno++; + fprintf(out,"#define %sCTX_STORE\n",name); lineno++; + } + if( mhflag ){ + fprintf(out,"#endif\n"); lineno++; + } + if( lemp->errsym && lemp->errsym->useCnt ){ + fprintf(out,"#define YYERRORSYMBOL %d\n",lemp->errsym->index); lineno++; + fprintf(out,"#define YYERRSYMDT yy%d\n",lemp->errsym->dtnum); lineno++; + } + if( lemp->has_fallback ){ + fprintf(out,"#define YYFALLBACK 1\n"); lineno++; + } + + /* Compute the action table, but do not output it yet. The action + ** table must be computed before generating the YYNSTATE macro because + ** we need to know how many states can be eliminated. + */ + ax = (struct axset *) lemon_calloc(lemp->nxstate*2, sizeof(ax[0])); + if( ax==0 ){ + fprintf(stderr,"malloc failed\n"); + exit(1); + } + for(i=0; inxstate; i++){ + stp = lemp->sorted[i]; + ax[i*2].stp = stp; + ax[i*2].isTkn = 1; + ax[i*2].nAction = stp->nTknAct; + ax[i*2+1].stp = stp; + ax[i*2+1].isTkn = 0; + ax[i*2+1].nAction = stp->nNtAct; + } + mxTknOfst = mnTknOfst = 0; + mxNtOfst = mnNtOfst = 0; + /* In an effort to minimize the action table size, use the heuristic + ** of placing the largest action sets first */ + for(i=0; inxstate*2; i++) ax[i].iOrder = i; + qsort(ax, lemp->nxstate*2, sizeof(ax[0]), axset_compare); + pActtab = acttab_alloc(lemp->nsymbol, lemp->nterminal); + for(i=0; inxstate*2 && ax[i].nAction>0; i++){ + stp = ax[i].stp; + if( ax[i].isTkn ){ + for(ap=stp->ap; ap; ap=ap->next){ + int action; + if( ap->sp->index>=lemp->nterminal ) continue; + action = compute_action(lemp, ap); + if( action<0 ) continue; + acttab_action(pActtab, ap->sp->index, action); + } + stp->iTknOfst = acttab_insert(pActtab, 1); + if( stp->iTknOfstiTknOfst; + if( stp->iTknOfst>mxTknOfst ) mxTknOfst = stp->iTknOfst; + }else{ + for(ap=stp->ap; ap; ap=ap->next){ + int 
action; + if( ap->sp->indexnterminal ) continue; + if( ap->sp->index==lemp->nsymbol ) continue; + action = compute_action(lemp, ap); + if( action<0 ) continue; + acttab_action(pActtab, ap->sp->index, action); + } + stp->iNtOfst = acttab_insert(pActtab, 0); + if( stp->iNtOfstiNtOfst; + if( stp->iNtOfst>mxNtOfst ) mxNtOfst = stp->iNtOfst; + } +#if 0 /* Uncomment for a trace of how the yy_action[] table fills out */ + { int jj, nn; + for(jj=nn=0; jjnAction; jj++){ + if( pActtab->aAction[jj].action<0 ) nn++; + } + printf("%4d: State %3d %s n: %2d size: %5d freespace: %d\n", + i, stp->statenum, ax[i].isTkn ? "Token" : "Var ", + ax[i].nAction, pActtab->nAction, nn); + } +#endif + } + lemon_free(ax); + + /* Mark rules that are actually used for reduce actions after all + ** optimizations have been applied + */ + for(rp=lemp->rule; rp; rp=rp->next) rp->doesReduce = LEMON_FALSE; + for(i=0; inxstate; i++){ + for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){ + if( ap->type==REDUCE || ap->type==SHIFTREDUCE ){ + ap->x.rp->doesReduce = 1; + } + } + } + + /* Finish rendering the constants now that the action table has + ** been computed */ + fprintf(out,"#define YYNSTATE %d\n",lemp->nxstate); lineno++; + fprintf(out,"#define YYNRULE %d\n",lemp->nrule); lineno++; + fprintf(out,"#define YYNRULE_WITH_ACTION %d\n",lemp->nruleWithAction); + lineno++; + fprintf(out,"#define YYNTOKEN %d\n",lemp->nterminal); lineno++; + fprintf(out,"#define YY_MAX_SHIFT %d\n",lemp->nxstate-1); lineno++; + i = lemp->minShiftReduce; + fprintf(out,"#define YY_MIN_SHIFTREDUCE %d\n",i); lineno++; + i += lemp->nrule; + fprintf(out,"#define YY_MAX_SHIFTREDUCE %d\n", i-1); lineno++; + fprintf(out,"#define YY_ERROR_ACTION %d\n", lemp->errAction); lineno++; + fprintf(out,"#define YY_ACCEPT_ACTION %d\n", lemp->accAction); lineno++; + fprintf(out,"#define YY_NO_ACTION %d\n", lemp->noAction); lineno++; + fprintf(out,"#define YY_MIN_REDUCE %d\n", lemp->minReduce); lineno++; + i = lemp->minReduce + lemp->nrule; + 
fprintf(out,"#define YY_MAX_REDUCE %d\n", i-1); lineno++; + + /* Minimum and maximum token values that have a destructor */ + mn = mx = 0; + for(i=0; insymbol; i++){ + struct symbol *sp = lemp->symbols[i]; + + if( sp && sp->type!=TERMINAL && sp->destructor ){ + if( mn==0 || sp->indexindex; + if( sp->index>mx ) mx = sp->index; + } + } + if( lemp->tokendest ) mn = 0; + if( lemp->vardest ) mx = lemp->nsymbol-1; + fprintf(out,"#define YY_MIN_DSTRCTR %d\n", mn); lineno++; + fprintf(out,"#define YY_MAX_DSTRCTR %d\n", mx); lineno++; + + tplt_xfer(lemp->name,in,out,&lineno); + + /* Now output the action table and its associates: + ** + ** yy_action[] A single table containing all actions. + ** yy_lookahead[] A table containing the lookahead for each entry in + ** yy_action. Used to detect hash collisions. + ** yy_shift_ofst[] For each state, the offset into yy_action for + ** shifting terminals. + ** yy_reduce_ofst[] For each state, the offset into yy_action for + ** shifting non-terminals after a reduce. + ** yy_default[] Default action for each state. 
+ */ + + /* Output the yy_action table */ + lemp->nactiontab = n = acttab_action_size(pActtab); + lemp->tablesize += n*szActionType; + fprintf(out,"#define YY_ACTTAB_COUNT (%d)\n", n); lineno++; + fprintf(out,"static const YYACTIONTYPE yy_action[] = {\n"); lineno++; + for(i=j=0; inoAction; + if( j==0 ) fprintf(out," /* %5d */ ", i); + fprintf(out, " %4d,", action); + if( j==9 || i==n-1 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + } + fprintf(out, "};\n"); lineno++; + + /* Output the yy_lookahead table */ + lemp->nlookaheadtab = n = acttab_lookahead_size(pActtab); + lemp->tablesize += n*szCodeType; + fprintf(out,"static const YYCODETYPE yy_lookahead[] = {\n"); lineno++; + for(i=j=0; insymbol; + if( j==0 ) fprintf(out," /* %5d */ ", i); + fprintf(out, " %4d,", la); + if( j==9 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + } + /* Add extra entries to the end of the yy_lookahead[] table so that + ** yy_shift_ofst[]+iToken will always be a valid index into the array, + ** even for the largest possible value of yy_shift_ofst[] and iToken. 
*/ + nLookAhead = lemp->nterminal + lemp->nactiontab; + while( interminal); + if( j==9 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + i++; + } + if( j>0 ){ fprintf(out, "\n"); lineno++; } + fprintf(out, "};\n"); lineno++; + + /* Output the yy_shift_ofst[] table */ + n = lemp->nxstate; + while( n>0 && lemp->sorted[n-1]->iTknOfst==NO_OFFSET ) n--; + fprintf(out, "#define YY_SHIFT_COUNT (%d)\n", n-1); lineno++; + fprintf(out, "#define YY_SHIFT_MIN (%d)\n", mnTknOfst); lineno++; + fprintf(out, "#define YY_SHIFT_MAX (%d)\n", mxTknOfst); lineno++; + fprintf(out, "static const %s yy_shift_ofst[] = {\n", + minimum_size_type(mnTknOfst, lemp->nterminal+lemp->nactiontab, &sz)); + lineno++; + lemp->tablesize += n*sz; + for(i=j=0; isorted[i]; + ofst = stp->iTknOfst; + if( ofst==NO_OFFSET ) ofst = lemp->nactiontab; + if( j==0 ) fprintf(out," /* %5d */ ", i); + fprintf(out, " %4d,", ofst); + if( j==9 || i==n-1 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + } + fprintf(out, "};\n"); lineno++; + + /* Output the yy_reduce_ofst[] table */ + n = lemp->nxstate; + while( n>0 && lemp->sorted[n-1]->iNtOfst==NO_OFFSET ) n--; + fprintf(out, "#define YY_REDUCE_COUNT (%d)\n", n-1); lineno++; + fprintf(out, "#define YY_REDUCE_MIN (%d)\n", mnNtOfst); lineno++; + fprintf(out, "#define YY_REDUCE_MAX (%d)\n", mxNtOfst); lineno++; + fprintf(out, "static const %s yy_reduce_ofst[] = {\n", + minimum_size_type(mnNtOfst-1, mxNtOfst, &sz)); lineno++; + lemp->tablesize += n*sz; + for(i=j=0; isorted[i]; + ofst = stp->iNtOfst; + if( ofst==NO_OFFSET ) ofst = mnNtOfst - 1; + if( j==0 ) fprintf(out," /* %5d */ ", i); + fprintf(out, " %4d,", ofst); + if( j==9 || i==n-1 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + } + fprintf(out, "};\n"); lineno++; + + /* Output the default action table */ + fprintf(out, "static const YYACTIONTYPE yy_default[] = {\n"); lineno++; + n = lemp->nxstate; + lemp->tablesize += n*szActionType; + for(i=j=0; isorted[i]; + if( 
j==0 ) fprintf(out," /* %5d */ ", i); + if( stp->iDfltReduce<0 ){ + fprintf(out, " %4d,", lemp->errAction); + }else{ + fprintf(out, " %4d,", stp->iDfltReduce + lemp->minReduce); + } + if( j==9 || i==n-1 ){ + fprintf(out, "\n"); lineno++; + j = 0; + }else{ + j++; + } + } + fprintf(out, "};\n"); lineno++; + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate the table of fallback tokens. + */ + if( lemp->has_fallback ){ + mx = lemp->nterminal - 1; + /* 2019-08-28: Generate fallback entries for every token to avoid + ** having to do a range check on the index */ + /* while( mx>0 && lemp->symbols[mx]->fallback==0 ){ mx--; } */ + lemp->tablesize += (mx+1)*szCodeType; + for(i=0; i<=mx; i++){ + struct symbol *p = lemp->symbols[i]; + if( p->fallback==0 ){ + fprintf(out, " 0, /* %10s => nothing */\n", p->name); + }else{ + fprintf(out, " %3d, /* %10s => %s */\n", p->fallback->index, + p->name, p->fallback->name); + } + lineno++; + } + } + tplt_xfer(lemp->name, in, out, &lineno); + + /* Generate a table containing the symbolic name of every symbol + */ + for(i=0; insymbol; i++){ + fprintf(out," /* %4d */ \"%s\",\n",i, lemp->symbols[i]->name); lineno++; + } + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate a table containing a text string that describes every + ** rule in the rule set of the grammar. This information is used + ** when tracing REDUCE actions. + */ + for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){ + assert( rp->iRule==i ); + fprintf(out," /* %3d */ \"", i); + writeRuleText(out, rp); + fprintf(out,"\",\n"); lineno++; + } + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which executes every time a symbol is popped from + ** the stack while processing errors or while destroying the parser. 
+ ** (In other words, generate the %destructor actions) + */ + if( lemp->tokendest ){ + int once = 1; + for(i=0; insymbol; i++){ + struct symbol *sp = lemp->symbols[i]; + if( sp==0 || sp->type!=TERMINAL ) continue; + if( once ){ + fprintf(out, " /* TERMINAL Destructor */\n"); lineno++; + once = 0; + } + fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++; + } + for(i=0; insymbol && lemp->symbols[i]->type!=TERMINAL; i++); + if( insymbol ){ + emit_destructor_code(out,lemp->symbols[i],lemp,&lineno); + fprintf(out," break;\n"); lineno++; + } + } + if( lemp->vardest ){ + struct symbol *dflt_sp = 0; + int once = 1; + for(i=0; insymbol; i++){ + struct symbol *sp = lemp->symbols[i]; + if( sp==0 || sp->type==TERMINAL || + sp->index<=0 || sp->destructor!=0 ) continue; + if( once ){ + fprintf(out, " /* Default NON-TERMINAL Destructor */\n");lineno++; + once = 0; + } + fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++; + dflt_sp = sp; + } + if( dflt_sp!=0 ){ + emit_destructor_code(out,dflt_sp,lemp,&lineno); + } + fprintf(out," break;\n"); lineno++; + } + for(i=0; insymbol; i++){ + struct symbol *sp = lemp->symbols[i]; + if( sp==0 || sp->type==TERMINAL || sp->destructor==0 ) continue; + if( sp->destLineno<0 ) continue; /* Already emitted */ + fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++; + + /* Combine duplicate destructors into a single case */ + for(j=i+1; jnsymbol; j++){ + struct symbol *sp2 = lemp->symbols[j]; + if( sp2 && sp2->type!=TERMINAL && sp2->destructor + && sp2->dtnum==sp->dtnum + && strcmp(sp->destructor,sp2->destructor)==0 ){ + fprintf(out," case %d: /* %s */\n", + sp2->index, sp2->name); lineno++; + sp2->destLineno = -1; /* Avoid emitting this destructor again */ + } + } + + emit_destructor_code(out,lemp->symbols[i],lemp,&lineno); + fprintf(out," break;\n"); lineno++; + } + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which executes whenever the parser stack overflows */ + 
tplt_print(out,lemp,lemp->overflow,&lineno); + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate the tables of rule information. yyRuleInfoLhs[] and + ** yyRuleInfoNRhs[]. + ** + ** Note: This code depends on the fact that rules are number + ** sequentially beginning with 0. + */ + for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){ + fprintf(out," %4d, /* (%d) ", rp->lhs->index, i); + rule_print(out, rp); + fprintf(out," */\n"); lineno++; + } + tplt_xfer(lemp->name,in,out,&lineno); + for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){ + fprintf(out," %3d, /* (%d) ", -rp->nrhs, i); + rule_print(out, rp); + fprintf(out," */\n"); lineno++; + } + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which execution during each REDUCE action */ + i = 0; + for(rp=lemp->rule; rp; rp=rp->next){ + i += translate_code(lemp, rp); + } + if( i ){ + fprintf(out," YYMINORTYPE yylhsminor;\n"); lineno++; + } + /* First output rules other than the default: rule */ + for(rp=lemp->rule; rp; rp=rp->next){ + struct rule *rp2; /* Other rules with the same action */ + if( rp->codeEmitted ) continue; + if( rp->noCode ){ + /* No C code actions, so this will be part of the "default:" rule */ + continue; + } + fprintf(out," case %d: /* ", rp->iRule); + writeRuleText(out, rp); + fprintf(out, " */\n"); lineno++; + for(rp2=rp->next; rp2; rp2=rp2->next){ + if( rp2->code==rp->code && rp2->codePrefix==rp->codePrefix + && rp2->codeSuffix==rp->codeSuffix ){ + fprintf(out," case %d: /* ", rp2->iRule); + writeRuleText(out, rp2); + fprintf(out," */ yytestcase(yyruleno==%d);\n", rp2->iRule); lineno++; + rp2->codeEmitted = 1; + } + } + emit_code(out,rp,lemp,&lineno); + fprintf(out," break;\n"); lineno++; + rp->codeEmitted = 1; + } + /* Finally, output the default: rule. We choose as the default: all + ** empty actions. 
*/ + fprintf(out," default:\n"); lineno++; + for(rp=lemp->rule; rp; rp=rp->next){ + if( rp->codeEmitted ) continue; + assert( rp->noCode ); + fprintf(out," /* (%d) ", rp->iRule); + writeRuleText(out, rp); + if( rp->neverReduce ){ + fprintf(out, " (NEVER REDUCES) */ assert(yyruleno!=%d);\n", + rp->iRule); lineno++; + }else if( rp->doesReduce ){ + fprintf(out, " */ yytestcase(yyruleno==%d);\n", rp->iRule); lineno++; + }else{ + fprintf(out, " (OPTIMIZED OUT) */ assert(yyruleno!=%d);\n", + rp->iRule); lineno++; + } + } + fprintf(out," break;\n"); lineno++; + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which executes if a parse fails */ + tplt_print(out,lemp,lemp->failure,&lineno); + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which executes when a syntax error occurs */ + tplt_print(out,lemp,lemp->error,&lineno); + tplt_xfer(lemp->name,in,out,&lineno); + + /* Generate code which executes when the parser accepts its input */ + tplt_print(out,lemp,lemp->accept,&lineno); + tplt_xfer(lemp->name,in,out,&lineno); + + /* Append any addition code the user desires */ + tplt_print(out,lemp,lemp->extracode,&lineno); + + acttab_free(pActtab); + fclose(in); + fclose(out); + if( sql ) fclose(sql); + return; +} + +/* Generate a header file for the parser */ +void ReportHeader(struct lemon *lemp) +{ + FILE *out, *in; + const char *prefix; + char line[LINESIZE]; + char pattern[LINESIZE]; + int i; + + if( lemp->tokenprefix ) prefix = lemp->tokenprefix; + else prefix = ""; + in = file_open(lemp,".h","rb"); + if( in ){ + int nextChar; + for(i=1; interminal && fgets(line,LINESIZE,in); i++){ + lemon_sprintf(pattern,"#define %s%-30s %3d\n", + prefix,lemp->symbols[i]->name,i); + if( strcmp(line,pattern) ) break; + } + nextChar = fgetc(in); + fclose(in); + if( i==lemp->nterminal && nextChar==EOF ){ + /* No change in the file. Don't rewrite it. 
*/ + return; + } + } + out = file_open(lemp,".h","wb"); + if( out ){ + for(i=1; interminal; i++){ + fprintf(out,"#define %s%-30s %3d\n",prefix,lemp->symbols[i]->name,i); + } + fclose(out); + } + return; +} + +/* Reduce the size of the action tables, if possible, by making use +** of defaults. +** +** In this version, we take the most frequent REDUCE action and make +** it the default. Except, there is no default if the wildcard token +** is a possible look-ahead. +*/ +void CompressTables(struct lemon *lemp) +{ + struct state *stp; + struct action *ap, *ap2, *nextap; + struct rule *rp, *rp2, *rbest; + int nbest, n; + int i; + int usesWildcard; + + for(i=0; instate; i++){ + stp = lemp->sorted[i]; + nbest = 0; + rbest = 0; + usesWildcard = 0; + + for(ap=stp->ap; ap; ap=ap->next){ + if( ap->type==SHIFT && ap->sp==lemp->wildcard ){ + usesWildcard = 1; + } + if( ap->type!=REDUCE ) continue; + rp = ap->x.rp; + if( rp->lhsStart ) continue; + if( rp==rbest ) continue; + n = 1; + for(ap2=ap->next; ap2; ap2=ap2->next){ + if( ap2->type!=REDUCE ) continue; + rp2 = ap2->x.rp; + if( rp2==rbest ) continue; + if( rp2==rp ) n++; + } + if( n>nbest ){ + nbest = n; + rbest = rp; + } + } + + /* Do not make a default if the number of rules to default + ** is not at least 1 or if the wildcard token is a possible + ** lookahead. + */ + if( nbest<1 || usesWildcard ) continue; + + + /* Combine matching REDUCE actions into a single default */ + for(ap=stp->ap; ap; ap=ap->next){ + if( ap->type==REDUCE && ap->x.rp==rbest ) break; + } + assert( ap ); + ap->sp = Symbol_new("{default}"); + for(ap=ap->next; ap; ap=ap->next){ + if( ap->type==REDUCE && ap->x.rp==rbest ) ap->type = NOT_USED; + } + stp->ap = Action_sort(stp->ap); + + for(ap=stp->ap; ap; ap=ap->next){ + if( ap->type==SHIFT ) break; + if( ap->type==REDUCE && ap->x.rp!=rbest ) break; + } + if( ap==0 ){ + stp->autoReduce = 1; + stp->pDfltReduce = rbest; + } + } + + /* Make a second pass over all states and actions. 
Convert + ** every action that is a SHIFT to an autoReduce state into + ** a SHIFTREDUCE action. + */ + for(i=0; instate; i++){ + stp = lemp->sorted[i]; + for(ap=stp->ap; ap; ap=ap->next){ + struct state *pNextState; + if( ap->type!=SHIFT ) continue; + pNextState = ap->x.stp; + if( pNextState->autoReduce && pNextState->pDfltReduce!=0 ){ + ap->type = SHIFTREDUCE; + ap->x.rp = pNextState->pDfltReduce; + } + } + } + + /* If a SHIFTREDUCE action specifies a rule that has a single RHS term + ** (meaning that the SHIFTREDUCE will land back in the state where it + ** started) and if there is no C-code associated with the reduce action, + ** then we can go ahead and convert the action to be the same as the + ** action for the RHS of the rule. + */ + for(i=0; instate; i++){ + stp = lemp->sorted[i]; + for(ap=stp->ap; ap; ap=nextap){ + nextap = ap->next; + if( ap->type!=SHIFTREDUCE ) continue; + rp = ap->x.rp; + if( rp->noCode==0 ) continue; + if( rp->nrhs!=1 ) continue; +#if 1 + /* Only apply this optimization to non-terminals. It would be OK to + ** apply it to terminal symbols too, but that makes the parser tables + ** larger. */ + if( ap->sp->indexnterminal ) continue; +#endif + /* If we reach this point, it means the optimization can be applied */ + nextap = ap; + for(ap2=stp->ap; ap2 && (ap2==ap || ap2->sp!=rp->lhs); ap2=ap2->next){} + assert( ap2!=0 ); + ap->spOpt = ap2->sp; + ap->type = ap2->type; + ap->x = ap2->x; + } + } +} + + +/* +** Compare two states for sorting purposes. The smaller state is the +** one with the most non-terminal actions. If they have the same number +** of non-terminal actions, then the smaller is the one with the most +** token actions. 
+*/ +static int stateResortCompare(const void *a, const void *b){ + const struct state *pA = *(const struct state**)a; + const struct state *pB = *(const struct state**)b; + int n; + + n = pB->nNtAct - pA->nNtAct; + if( n==0 ){ + n = pB->nTknAct - pA->nTknAct; + if( n==0 ){ + n = pB->statenum - pA->statenum; + } + } + assert( n!=0 ); + return n; +} + + +/* +** Renumber and resort states so that states with fewer choices +** occur at the end. Except, keep state 0 as the first state. +*/ +void ResortStates(struct lemon *lemp) +{ + int i; + struct state *stp; + struct action *ap; + + for(i=0; instate; i++){ + stp = lemp->sorted[i]; + stp->nTknAct = stp->nNtAct = 0; + stp->iDfltReduce = -1; /* Init dflt action to "syntax error" */ + stp->iTknOfst = NO_OFFSET; + stp->iNtOfst = NO_OFFSET; + for(ap=stp->ap; ap; ap=ap->next){ + int iAction = compute_action(lemp,ap); + if( iAction>=0 ){ + if( ap->sp->indexnterminal ){ + stp->nTknAct++; + }else if( ap->sp->indexnsymbol ){ + stp->nNtAct++; + }else{ + assert( stp->autoReduce==0 || stp->pDfltReduce==ap->x.rp ); + stp->iDfltReduce = iAction; + } + } + } + } + qsort(&lemp->sorted[1], lemp->nstate-1, sizeof(lemp->sorted[0]), + stateResortCompare); + for(i=0; instate; i++){ + lemp->sorted[i]->statenum = i; + } + lemp->nxstate = lemp->nstate; + while( lemp->nxstate>1 && lemp->sorted[lemp->nxstate-1]->autoReduce ){ + lemp->nxstate--; + } +} + + +/***************** From the file "set.c" ************************************/ +/* +** Set manipulation routines for the LEMON parser generator. +*/ + +static int size = 0; + +/* Set the set size */ +void SetSize(int n) +{ + size = n+1; +} + +/* Allocate a new set */ +char *SetNew(void){ + char *s; + s = (char*)lemon_calloc( size, 1); + if( s==0 ){ + memory_error(); + } + return s; +} + +/* Deallocate a set */ +void SetFree(char *s) +{ + lemon_free(s); +} + +/* Add a new element to the set. Return TRUE if the element was added +** and FALSE if it was already there. 
*/ +int SetAdd(char *s, int e) +{ + int rv; + assert( e>=0 && esize = 1024; + x1a->count = 0; + x1a->tbl = (x1node*)lemon_calloc(1024, sizeof(x1node) + sizeof(x1node*)); + if( x1a->tbl==0 ){ + lemon_free(x1a); + x1a = 0; + }else{ + int i; + x1a->ht = (x1node**)&(x1a->tbl[1024]); + for(i=0; i<1024; i++) x1a->ht[i] = 0; + } + } +} +/* Insert a new record into the array. Return TRUE if successful. +** Prior data with the same key is NOT overwritten */ +int Strsafe_insert(const char *data) +{ + x1node *np; + unsigned h; + unsigned ph; + + if( x1a==0 ) return 0; + ph = strhash(data); + h = ph & (x1a->size-1); + np = x1a->ht[h]; + while( np ){ + if( strcmp(np->data,data)==0 ){ + /* An existing entry with the same key is found. */ + /* Fail because overwrite is not allows. */ + return 0; + } + np = np->next; + } + if( x1a->count>=x1a->size ){ + /* Need to make the hash table bigger */ + int i,arrSize; + struct s_x1 array; + array.size = arrSize = x1a->size*2; + array.count = x1a->count; + array.tbl = (x1node*)lemon_calloc(arrSize, sizeof(x1node)+sizeof(x1node*)); + if( array.tbl==0 ) return 0; /* Fail due to malloc failure */ + array.ht = (x1node**)&(array.tbl[arrSize]); + for(i=0; icount; i++){ + x1node *oldnp, *newnp; + oldnp = &(x1a->tbl[i]); + h = strhash(oldnp->data) & (arrSize-1); + newnp = &(array.tbl[i]); + if( array.ht[h] ) array.ht[h]->from = &(newnp->next); + newnp->next = array.ht[h]; + newnp->data = oldnp->data; + newnp->from = &(array.ht[h]); + array.ht[h] = newnp; + } + /* lemon_free(x1a->tbl); // This program was originally for 16-bit machines. + ** Don't worry about freeing memory on modern platforms. */ + *x1a = array; + } + /* Insert the new data */ + h = ph & (x1a->size-1); + np = &(x1a->tbl[x1a->count++]); + np->data = data; + if( x1a->ht[h] ) x1a->ht[h]->from = &(np->next); + np->next = x1a->ht[h]; + x1a->ht[h] = np; + np->from = &(x1a->ht[h]); + return 1; +} + +/* Return a pointer to data assigned to the given key. Return NULL +** if no such key. 
*/ +const char *Strsafe_find(const char *key) +{ + unsigned h; + x1node *np; + + if( x1a==0 ) return 0; + h = strhash(key) & (x1a->size-1); + np = x1a->ht[h]; + while( np ){ + if( strcmp(np->data,key)==0 ) break; + np = np->next; + } + return np ? np->data : 0; +} + +/* Return a pointer to the (terminal or nonterminal) symbol "x". +** Create a new symbol if this is the first time "x" has been seen. +*/ +struct symbol *Symbol_new(const char *x) +{ + struct symbol *sp; + + sp = Symbol_find(x); + if( sp==0 ){ + sp = (struct symbol *)lemon_calloc(1, sizeof(struct symbol) ); + MemoryCheck(sp); + sp->name = Strsafe(x); + sp->type = ISUPPER(*x) ? TERMINAL : NONTERMINAL; + sp->rule = 0; + sp->fallback = 0; + sp->prec = -1; + sp->assoc = UNK; + sp->firstset = 0; + sp->lambda = LEMON_FALSE; + sp->destructor = 0; + sp->destLineno = 0; + sp->datatype = 0; + sp->useCnt = 0; + Symbol_insert(sp,sp->name); + } + sp->useCnt++; + return sp; +} + +/* Compare two symbols for sorting purposes. Return negative, +** zero, or positive if a is less then, equal to, or greater +** than b. +** +** Symbols that begin with upper case letters (terminals or tokens) +** must sort before symbols that begin with lower case letters +** (non-terminals). And MULTITERMINAL symbols (created using the +** %token_class directive) must sort at the very end. Other than +** that, the order does not matter. +** +** We find experimentally that leaving the symbols in their original +** order (the order they appeared in the grammar file) gives the +** smallest parser tables in SQLite. +*/ +int Symbolcmpp(const void *_a, const void *_b) +{ + const struct symbol *a = *(const struct symbol **) _a; + const struct symbol *b = *(const struct symbol **) _b; + int i1 = a->type==MULTITERMINAL ? 3 : a->name[0]>'Z' ? 2 : 1; + int i2 = b->type==MULTITERMINAL ? 3 : b->name[0]>'Z' ? 2 : 1; + return i1==i2 ? 
a->index - b->index : i1 - i2; +} + +/* There is one instance of the following structure for each +** associative array of type "x2". +*/ +struct s_x2 { + int size; /* The number of available slots. */ + /* Must be a power of 2 greater than or */ + /* equal to 1 */ + int count; /* Number of currently slots filled */ + struct s_x2node *tbl; /* The data stored here */ + struct s_x2node **ht; /* Hash table for lookups */ +}; + +/* There is one instance of this structure for every data element +** in an associative array of type "x2". +*/ +typedef struct s_x2node { + struct symbol *data; /* The data */ + const char *key; /* The key */ + struct s_x2node *next; /* Next entry with the same hash */ + struct s_x2node **from; /* Previous link */ +} x2node; + +/* There is only one instance of the array, which is the following */ +static struct s_x2 *x2a; + +/* Allocate a new associative array */ +void Symbol_init(void){ + if( x2a ) return; + x2a = (struct s_x2*)lemon_malloc( sizeof(struct s_x2) ); + if( x2a ){ + x2a->size = 128; + x2a->count = 0; + x2a->tbl = (x2node*)lemon_calloc(128, sizeof(x2node) + sizeof(x2node*)); + if( x2a->tbl==0 ){ + lemon_free(x2a); + x2a = 0; + }else{ + int i; + x2a->ht = (x2node**)&(x2a->tbl[128]); + for(i=0; i<128; i++) x2a->ht[i] = 0; + } + } +} +/* Insert a new record into the array. Return TRUE if successful. +** Prior data with the same key is NOT overwritten */ +int Symbol_insert(struct symbol *data, const char *key) +{ + x2node *np; + unsigned h; + unsigned ph; + + if( x2a==0 ) return 0; + ph = strhash(key); + h = ph & (x2a->size-1); + np = x2a->ht[h]; + while( np ){ + if( strcmp(np->key,key)==0 ){ + /* An existing entry with the same key is found. */ + /* Fail because overwrite is not allows. 
*/ + return 0; + } + np = np->next; + } + if( x2a->count>=x2a->size ){ + /* Need to make the hash table bigger */ + int i,arrSize; + struct s_x2 array; + array.size = arrSize = x2a->size*2; + array.count = x2a->count; + array.tbl = (x2node*)lemon_calloc(arrSize, sizeof(x2node)+sizeof(x2node*)); + if( array.tbl==0 ) return 0; /* Fail due to malloc failure */ + array.ht = (x2node**)&(array.tbl[arrSize]); + for(i=0; icount; i++){ + x2node *oldnp, *newnp; + oldnp = &(x2a->tbl[i]); + h = strhash(oldnp->key) & (arrSize-1); + newnp = &(array.tbl[i]); + if( array.ht[h] ) array.ht[h]->from = &(newnp->next); + newnp->next = array.ht[h]; + newnp->key = oldnp->key; + newnp->data = oldnp->data; + newnp->from = &(array.ht[h]); + array.ht[h] = newnp; + } + /* lemon_free(x2a->tbl); // This program was originally written for 16-bit + ** machines. Don't worry about freeing this trivial amount of memory + ** on modern platforms. Just leak it. */ + *x2a = array; + } + /* Insert the new data */ + h = ph & (x2a->size-1); + np = &(x2a->tbl[x2a->count++]); + np->key = key; + np->data = data; + if( x2a->ht[h] ) x2a->ht[h]->from = &(np->next); + np->next = x2a->ht[h]; + x2a->ht[h] = np; + np->from = &(x2a->ht[h]); + return 1; +} + +/* Return a pointer to data assigned to the given key. Return NULL +** if no such key. */ +struct symbol *Symbol_find(const char *key) +{ + unsigned h; + x2node *np; + + if( x2a==0 ) return 0; + h = strhash(key) & (x2a->size-1); + np = x2a->ht[h]; + while( np ){ + if( strcmp(np->key,key)==0 ) break; + np = np->next; + } + return np ? np->data : 0; +} + +/* Return the n-th data. Return NULL if n is out of range. */ +struct symbol *Symbol_Nth(int n) +{ + struct symbol *data; + if( x2a && n>0 && n<=x2a->count ){ + data = x2a->tbl[n-1].data; + }else{ + data = 0; + } + return data; +} + +/* Return the size of the array */ +int Symbol_count() +{ + return x2a ? x2a->count : 0; +} + +/* Return an array of pointers to all data in the table. 
+** The array is obtained from malloc. Return NULL if memory allocation +** problems, or if the array is empty. */ +struct symbol **Symbol_arrayof() +{ + struct symbol **array; + int i,arrSize; + if( x2a==0 ) return 0; + arrSize = x2a->count; + array = (struct symbol **)lemon_calloc(arrSize, sizeof(struct symbol *)); + if( array ){ + for(i=0; itbl[i].data; + } + return array; +} + +/* Compare two configurations */ +int Configcmp(const char *_a,const char *_b) +{ + const struct config *a = (struct config *) _a; + const struct config *b = (struct config *) _b; + int x; + x = a->rp->index - b->rp->index; + if( x==0 ) x = a->dot - b->dot; + return x; +} + +/* Compare two states */ +PRIVATE int statecmp(struct config *a, struct config *b) +{ + int rc; + for(rc=0; rc==0 && a && b; a=a->bp, b=b->bp){ + rc = a->rp->index - b->rp->index; + if( rc==0 ) rc = a->dot - b->dot; + } + if( rc==0 ){ + if( a ) rc = 1; + if( b ) rc = -1; + } + return rc; +} + +/* Hash a state */ +PRIVATE unsigned statehash(struct config *a) +{ + unsigned h=0; + while( a ){ + h = h*571 + a->rp->index*37 + a->dot; + a = a->bp; + } + return h; +} + +/* Allocate a new state structure */ +struct state *State_new() +{ + struct state *newstate; + newstate = (struct state *)lemon_calloc(1, sizeof(struct state) ); + MemoryCheck(newstate); + return newstate; +} + +/* There is one instance of the following structure for each +** associative array of type "x3". +*/ +struct s_x3 { + int size; /* The number of available slots. */ + /* Must be a power of 2 greater than or */ + /* equal to 1 */ + int count; /* Number of currently slots filled */ + struct s_x3node *tbl; /* The data stored here */ + struct s_x3node **ht; /* Hash table for lookups */ +}; + +/* There is one instance of this structure for every data element +** in an associative array of type "x3". 
+*/ +typedef struct s_x3node { + struct state *data; /* The data */ + struct config *key; /* The key */ + struct s_x3node *next; /* Next entry with the same hash */ + struct s_x3node **from; /* Previous link */ +} x3node; + +/* There is only one instance of the array, which is the following */ +static struct s_x3 *x3a; + +/* Allocate a new associative array */ +void State_init(void){ + if( x3a ) return; + x3a = (struct s_x3*)lemon_malloc( sizeof(struct s_x3) ); + if( x3a ){ + x3a->size = 128; + x3a->count = 0; + x3a->tbl = (x3node*)lemon_calloc(128, sizeof(x3node) + sizeof(x3node*)); + if( x3a->tbl==0 ){ + lemon_free(x3a); + x3a = 0; + }else{ + int i; + x3a->ht = (x3node**)&(x3a->tbl[128]); + for(i=0; i<128; i++) x3a->ht[i] = 0; + } + } +} +/* Insert a new record into the array. Return TRUE if successful. +** Prior data with the same key is NOT overwritten */ +int State_insert(struct state *data, struct config *key) +{ + x3node *np; + unsigned h; + unsigned ph; + + if( x3a==0 ) return 0; + ph = statehash(key); + h = ph & (x3a->size-1); + np = x3a->ht[h]; + while( np ){ + if( statecmp(np->key,key)==0 ){ + /* An existing entry with the same key is found. */ + /* Fail because overwrite is not allows. 
*/ + return 0; + } + np = np->next; + } + if( x3a->count>=x3a->size ){ + /* Need to make the hash table bigger */ + int i,arrSize; + struct s_x3 array; + array.size = arrSize = x3a->size*2; + array.count = x3a->count; + array.tbl = (x3node*)lemon_calloc(arrSize, sizeof(x3node)+sizeof(x3node*)); + if( array.tbl==0 ) return 0; /* Fail due to malloc failure */ + array.ht = (x3node**)&(array.tbl[arrSize]); + for(i=0; icount; i++){ + x3node *oldnp, *newnp; + oldnp = &(x3a->tbl[i]); + h = statehash(oldnp->key) & (arrSize-1); + newnp = &(array.tbl[i]); + if( array.ht[h] ) array.ht[h]->from = &(newnp->next); + newnp->next = array.ht[h]; + newnp->key = oldnp->key; + newnp->data = oldnp->data; + newnp->from = &(array.ht[h]); + array.ht[h] = newnp; + } + lemon_free(x3a->tbl); + *x3a = array; + } + /* Insert the new data */ + h = ph & (x3a->size-1); + np = &(x3a->tbl[x3a->count++]); + np->key = key; + np->data = data; + if( x3a->ht[h] ) x3a->ht[h]->from = &(np->next); + np->next = x3a->ht[h]; + x3a->ht[h] = np; + np->from = &(x3a->ht[h]); + return 1; +} + +/* Return a pointer to data assigned to the given key. Return NULL +** if no such key. */ +struct state *State_find(struct config *key) +{ + unsigned h; + x3node *np; + + if( x3a==0 ) return 0; + h = statehash(key) & (x3a->size-1); + np = x3a->ht[h]; + while( np ){ + if( statecmp(np->key,key)==0 ) break; + np = np->next; + } + return np ? np->data : 0; +} + +/* Return an array of pointers to all data in the table. +** The array is obtained from malloc. Return NULL if memory allocation +** problems, or if the array is empty. 
*/ +struct state **State_arrayof(void) +{ + struct state **array; + int i,arrSize; + if( x3a==0 ) return 0; + arrSize = x3a->count; + array = (struct state **)lemon_calloc(arrSize, sizeof(struct state *)); + if( array ){ + for(i=0; itbl[i].data; + } + return array; +} + +/* Hash a configuration */ +PRIVATE unsigned confighash(struct config *a) +{ + unsigned h=0; + h = h*571 + a->rp->index*37 + a->dot; + return h; +} + +/* There is one instance of the following structure for each +** associative array of type "x4". +*/ +struct s_x4 { + int size; /* The number of available slots. */ + /* Must be a power of 2 greater than or */ + /* equal to 1 */ + int count; /* Number of currently slots filled */ + struct s_x4node *tbl; /* The data stored here */ + struct s_x4node **ht; /* Hash table for lookups */ +}; + +/* There is one instance of this structure for every data element +** in an associative array of type "x4". +*/ +typedef struct s_x4node { + struct config *data; /* The data */ + struct s_x4node *next; /* Next entry with the same hash */ + struct s_x4node **from; /* Previous link */ +} x4node; + +/* There is only one instance of the array, which is the following */ +static struct s_x4 *x4a; + +/* Allocate a new associative array */ +void Configtable_init(void){ + if( x4a ) return; + x4a = (struct s_x4*)lemon_malloc( sizeof(struct s_x4) ); + if( x4a ){ + x4a->size = 64; + x4a->count = 0; + x4a->tbl = (x4node*)lemon_calloc(64, sizeof(x4node) + sizeof(x4node*)); + if( x4a->tbl==0 ){ + lemon_free(x4a); + x4a = 0; + }else{ + int i; + x4a->ht = (x4node**)&(x4a->tbl[64]); + for(i=0; i<64; i++) x4a->ht[i] = 0; + } + } +} +/* Insert a new record into the array. Return TRUE if successful. 
+** Prior data with the same key is NOT overwritten */ +int Configtable_insert(struct config *data) +{ + x4node *np; + unsigned h; + unsigned ph; + + if( x4a==0 ) return 0; + ph = confighash(data); + h = ph & (x4a->size-1); + np = x4a->ht[h]; + while( np ){ + if( Configcmp((const char *) np->data,(const char *) data)==0 ){ + /* An existing entry with the same key is found. */ + /* Fail because overwrite is not allows. */ + return 0; + } + np = np->next; + } + if( x4a->count>=x4a->size ){ + /* Need to make the hash table bigger */ + int i,arrSize; + struct s_x4 array; + array.size = arrSize = x4a->size*2; + array.count = x4a->count; + array.tbl = (x4node*)lemon_calloc(arrSize, + sizeof(x4node) + sizeof(x4node*)); + if( array.tbl==0 ) return 0; /* Fail due to malloc failure */ + array.ht = (x4node**)&(array.tbl[arrSize]); + for(i=0; icount; i++){ + x4node *oldnp, *newnp; + oldnp = &(x4a->tbl[i]); + h = confighash(oldnp->data) & (arrSize-1); + newnp = &(array.tbl[i]); + if( array.ht[h] ) array.ht[h]->from = &(newnp->next); + newnp->next = array.ht[h]; + newnp->data = oldnp->data; + newnp->from = &(array.ht[h]); + array.ht[h] = newnp; + } + *x4a = array; + } + /* Insert the new data */ + h = ph & (x4a->size-1); + np = &(x4a->tbl[x4a->count++]); + np->data = data; + if( x4a->ht[h] ) x4a->ht[h]->from = &(np->next); + np->next = x4a->ht[h]; + x4a->ht[h] = np; + np->from = &(x4a->ht[h]); + return 1; +} + +/* Return a pointer to data assigned to the given key. Return NULL +** if no such key. */ +struct config *Configtable_find(struct config *key) +{ + int h; + x4node *np; + + if( x4a==0 ) return 0; + h = confighash(key) & (x4a->size-1); + np = x4a->ht[h]; + while( np ){ + if( Configcmp((const char *) np->data,(const char *) key)==0 ) break; + np = np->next; + } + return np ? np->data : 0; +} + +/* Remove all data from the table. Pass each data to the function "f" +** as it is removed. ("f" may be null to avoid this step.) 
*/ +void Configtable_clear(int(*f)(struct config *)) +{ + int i; + if( x4a==0 || x4a->count==0 ) return; + if( f ) for(i=0; icount; i++) (*f)(x4a->tbl[i].data); + for(i=0; isize; i++) x4a->ht[i] = 0; + x4a->count = 0; + return; +} diff --git a/contrib/lemon/lempar.c b/contrib/lemon/lempar.c new file mode 100644 index 00000000000..851a0e2e543 --- /dev/null +++ b/contrib/lemon/lempar.c @@ -0,0 +1,1086 @@ +/* +** 2000-05-29 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Driver template for the LEMON parser generator. +** +** The "lemon" program processes an LALR(1) input grammar file, then uses +** this template to construct a parser. The "lemon" program inserts text +** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the +** interstitial "-" characters) contained in this template is changed into +** the value of the %name directive from the grammar. Otherwise, the content +** of this template is copied straight through into the generate parser +** source file. +** +** The following is the concatenation of all %include directives from the +** input grammar file: +*/ +/************ Begin %include sections from the grammar ************************/ +%% +/**************** End of %include directives **********************************/ +/* These constants specify the various numeric values for terminal symbols. +***************** Begin token definitions *************************************/ +%% +/**************** End token definitions ***************************************/ + +/* The next sections is a series of control #defines. +** various aspects of the generated parser. 
+** YYCODETYPE is the data type used to store the integer codes +** that represent terminal and non-terminal symbols. +** "unsigned char" is used if there are fewer than +** 256 symbols. Larger types otherwise. +** YYNOCODE is a number of type YYCODETYPE that is not used for +** any terminal or nonterminal symbol. +** YYFALLBACK If defined, this indicates that one or more tokens +** (also known as: "terminal symbols") have fall-back +** values which should be used if the original symbol +** would not parse. This permits keywords to sometimes +** be used as identifiers, for example. +** YYACTIONTYPE is the data type used for "action codes" - numbers +** that indicate what to do in response to the next +** token. +** ParseTOKENTYPE is the data type used for minor type for terminal +** symbols. Background: A "minor type" is a semantic +** value associated with a terminal or non-terminal +** symbols. For example, for an "ID" terminal symbol, +** the minor type might be the name of the identifier. +** Each non-terminal can have a different minor type. +** Terminal symbols all have the same minor type, though. +** This macros defines the minor type for terminal +** symbols. +** YYMINORTYPE is the data type used for all minor types. +** This is typically a union of many types, one of +** which is ParseTOKENTYPE. The entry in the union +** for terminal symbols is called "yy0". +** YYSTACKDEPTH is the maximum depth of the parser's stack. 
If +** zero the stack is dynamically sized using realloc() +** ParseARG_SDECL A static variable declaration for the %extra_argument +** ParseARG_PDECL A parameter declaration for the %extra_argument +** ParseARG_PARAM Code to pass %extra_argument as a subroutine parameter +** ParseARG_STORE Code to store %extra_argument into yypParser +** ParseARG_FETCH Code to extract %extra_argument from yypParser +** ParseCTX_* As ParseARG_ except for %extra_context +** YYREALLOC Name of the realloc() function to use +** YYFREE Name of the free() function to use +** YYDYNSTACK True if stack space should be extended on heap +** YYERRORSYMBOL is the code number of the error symbol. If not +** defined, then do no error processing. +** YYNSTATE the combined number of states. +** YYNRULE the number of rules in the grammar +** YYNTOKEN Number of terminal symbols +** YY_MAX_SHIFT Maximum value for shift actions +** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions +** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions +** YY_ERROR_ACTION The yy_action[] code for syntax error +** YY_ACCEPT_ACTION The yy_action[] code for accept +** YY_NO_ACTION The yy_action[] code for no-op +** YY_MIN_REDUCE Minimum value for reduce actions +** YY_MAX_REDUCE Maximum value for reduce actions +** YY_MIN_DSTRCTR Minimum symbol value that has a destructor +** YY_MAX_DSTRCTR Maximum symbol value that has a destructor +*/ +#ifndef INTERFACE +# define INTERFACE 1 +#endif +/************* Begin control #defines *****************************************/ +%% +/************* End control #defines *******************************************/ +#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) + +/* Define the yytestcase() macro to be a no-op if is not already defined +** otherwise. +** +** Applications can choose to define yytestcase() in the %include section +** to a macro that can assist in verifying code coverage. 
For production +** code the yytestcase() macro should be turned off. But it is useful +** for testing. +*/ +#ifndef yytestcase +# define yytestcase(X) +#endif + +/* Macro to determine if stack space has the ability to grow using +** heap memory. +*/ +#if YYSTACKDEPTH<=0 || YYDYNSTACK +# define YYGROWABLESTACK 1 +#else +# define YYGROWABLESTACK 0 +#endif + +/* Guarantee a minimum number of initial stack slots. +*/ +#if YYSTACKDEPTH<=0 +# undef YYSTACKDEPTH +# define YYSTACKDEPTH 2 /* Need a minimum stack size */ +#endif + + +/* Next are the tables used to determine what action to take based on the +** current state and lookahead token. These tables are used to implement +** functions that take a state number and lookahead value and return an +** action integer. +** +** Suppose the action integer is N. Then the action is determined as +** follows +** +** 0 <= N <= YY_MAX_SHIFT Shift N. That is, push the lookahead +** token onto the stack and goto state N. +** +** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then +** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE. +** +** N == YY_ERROR_ACTION A syntax error has occurred. +** +** N == YY_ACCEPT_ACTION The parser accepts its input. +** +** N == YY_NO_ACTION No such action. Denotes unused +** slots in the yy_action[] table. +** +** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE +** and YY_MAX_REDUCE +** +** The action table is constructed as a single large table named yy_action[]. +** Given state S and lookahead X, the action is computed as either: +** +** (A) N = yy_action[ yy_shift_ofst[S] + X ] +** (B) N = yy_default[S] +** +** The (A) formula is preferred. The B formula is used instead if +** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X. +** +** The formulas above are for computing the action when the lookahead is +** a terminal symbol. 
If the lookahead is a non-terminal (as occurs after +** a reduce action) then the yy_reduce_ofst[] array is used in place of +** the yy_shift_ofst[] array. +** +** The following are the tables generated in this section: +** +** yy_action[] A single table containing all actions. +** yy_lookahead[] A table containing the lookahead for each entry in +** yy_action. Used to detect hash collisions. +** yy_shift_ofst[] For each state, the offset into yy_action for +** shifting terminals. +** yy_reduce_ofst[] For each state, the offset into yy_action for +** shifting non-terminals after a reduce. +** yy_default[] Default action for each state. +** +*********** Begin parsing tables **********************************************/ +%% +/********** End of lemon-generated parsing tables *****************************/ + +/* The next table maps tokens (terminal symbols) into fallback tokens. +** If a construct like the following: +** +** %fallback ID X Y Z. +** +** appears in the grammar, then ID becomes a fallback token for X, Y, +** and Z. Whenever one of the tokens X, Y, or Z is input to the parser +** but it does not parse, the type of the token is changed to ID and +** the parse is retried before an error is thrown. +** +** This feature can be used, for example, to cause some keywords in a language +** to revert to identifiers if they keyword does not apply in the context where +** it appears. +*/ +#ifdef YYFALLBACK +static const YYCODETYPE yyFallback[] = { +%% +}; +#endif /* YYFALLBACK */ + +/* The following structure represents a single element of the +** parser's stack. Information stored includes: +** +** + The state number for the parser at this level of the stack. +** +** + The value of the token stored at this level of the stack. +** (In other words, the "major" token.) +** +** + The semantic value stored at this level of the stack. This is +** the information used by the action routines in the grammar. +** It is sometimes called the "minor" token. 
+** +** After the "shift" half of a SHIFTREDUCE action, the stateno field +** actually contains the reduce action for the second half of the +** SHIFTREDUCE. +*/ +struct yyStackEntry { + YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */ + YYCODETYPE major; /* The major token value. This is the code + ** number for the token at this stack level */ + YYMINORTYPE minor; /* The user-supplied minor token value. This + ** is the value of the token */ +}; +typedef struct yyStackEntry yyStackEntry; + +/* The state of the parser is completely contained in an instance of +** the following structure */ +struct yyParser { + yyStackEntry *yytos; /* Pointer to top element of the stack */ +#ifdef YYTRACKMAXSTACKDEPTH + int yyhwm; /* High-water mark of the stack */ +#endif +#ifndef YYNOERRORRECOVERY + int yyerrcnt; /* Shifts left before out of the error */ +#endif + ParseARG_SDECL /* A place to hold %extra_argument */ + ParseCTX_SDECL /* A place to hold %extra_context */ + yyStackEntry *yystackEnd; /* Last entry in the stack */ + yyStackEntry *yystack; /* The parser stack */ + yyStackEntry yystk0[YYSTACKDEPTH]; /* Initial stack space */ +}; +typedef struct yyParser yyParser; + +#include +#ifndef NDEBUG +#include +static FILE *yyTraceFILE = 0; +static char *yyTracePrompt = 0; +#endif /* NDEBUG */ + +#ifndef NDEBUG +/* +** Turn parser tracing on by giving a stream to which to write the trace +** and a prompt to preface each trace message. Tracing is turned off +** by making either argument NULL +** +** Inputs: +**
    +**
  • A FILE* to which trace output should be written. +** If NULL, then tracing is turned off. +**
  • A prefix string written at the beginning of every +** line of trace output. If NULL, then tracing is +** turned off. +**
+** +** Outputs: +** None. +*/ +void ParseTrace(FILE *TraceFILE, char *zTracePrompt){ + yyTraceFILE = TraceFILE; + yyTracePrompt = zTracePrompt; + if( yyTraceFILE==0 ) yyTracePrompt = 0; + else if( yyTracePrompt==0 ) yyTraceFILE = 0; +} +#endif /* NDEBUG */ + +#if defined(YYCOVERAGE) || !defined(NDEBUG) +/* For tracing shifts, the names of all terminals and nonterminals +** are required. The following table supplies these names */ +static const char *const yyTokenName[] = { +%% +}; +#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ + +#ifndef NDEBUG +/* For tracing reduce actions, the names of all rules are required. +*/ +static const char *const yyRuleName[] = { +%% +}; +#endif /* NDEBUG */ + + +#if YYGROWABLESTACK +/* +** Try to increase the size of the parser stack. Return the number +** of errors. Return 0 on success. +*/ +static int yyGrowStack(yyParser *p){ + int oldSize = 1 + (int)(p->yystackEnd - p->yystack); + int newSize; + int idx; + yyStackEntry *pNew; + + newSize = oldSize*2 + 100; + idx = (int)(p->yytos - p->yystack); + if( p->yystack==p->yystk0 ){ + pNew = YYREALLOC(0, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; + memcpy(pNew, p->yystack, oldSize*sizeof(pNew[0])); + }else{ + pNew = YYREALLOC(p->yystack, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; + } + p->yystack = pNew; + p->yytos = &p->yystack[idx]; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", + yyTracePrompt, oldSize, newSize); + } +#endif + p->yystackEnd = &p->yystack[newSize-1]; + return 0; +} +#endif /* YYGROWABLESTACK */ + +#if !YYGROWABLESTACK +/* For builds that do no have a growable stack, yyGrowStack always +** returns an error. +*/ +# define yyGrowStack(X) 1 +#endif + +/* Datatype of the argument to the memory allocated passed as the +** second argument to ParseAlloc() below. This can be changed by +** putting an appropriate #define in the %include section of the input +** grammar. 
+*/ +#ifndef YYMALLOCARGTYPE +# define YYMALLOCARGTYPE size_t +#endif + +/* Initialize a new parser that has already been allocated. +*/ +void ParseInit(void *yypRawParser ParseCTX_PDECL){ + yyParser *yypParser = (yyParser*)yypRawParser; + ParseCTX_STORE +#ifdef YYTRACKMAXSTACKDEPTH + yypParser->yyhwm = 0; +#endif + yypParser->yystack = yypParser->yystk0; + yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + yypParser->yytos = yypParser->yystack; + yypParser->yystack[0].stateno = 0; + yypParser->yystack[0].major = 0; +} + +#ifndef Parse_ENGINEALWAYSONSTACK +/* +** This function allocates a new parser. +** The only argument is a pointer to a function which works like +** malloc. +** +** Inputs: +** A pointer to the function used to allocate memory. +** +** Outputs: +** A pointer to a parser. This pointer is used in subsequent calls +** to Parse and ParseFree. +*/ +void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) ParseCTX_PDECL){ + yyParser *yypParser; + yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); + if( yypParser ){ + ParseCTX_STORE + ParseInit(yypParser ParseCTX_PARAM); + } + return (void*)yypParser; +} +#endif /* Parse_ENGINEALWAYSONSTACK */ + + +/* The following function deletes the "minor type" or semantic value +** associated with a symbol. The symbol can be either a terminal +** or nonterminal. "yymajor" is the symbol code, and "yypminor" is +** a pointer to the value to be deleted. The code used to do the +** deletions is derived from the %destructor and/or %token_destructor +** directives of the input grammar. 
+*/ +static void yy_destructor( + yyParser *yypParser, /* The parser */ + YYCODETYPE yymajor, /* Type code for object to destroy */ + YYMINORTYPE *yypminor /* The object to be destroyed */ +){ + ParseARG_FETCH + ParseCTX_FETCH + switch( yymajor ){ + /* Here is inserted the actions which take place when a + ** terminal or non-terminal is destroyed. This can happen + ** when the symbol is popped from the stack during a + ** reduce or during error processing or when a parser is + ** being destroyed before it is finished parsing. + ** + ** Note: during a reduce, the only symbols destroyed are those + ** which appear on the RHS of the rule, but which are *not* used + ** inside the C code. + */ +/********* Begin destructor definitions ***************************************/ +%% +/********* End destructor definitions *****************************************/ + default: break; /* If no destructor action specified: do nothing */ + } +} + +/* +** Pop the parser's stack once. +** +** If there is a destructor routine associated with the token which +** is popped from the stack, then call it. 
+*/ +static void yy_pop_parser_stack(yyParser *pParser){ + yyStackEntry *yytos; + assert( pParser->yytos!=0 ); + assert( pParser->yytos > pParser->yystack ); + yytos = pParser->yytos--; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sPopping %s\n", + yyTracePrompt, + yyTokenName[yytos->major]); + } +#endif + yy_destructor(pParser, yytos->major, &yytos->minor); +} + +/* +** Clear all secondary memory allocations from the parser +*/ +void ParseFinalize(void *p){ + yyParser *pParser = (yyParser*)p; + + /* In-lined version of calling yy_pop_parser_stack() for each + ** element left in the stack */ + yyStackEntry *yytos = pParser->yytos; + while( yytos>pParser->yystack ){ +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sPopping %s\n", + yyTracePrompt, + yyTokenName[yytos->major]); + } +#endif + if( yytos->major>=YY_MIN_DSTRCTR ){ + yy_destructor(pParser, yytos->major, &yytos->minor); + } + yytos--; + } + +#if YYGROWABLESTACK + if( pParser->yystack!=pParser->yystk0 ) YYFREE(pParser->yystack); +#endif +} + +#ifndef Parse_ENGINEALWAYSONSTACK +/* +** Deallocate and destroy a parser. Destructors are called for +** all stack elements before shutting the parser down. +** +** If the YYPARSEFREENEVERNULL macro exists (for example because it +** is defined in a %include section of the input grammar) then it is +** assumed that the input pointer is never NULL. +*/ +void ParseFree( + void *p, /* The parser to be deleted */ + void (*freeProc)(void*) /* Function used to reclaim memory */ +){ +#ifndef YYPARSEFREENEVERNULL + if( p==0 ) return; +#endif + ParseFinalize(p); + (*freeProc)(p); +} +#endif /* Parse_ENGINEALWAYSONSTACK */ + +/* +** Return the peak depth of the stack for a parser. +*/ +#ifdef YYTRACKMAXSTACKDEPTH +int ParseStackPeak(void *p){ + yyParser *pParser = (yyParser*)p; + return pParser->yyhwm; +} +#endif + +/* This array of booleans keeps track of the parser statement +** coverage. 
The element yycoverage[X][Y] is set when the parser +** is in state X and has a lookahead token Y. In a well-tested +** systems, every element of this matrix should end up being set. +*/ +#if defined(YYCOVERAGE) +static unsigned char yycoverage[YYNSTATE][YYNTOKEN]; +#endif + +/* +** Write into out a description of every state/lookahead combination that +** +** (1) has not been used by the parser, and +** (2) is not a syntax error. +** +** Return the number of missed state/lookahead combinations. +*/ +#if defined(YYCOVERAGE) +int ParseCoverage(FILE *out){ + int stateno, iLookAhead, i; + int nMissed = 0; + for(stateno=0; statenoYY_MAX_SHIFT ) return stateno; + assert( stateno <= YY_SHIFT_COUNT ); +#if defined(YYCOVERAGE) + yycoverage[stateno][iLookAhead] = 1; +#endif + do{ + i = yy_shift_ofst[stateno]; + assert( i>=0 ); + assert( i<=YY_ACTTAB_COUNT ); + assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD ); + assert( iLookAhead!=YYNOCODE ); + assert( iLookAhead < YYNTOKEN ); + i += iLookAhead; + assert( i<(int)YY_NLOOKAHEAD ); + if( yy_lookahead[i]!=iLookAhead ){ +#ifdef YYFALLBACK + YYCODETYPE iFallback; /* Fallback token */ + assert( iLookAhead %s\n", + yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]); + } +#endif + assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */ + iLookAhead = iFallback; + continue; + } +#endif +#ifdef YYWILDCARD + { + int j = i - iLookAhead + YYWILDCARD; + assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) ); + if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){ +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n", + yyTracePrompt, yyTokenName[iLookAhead], + yyTokenName[YYWILDCARD]); + } +#endif /* NDEBUG */ + return yy_action[j]; + } + } +#endif /* YYWILDCARD */ + return yy_default[stateno]; + }else{ + assert( i>=0 && i<(int)(sizeof(yy_action)/sizeof(yy_action[0])) ); + return yy_action[i]; + } + }while(1); +} + +/* +** Find the appropriate action for a parser given the 
non-terminal +** look-ahead token iLookAhead. +*/ +static YYACTIONTYPE yy_find_reduce_action( + YYACTIONTYPE stateno, /* Current state number */ + YYCODETYPE iLookAhead /* The look-ahead token */ +){ + int i; +#ifdef YYERRORSYMBOL + if( stateno>YY_REDUCE_COUNT ){ + return yy_default[stateno]; + } +#else + assert( stateno<=YY_REDUCE_COUNT ); +#endif + i = yy_reduce_ofst[stateno]; + assert( iLookAhead!=YYNOCODE ); + i += iLookAhead; +#ifdef YYERRORSYMBOL + if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){ + return yy_default[stateno]; + } +#else + assert( i>=0 && iyytos>yypParser->yystack ) yy_pop_parser_stack(yypParser); + /* Here code is inserted which will execute if the parser + ** stack every overflows */ +/******** Begin %stack_overflow code ******************************************/ +%% +/******** End %stack_overflow code ********************************************/ + ParseARG_STORE /* Suppress warning about unused %extra_argument var */ + ParseCTX_STORE +} + +/* +** Print tracing information for a SHIFT action +*/ +#ifndef NDEBUG +static void yyTraceShift(yyParser *yypParser, int yyNewState, const char *zTag){ + if( yyTraceFILE ){ + if( yyNewStateyytos->major], + yyNewState); + }else{ + fprintf(yyTraceFILE,"%s%s '%s', pending reduce %d\n", + yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major], + yyNewState - YY_MIN_REDUCE); + } + } +} +#else +# define yyTraceShift(X,Y,Z) +#endif + +/* +** Perform a shift action. 
+*/ +static void yy_shift( + yyParser *yypParser, /* The parser to be shifted */ + YYACTIONTYPE yyNewState, /* The new state to shift in */ + YYCODETYPE yyMajor, /* The major token to shift in */ + ParseTOKENTYPE yyMinor /* The minor token to shift in */ +){ + yyStackEntry *yytos; + yypParser->yytos++; +#ifdef YYTRACKMAXSTACKDEPTH + if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) ); + } +#endif + yytos = yypParser->yytos; + if( yytos>yypParser->yystackEnd ){ + if( yyGrowStack(yypParser) ){ + yypParser->yytos--; + yyStackOverflow(yypParser); + return; + } + yytos = yypParser->yytos; + assert( yytos <= yypParser->yystackEnd ); + } + if( yyNewState > YY_MAX_SHIFT ){ + yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; + } + yytos->stateno = yyNewState; + yytos->major = yyMajor; + yytos->minor.yy0 = yyMinor; + yyTraceShift(yypParser, yyNewState, "Shift"); +} + +/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side +** of that rule */ +static const YYCODETYPE yyRuleInfoLhs[] = { +%% +}; + +/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number +** of symbols on the right-hand side of that rule. */ +static const signed char yyRuleInfoNRhs[] = { +%% +}; + +static void yy_accept(yyParser*); /* Forward Declaration */ + +/* +** Perform a reduce action and the shift that must immediately +** follow the reduce. +** +** The yyLookahead and yyLookaheadToken parameters provide reduce actions +** access to the lookahead token (if any). The yyLookahead will be YYNOCODE +** if the lookahead token has already been consumed. As this procedure is +** only called from one place, optimizing compilers will in-line it, which +** means that the extra parameters have no performance impact. 
+*/ +static YYACTIONTYPE yy_reduce( + yyParser *yypParser, /* The parser */ + unsigned int yyruleno, /* Number of the rule by which to reduce */ + int yyLookahead, /* Lookahead token, or YYNOCODE if none */ + ParseTOKENTYPE yyLookaheadToken /* Value of the lookahead token */ + ParseCTX_PDECL /* %extra_context */ +){ + int yygoto; /* The next state */ + YYACTIONTYPE yyact; /* The next action */ + yyStackEntry *yymsp; /* The top of the parser's stack */ + int yysize; /* Amount to pop the stack */ + ParseARG_FETCH + (void)yyLookahead; + (void)yyLookaheadToken; + yymsp = yypParser->yytos; + + switch( yyruleno ){ + /* Beginning here are the reduction cases. A typical example + ** follows: + ** case 0: + ** #line + ** { ... } // User supplied code + ** #line + ** break; + */ +/********** Begin reduce actions **********************************************/ +%% +/********** End reduce actions ************************************************/ + }; + assert( yyrulenoYY_MAX_SHIFT && yyact<=YY_MAX_SHIFTREDUCE) ); + + /* It is not possible for a REDUCE to be followed by an error */ + assert( yyact!=YY_ERROR_ACTION ); + + yymsp += yysize+1; + yypParser->yytos = yymsp; + yymsp->stateno = (YYACTIONTYPE)yyact; + yymsp->major = (YYCODETYPE)yygoto; + yyTraceShift(yypParser, yyact, "... 
then shift"); + return yyact; +} + +/* +** The following code executes when the parse fails +*/ +#ifndef YYNOERRORRECOVERY +static void yy_parse_failed( + yyParser *yypParser /* The parser */ +){ + ParseARG_FETCH + ParseCTX_FETCH +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); + } +#endif + while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser); + /* Here code is inserted which will be executed whenever the + ** parser fails */ +/************ Begin %parse_failure code ***************************************/ +%% +/************ End %parse_failure code *****************************************/ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE +} +#endif /* YYNOERRORRECOVERY */ + +/* +** The following code executes when a syntax error first occurs. +*/ +static void yy_syntax_error( + yyParser *yypParser, /* The parser */ + int yymajor, /* The major type of the error token */ + ParseTOKENTYPE yyminor /* The minor type of the error token */ +){ + ParseARG_FETCH + ParseCTX_FETCH +#define TOKEN yyminor +/************ Begin %syntax_error code ****************************************/ +%% +/************ End %syntax_error code ******************************************/ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE +} + +/* +** The following is executed when the parser accepts +*/ +static void yy_accept( + yyParser *yypParser /* The parser */ +){ + ParseARG_FETCH + ParseCTX_FETCH +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); + } +#endif +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + assert( yypParser->yytos==yypParser->yystack ); + /* Here code is inserted which will be executed whenever the + ** parser accepts */ +/*********** Begin %parse_accept code *****************************************/ +%% +/*********** End %parse_accept code 
*******************************************/ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE +} + +/* The main parser program. +** The first argument is a pointer to a structure obtained from +** "ParseAlloc" which describes the current state of the parser. +** The second argument is the major token number. The third is +** the minor token. The fourth optional argument is whatever the +** user wants (and specified in the grammar) and is available for +** use by the action routines. +** +** Inputs: +**
    +**
  • A pointer to the parser (an opaque structure.) +**
  • The major token number. +**
  • The minor token number. +**
  • An option argument of a grammar-specified type. +**
+** +** Outputs: +** None. +*/ +void Parse( + void *yyp, /* The parser */ + int yymajor, /* The major token code number */ + ParseTOKENTYPE yyminor /* The value for the token */ + ParseARG_PDECL /* Optional %extra_argument parameter */ +){ + YYMINORTYPE yyminorunion; + YYACTIONTYPE yyact; /* The parser action. */ +#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) + int yyendofinput; /* True if we are at the end of input */ +#endif +#ifdef YYERRORSYMBOL + int yyerrorhit = 0; /* True if yymajor has invoked an error */ +#endif + yyParser *yypParser = (yyParser*)yyp; /* The parser */ + ParseCTX_FETCH + ParseARG_STORE + + assert( yypParser->yytos!=0 ); +#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) + yyendofinput = (yymajor==0); +#endif + + yyact = yypParser->yytos->stateno; +#ifndef NDEBUG + if( yyTraceFILE ){ + if( yyact < YY_MIN_REDUCE ){ + fprintf(yyTraceFILE,"%sInput '%s' in state %d\n", + yyTracePrompt,yyTokenName[yymajor],yyact); + }else{ + fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n", + yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE); + } + } +#endif + + while(1){ /* Exit by "break" */ + assert( yypParser->yytos>=yypParser->yystack ); + assert( yyact==yypParser->yytos->stateno ); + yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact); + if( yyact >= YY_MIN_REDUCE ){ + unsigned int yyruleno = yyact - YY_MIN_REDUCE; /* Reduce by this rule */ +#ifndef NDEBUG + assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ); + if( yyTraceFILE ){ + int yysize = yyRuleInfoNRhs[yyruleno]; + if( yysize ){ + fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", + yyTracePrompt, + yyruleno, yyRuleName[yyruleno], + yyrulenoyytos[yysize].stateno); + }else{ + fprintf(yyTraceFILE, "%sReduce %d [%s]%s.\n", + yyTracePrompt, yyruleno, yyRuleName[yyruleno], + yyrulenoyytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == + (int)(yypParser->yytos - yypParser->yystack)); + 
} +#endif + if( yypParser->yytos>=yypParser->yystackEnd ){ + if( yyGrowStack(yypParser) ){ + yyStackOverflow(yypParser); + break; + } + } + } + yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor ParseCTX_PARAM); + }else if( yyact <= YY_MAX_SHIFTREDUCE ){ + yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt--; +#endif + break; + }else if( yyact==YY_ACCEPT_ACTION ){ + yypParser->yytos--; + yy_accept(yypParser); + return; + }else{ + assert( yyact == YY_ERROR_ACTION ); + yyminorunion.yy0 = yyminor; +#ifdef YYERRORSYMBOL + int yymx; +#endif +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt); + } +#endif +#ifdef YYERRORSYMBOL + /* A syntax error has occurred. + ** The response to an error depends upon whether or not the + ** grammar defines an error token "ERROR". + ** + ** This is what we do if the grammar does define ERROR: + ** + ** * Call the %syntax_error function. + ** + ** * Begin popping the stack until we enter a state where + ** it is legal to shift the error symbol, then shift + ** the error symbol. + ** + ** * Set the error count to three. + ** + ** * Begin accepting and shifting new tokens. No new error + ** processing will occur until three tokens have been + ** shifted successfully. 
+ ** + */ + if( yypParser->yyerrcnt<0 ){ + yy_syntax_error(yypParser,yymajor,yyminor); + } + yymx = yypParser->yytos->major; + if( yymx==YYERRORSYMBOL || yyerrorhit ){ +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sDiscard input token %s\n", + yyTracePrompt,yyTokenName[yymajor]); + } +#endif + yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion); + yymajor = YYNOCODE; + }else{ + while( yypParser->yytos > yypParser->yystack ){ + yyact = yy_find_reduce_action(yypParser->yytos->stateno, + YYERRORSYMBOL); + if( yyact<=YY_MAX_SHIFTREDUCE ) break; + yy_pop_parser_stack(yypParser); + } + if( yypParser->yytos <= yypParser->yystack || yymajor==0 ){ + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + yymajor = YYNOCODE; + }else if( yymx!=YYERRORSYMBOL ){ + yy_shift(yypParser,yyact,YYERRORSYMBOL,yyminor); + } + } + yypParser->yyerrcnt = 3; + yyerrorhit = 1; + if( yymajor==YYNOCODE ) break; + yyact = yypParser->yytos->stateno; +#elif defined(YYNOERRORRECOVERY) + /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to + ** do any kind of error recovery. Instead, simply invoke the syntax + ** error routine and continue going as if nothing had happened. + ** + ** Applications can set this macro (for example inside %include) if + ** they intend to abandon the parse upon the first syntax error seen. + */ + yy_syntax_error(yypParser,yymajor, yyminor); + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + break; +#else /* YYERRORSYMBOL is not defined */ + /* This is what we do if the grammar does not define ERROR: + ** + ** * Report an error message, and throw away the input token. + ** + ** * If the input token is $, then fail the parse. + ** + ** As before, subsequent error messages are suppressed until + ** three input tokens have been successfully shifted. 
+ */ + if( yypParser->yyerrcnt<=0 ){ + yy_syntax_error(yypParser,yymajor, yyminor); + } + yypParser->yyerrcnt = 3; + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + if( yyendofinput ){ + yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + } + break; +#endif + } + } +#ifndef NDEBUG + if( yyTraceFILE ){ + yyStackEntry *i; + char cDiv = '['; + fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt); + for(i=&yypParser->yystack[1]; i<=yypParser->yytos; i++){ + fprintf(yyTraceFILE,"%c%s", cDiv, yyTokenName[i->major]); + cDiv = ' '; + } + fprintf(yyTraceFILE,"]\n"); + } +#endif + return; +} + +/* +** Return the fallback token corresponding to canonical token iToken, or +** 0 if iToken has no fallback. +*/ +int ParseFallback(int iToken){ +#ifdef YYFALLBACK + assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) ); + return yyFallback[iToken]; +#else + (void)iToken; + return 0; +#endif +} diff --git a/docs/en/03-intro.md b/docs/en/03-intro.md index 4e0089950a6..853e96704fc 100644 --- a/docs/en/03-intro.md +++ b/docs/en/03-intro.md @@ -46,7 +46,7 @@ For more details on features, please read through the entire documentation. By making full use of [characteristics of time series data](https://tdengine.com/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases with the following advantages. -- **[High-Performance](https://tdengine.com/high-performance/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression. +- **[High-Performance](https://tdengine.com/high-performance/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while outperforming other time-series databases for data ingestion, querying and data compression. 
- **[Simplified Solution](https://tdengine.com/comprehensive-industrial-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly. diff --git a/docs/en/04-get-started/01-docker.md b/docs/en/04-get-started/01-docker.md index 882e2ef1940..f361e5a10f2 100644 --- a/docs/en/04-get-started/01-docker.md +++ b/docs/en/04-get-started/01-docker.md @@ -75,9 +75,9 @@ taos> ## TDegnine Graphic User Interface -From TDengine 3.3.0.0, there is a new componenet called `taos-explorer` added in the TDengine docker image. You can use it to manage the databases, super tables, child tables, and data in your TDengine system. There are also some features only available in TDengine Enterprise Edition, please contact TDengine sales team in case you need these features. +From TDengine 3.3.0.0, there is a new component called `taos-explorer` added in the TDengine docker image. You can use it to manage the databases, super tables, child tables, and data in your TDengine system. There are also some features only available in TDengine Enterprise Edition, please contact TDengine sales team in case you need these features. -To use taos-explorer in the container, you need to access the host port mapped from container port 6060. Assuming the host name is abc.com, and the port used on host is 6060, you need to access `http://abc.com:6060`. taos-explorer uses port 6060 by default in the container. When you use it the first time, you need to register with your enterprise email, then can logon using your user name and password in the TDengine database management system. +To use taos-explorer in the container, you need to access the host port mapped from container port 6060. Assuming the host name is abc.com, and the port used on host is 6060, you need to access `http://abc.com:6060`. 
taos-explorer uses port 6060 by default in the container. The default username and password to log in to the TDengine Database Management System is "root/taosdata". ## Test data insert performance diff --git a/docs/en/14-reference/01-components/10-taosbenchmark b/docs/en/14-reference/01-components/10-taosbenchmark index e4884b889c9..45c5cd2fb8b 100644 --- a/docs/en/14-reference/01-components/10-taosbenchmark +++ b/docs/en/14-reference/01-components/10-taosbenchmark @@ -364,6 +364,9 @@ The configuration parameters for specifying super table tag columns and data col - **min**: The minimum value of the column/label of the data type. The generated value will equal or large than the minimum value. - **max**: The maximum value of the column/label of the data type. The generated value will less than the maximum value. + +- **scalingFactor**: Floating-point precision enhancement factor, which takes effect only when the data type is float/double. It has a valid range of positive integers from 1 to 1,000,000. It is used to enhance the precision of generated floating-point numbers, particularly when the min or max values are small. This property enhances the precision after the decimal point by powers of 10: scalingFactor of 10 indicates an enhancement of 1 decimal precision, 100 indicates an enhancement of 2 decimal precision, and so on. + - **fun**: This column of data is filled with functions. Currently, only the sin and cos functions are supported. The input parameter is the timestamp and converted to an angle value. The conversion formula is: angle x=input time column ts value % 360. At the same time, it supports coefficient adjustment and random fluctuation factor adjustment, presented in a fixed format expression, such as fun="10\*sin(x)+100\*random(5)", where x represents the angle, ranging from 0 to 360 degrees, and the growth step size is consistent with the time column step size. 
10 represents the coefficient of multiplication, 100 represents the coefficient of addition or subtraction, and 5 represents the fluctuation range within a random range of 5%. The currently supported data types are int, bigint, float, and double. Note: The expression is fixed and cannot be reversed. - **values**: The value field of the nchar/binary column/label, which will be chosen randomly from the values. diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md index 2ba3c416fdf..d2efd668b00 100644 --- a/docs/en/14-reference/03-taos-sql/10-function.md +++ b/docs/en/14-reference/03-taos-sql/10-function.md @@ -1187,7 +1187,7 @@ CSUM(expr) ### DERIVATIVE ```sql -DERIVATIVE(expr, time_inerval, ignore_negative) +DERIVATIVE(expr, time_interval, ignore_negative) ignore_negative: { 0 diff --git a/docs/en/14-reference/03-taos-sql/12-distinguished.md b/docs/en/14-reference/03-taos-sql/12-distinguished.md index bfc9ca32c0f..2374b762d49 100644 --- a/docs/en/14-reference/03-taos-sql/12-distinguished.md +++ b/docs/en/14-reference/03-taos-sql/12-distinguished.md @@ -80,7 +80,7 @@ These pseudocolumns occur after the aggregation clause. `FILL` clause is used to specify how to fill when there is data missing in any window, including: 1. NONE: No fill (the default fill mode) -2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. If multiple columns in select list need to be filled, then in the fill clause there must be a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. +2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. 
For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled. If multiple columns in select list need to be filled, then in the fill clause there must be a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note that only exprs in select list that contains normal cols need to specify fill value, exprs like `_wstart`, `_wend`, `_wduration`, `_wstart + 1a`, `now`, `1+1`, partition keys like tbname(when using partition by) don't need to specify fill value. But exprs like `timediff(last(ts), _wstart)` need to specify fill value. 3. PREV: Fill with the previous non-NULL value, `FILL(PREV)` 4. NULL: Fill with NULL, `FILL(NULL)` 5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)` diff --git a/docs/en/14-reference/03-taos-sql/21-node.md b/docs/en/14-reference/03-taos-sql/21-node.md index 2ebccb76f71..cdc4bdd0206 100644 --- a/docs/en/14-reference/03-taos-sql/21-node.md +++ b/docs/en/14-reference/03-taos-sql/21-node.md @@ -27,11 +27,15 @@ The preceding SQL command shows all dnodes in the cluster with the ID, endpoint, ## Delete a DNODE ```sql -DROP DNODE dnode_id +DROP DNODE dnode_id [force] [unsafe] ``` Note that deleting a dnode does not stop its process. You must stop the process after the dnode is deleted. +Only an online dnode is allowed to be deleted directly. To delete an offline dnode, execute the drop command with the force option. + +If a dnode with a single replica is offline and the data on it cannot be restored, execute the drop command with the unsafe option.
+ ## Modify Dnode Configuration ```sql diff --git a/docs/en/14-reference/03-taos-sql/31-compress.md b/docs/en/14-reference/03-taos-sql/31-compress.md index 10a06a4c91c..39abfe69bd7 100644 --- a/docs/en/14-reference/03-taos-sql/31-compress.md +++ b/docs/en/14-reference/03-taos-sql/31-compress.md @@ -41,7 +41,7 @@ In this article, it specifically refers to the level within the secondary compre ### Create Table with Compression ```sql -CREATE [dbname.]tabname (colName colType [ENCODE 'encode_type'] [COMPRESS 'compress_type' [LEVEL 'level'], [, other cerate_definition]...]) +CREATE [dbname.]tabname (colName colType [ENCODE 'encode_type'] [COMPRESS 'compress_type' [LEVEL 'level'], [, other create_definition]...]) ``` **Parameter Description** @@ -58,7 +58,7 @@ CREATE [dbname.]tabname (colName colType [ENCODE 'encode_type'] [COMPRESS 'compr ### Change Compression Method ```sql -ALTER TABLE [db_name.]tabName MODIFY COLUMN colName [ENCODE 'ecode_type'] [COMPRESS 'compress_type'] [LEVEL "high"] +ALTER TABLE [db_name.]tabName MODIFY COLUMN colName [ENCODE 'encode_type'] [COMPRESS 'compress_type'] [LEVEL "high"] ``` **Parameter Description** diff --git a/docs/en/14-reference/05-connectors/60-rest-api.mdx b/docs/en/14-reference/05-connectors/60-rest-api.mdx index 2c3cd21f414..a29751e951a 100644 --- a/docs/en/14-reference/05-connectors/60-rest-api.mdx +++ b/docs/en/14-reference/05-connectors/60-rest-api.mdx @@ -125,7 +125,7 @@ where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e. 
Starting from `TDengine 3.0.3.0`, `taosAdapter` provides a configuration parameter `httpCodeServerError` to set whether to return a non-200 http status code when the C interface returns an error | **Description** | **httpCodeServerError false** | **httpCodeServerError true** | -|--------------------|---------------------------- ------|---------------------------------------| +|--------------------|----------------------------------|---------------------------------------| | taos_errno() returns 0 | 200 | 200 | | taos_errno() returns non-0 | 200 (except authentication error) | 500 (except authentication error and 400/502 error) | | Parameter error | 400 (only handle HTTP request URL parameter error) | 400 (handle HTTP request URL parameter error and taosd return error) | diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md index 14a1a0fb9d9..77d183a5ef7 100755 --- a/docs/en/14-reference/12-config/index.md +++ b/docs/en/14-reference/12-config/index.md @@ -701,15 +701,6 @@ The charset that takes effect is UTF-8. | Type | String | | Default Value | _tag_null | -### smlDataFormat - -| Attribute | Description | -| ----------- | ----------------------------------------------------------------------------------- | -| Applicable | Client only | -| Meaning | Whether schemaless columns are consistently ordered, depat, discarded since 3.0.3.0 | -| Value Range | 0: not consistent; 1: consistent. | -| Default | 0 | - ### smlTsDefaultName | Attribute | Description | @@ -719,6 +710,16 @@ The charset that takes effect is UTF-8. 
| Type | String | | Default Value | _ts | +### smlDot2Underline + +| Attribute | Description | +| -------- | -------------------------------------------------------- | +| Applicable | Client only | +| Meaning | Convert the dot in the supertable name to an underscore | +| Type | Bool | +| Default Value | true | + + ## Compress Parameters ### compressMsgSize diff --git a/docs/en/26-tdinternal/04-load-balance.md b/docs/en/26-tdinternal/04-load-balance.md index 474272c46d2..c7aca23cc9d 100644 --- a/docs/en/26-tdinternal/04-load-balance.md +++ b/docs/en/26-tdinternal/04-load-balance.md @@ -4,7 +4,7 @@ sidebar_label: Load Balance description: This document describes how TDengine implements load balancing. --- -The load balance in TDengine is mainly about processing data series data. TDengine employes builtin hash algorithm to distribute all the tables, sub-tables and their data of a database across all the vgroups that belongs to the database. Each table or sub-table can only be handled by a single vgroup, while each vgroup can process multiple table or sub-table. +The load balance in TDengine is mainly about processing data series data. TDengine employs builtin hash algorithm to distribute all the tables, sub-tables and their data of a database across all the vgroups that belongs to the database. Each table or sub-table can only be handled by a single vgroup, while each vgroup can process multiple table or sub-table. The number of vgroup can be specified when creating a database, using the parameter `vgroups`. @@ -12,10 +12,10 @@ The number of vgroup can be specified when creating a database, using the parame create database db0 vgroups 100; ``` -The proper value of `vgroups` depends on available system resources. Assuming there is only one database to be created in the system, then the number of `vgroups` is determined by the available resources from all dnodes. In principle more vgroups can be created if you have more CPU and memory. 
Disk I/O is another important factor to consider. Once the bottleneck shows on disk I/O, more vgroups may downgrad the system performance significantly. If multiple databases are to be created in the system, then the total number of `vroups` of all the databases are dependent on the available system resources. It needs to be careful to distribute vgroups among these databases, you need to consider the number of tables, data writing frequency, size of each data row for all these databases. A recommended practice is to firstly choose a starting number for `vgroups`, for example double of the number of CPU cores, then try to adjust and optimize system configurations to find the best setting for `vgroups`, then distribute these vgroups among databases. +The proper value of `vgroups` depends on available system resources. Assuming there is only one database to be created in the system, then the number of `vgroups` is determined by the available resources from all dnodes. In principle more vgroups can be created if you have more CPU and memory. Disk I/O is another important factor to consider. Once the bottleneck shows on disk I/O, more vgroups may degrade the system performance significantly. If multiple databases are to be created in the system, then the total number of `vgroups` of all the databases are dependent on the available system resources. It needs to be careful to distribute vgroups among these databases, you need to consider the number of tables, data writing frequency, size of each data row for all these databases. A recommended practice is to firstly choose a starting number for `vgroups`, for example double of the number of CPU cores, then try to adjust and optimize system configurations to find the best setting for `vgroups`, then distribute these vgroups among databases. -Furthermode, TDengine distributes the vgroups of each database equally among all dnodes. 
In case of replica 3, the distribution is even more complex, TDengine tries its best to prevent any dnode from becoming a bottleneck. +Furthermore, TDengine distributes the vgroups of each database equally among all dnodes. In case of replica 3, the distribution is even more complex, TDengine tries its best to prevent any dnode from becoming a bottleneck. -TDegnine utilizes the above ways to achieve load balance in a cluster, and finally achieve higher throughput. +TDengine utilizes the above ways to achieve load balance in a cluster, and finally achieve higher throughput. Once the load balance is achieved, after some operations like deleting tables or dropping databases, the load across all dnodes may become imbalanced, the method of rebalance will be provided in later versions. However, even without explicit rebalancing, TDengine will try its best to achieve new balance without manual interfering when a new database is created. diff --git a/docs/examples/JDBC/taosdemo/readme.md b/docs/examples/JDBC/taosdemo/readme.md index 141391d1f61..82aba4d9c1c 100644 --- a/docs/examples/JDBC/taosdemo/readme.md +++ b/docs/examples/JDBC/taosdemo/readme.md @@ -7,8 +7,4 @@ java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host -data java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host -database -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 ``` -如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path -请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/lib 来指定寻找共享库的路径。 - - If you encounter the error Exception in thread "main" `java.lang.UnsatisfiedLinkError: no taos in java.library.path`, please check whether the TDengine client package is installed or TDengine is compiled and installed. 
If you are sure it is installed and still encounter this error, you can add `-Djava.library.path=/usr/lib` after the `java` command to specify the path to the shared library. diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java index a399f3aa6af..647855dc480 100644 --- a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java @@ -1,8 +1,9 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; +import com.fasterxml.jackson.core.JsonProcessingException; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.tmq.*; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.*; import java.time.Duration; @@ -60,7 +61,7 @@ public static TaosConsumer getConsumer() throws Exception { // ANCHOR_END: create_consumer } - public static void pollExample(TaosConsumer consumer) throws SQLException { + public static void pollExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: poll_data_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -73,7 +74,7 @@ public static void pollExample(TaosConsumer consumer) throws SQLExce for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } } } catch (Exception ex) { @@ -91,7 +92,7 @@ public static void pollExample(TaosConsumer consumer) throws SQLExce // ANCHOR_END: poll_data_code_piece } - public static void seekExample(TaosConsumer consumer) throws SQLException { + public static void seekExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: consumer_seek List topics = Collections.singletonList("topic_meters"); try { @@ -99,7 +100,7 @@ 
public static void seekExample(TaosConsumer consumer) throws SQLExce consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); Set assignment = consumer.assignment(); - System.out.println("Now assignment: " + JSON.toJSONString(assignment)); + System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment)); ConsumerRecords records = ConsumerRecords.emptyRecord(); // make sure we have got some data @@ -125,7 +126,7 @@ public static void seekExample(TaosConsumer consumer) throws SQLExce } - public static void commitExample(TaosConsumer consumer) throws SQLException { + public static void commitExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: commit_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -135,7 +136,7 @@ public static void commitExample(TaosConsumer consumer) throws SQLEx for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } if (!records.isEmpty()) { // after processing the data, commit the offset manually diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java index a59bfc282fe..378ef8ae6dd 100644 --- a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java @@ -1,7 +1,7 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.Connection; import java.sql.DriverManager; @@ -31,7 +31,11 @@ public static void main(String[] args) throws SQLException, InterruptedException final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() { @Override public 
void process(ResultBean result) { - System.out.println("data: " + JSON.toJSONString(result)); + try{ + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result)); + } catch (Exception e) { + throw new RuntimeException(e); + } } }; diff --git a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java index 6db65f47f2b..02db97a5a95 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java +++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java @@ -1,8 +1,9 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; +import com.fasterxml.jackson.core.JsonProcessingException; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.tmq.*; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.*; import java.time.Duration; @@ -60,7 +61,7 @@ public static TaosConsumer getConsumer() throws Exception { // ANCHOR_END: create_consumer } - public static void pollExample(TaosConsumer consumer) throws SQLException { + public static void pollExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: poll_data_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -73,7 +74,7 @@ public static void pollExample(TaosConsumer consumer) throws SQLExce for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } } } catch (Exception ex) { @@ -91,7 +92,7 @@ public static void pollExample(TaosConsumer consumer) throws SQLExce // ANCHOR_END: poll_data_code_piece } - public static void seekExample(TaosConsumer consumer) throws SQLException { + public static void seekExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: 
consumer_seek List topics = Collections.singletonList("topic_meters"); try { @@ -99,7 +100,7 @@ public static void seekExample(TaosConsumer consumer) throws SQLExce consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); Set assignment = consumer.assignment(); - System.out.println("Now assignment: " + JSON.toJSONString(assignment)); + System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment)); ConsumerRecords records = ConsumerRecords.emptyRecord(); // make sure we have got some data @@ -125,7 +126,7 @@ public static void seekExample(TaosConsumer consumer) throws SQLExce } - public static void commitExample(TaosConsumer consumer) throws SQLException { + public static void commitExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: commit_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -135,7 +136,7 @@ public static void commitExample(TaosConsumer consumer) throws SQLEx for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } if (!records.isEmpty()) { // after processing the data, commit the offset manually diff --git a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java index 70e29503f85..77c6a4fd1bd 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java @@ -1,7 +1,7 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.Connection; import java.sql.DriverManager; @@ -28,7 +28,11 @@ public static void main(String[] args) throws 
SQLException, InterruptedException final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() { @Override public void process(ResultBean result) { - System.out.println("data: " + JSON.toJSONString(result)); + try{ + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result)); + } catch (Exception e) { + throw new RuntimeException(e); + } } }; diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java index 8678f652311..fa6ebf08586 100644 --- a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java @@ -13,6 +13,9 @@ public class DataBaseMonitor { public DataBaseMonitor init() throws SQLException { if (conn == null) { String jdbcURL = System.getenv("TDENGINE_JDBC_URL"); + if (jdbcURL == null || jdbcURL == ""){ + jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + } conn = DriverManager.getConnection(jdbcURL); stmt = conn.createStatement(); } diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java index dc820f161c8..1497992f6b5 100644 --- a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java @@ -69,6 +69,9 @@ public SQLWriter(int maxBatchSize) { */ private static Connection getConnection() throws SQLException { String jdbcURL = System.getenv("TDENGINE_JDBC_URL"); + if (jdbcURL == null || jdbcURL == ""){ + jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + } return DriverManager.getConnection(jdbcURL); } diff --git a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java index 
e014a3b3151..a92ddd116c0 100644 --- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java +++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java @@ -17,6 +17,37 @@ public void dropDB(String dbName) throws SQLException { stmt.execute("drop database if exists " + dbName); } } + waitTransaction(); + } + + public void dropTopic(String topicName) throws SQLException { + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + try (Connection conn = DriverManager.getConnection(jdbcUrl)) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop topic if exists " + topicName); + } + } + waitTransaction(); + } + + public void waitTransaction() throws SQLException { + + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + try (Connection conn = DriverManager.getConnection(jdbcUrl)) { + try (Statement stmt = conn.createStatement()) { + for (int i = 0; i < 10; i++) { + stmt.execute("show transactions"); + try (ResultSet resultSet = stmt.getResultSet()) { + if (resultSet.next()) { + int count = resultSet.getInt(1); + if (count == 0) { + break; + } + } + } + } + } + } } public void insertData() throws SQLException { @@ -104,14 +135,20 @@ public void testConsumer() throws Exception { SubscribeDemo.main(args); } -// @Test -// public void testSubscribeJni() throws SQLException, InterruptedException { -// dropDB("power"); -// ConsumerLoopFull.main(args); -// } -// @Test -// public void testSubscribeWs() throws SQLException, InterruptedException { -// dropDB("power"); -// WsConsumerLoopFull.main(args); -// } + @Test + public void testSubscribeJni() throws SQLException, InterruptedException { + dropTopic("topic_meters"); + dropDB("power"); + ConsumerLoopFull.main(args); + dropTopic("topic_meters"); + dropDB("power"); + } + @Test + public void testSubscribeWs() throws SQLException, InterruptedException { + dropTopic("topic_meters"); + dropDB("power"); + WsConsumerLoopFull.main(args); + 
dropTopic("topic_meters"); + dropDB("power"); + } } diff --git a/docs/examples/node/package.json b/docs/examples/node/package.json index 3f5f54e9d7c..14303c8f374 100644 --- a/docs/examples/node/package.json +++ b/docs/examples/node/package.json @@ -4,6 +4,6 @@ "main": "index.js", "license": "MIT", "dependencies": { - "@tdengine/websocket": "^3.1.0" + "@tdengine/websocket": "^3.1.1" } } diff --git a/docs/examples/node/websocketexample/sql_example.js b/docs/examples/node/websocketexample/sql_example.js index 0a09228d970..c120b84d99f 100644 --- a/docs/examples/node/websocketexample/sql_example.js +++ b/docs/examples/node/websocketexample/sql_example.js @@ -3,7 +3,6 @@ const taos = require("@tdengine/websocket"); let dsn = 'ws://localhost:6041'; async function createConnect() { - try { let conf = new taos.WSConfig(dsn); conf.setUser('root'); diff --git a/docs/examples/node/websocketexample/tmq_seek_example.js b/docs/examples/node/websocketexample/tmq_seek_example.js index f676efe36f6..be4d8ddfa4a 100644 --- a/docs/examples/node/websocketexample/tmq_seek_example.js +++ b/docs/examples/node/websocketexample/tmq_seek_example.js @@ -10,7 +10,6 @@ const groupId = "group1"; const clientId = "client1"; async function createConsumer() { - let groupId = "group1"; let clientId = "client1"; let configMap = new Map([ diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md index 32ea117fbb2..1fda72024ce 100644 --- a/docs/zh/01-index.md +++ b/docs/zh/01-index.md @@ -4,9 +4,9 @@ sidebar_label: 文档首页 slug: / --- -TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库Time Series Database, TSDB), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 
内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept) +TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库Time Series Database, TSDB), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept)。 -TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[数据模型](./basic/model)一章。 +TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论你在工作中是什么角色,请您仔细阅读[数据模型](./basic/model)一章。 如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解的话,请看[连接器](./reference/connector)一章。 @@ -16,6 +16,8 @@ TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移 如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。 +如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinterna)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看TDengine在GitHub的源代码,对TDengine的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。 + 最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。 Together, we make a difference! 
diff --git a/docs/zh/04-get-started/01-docker.md b/docs/zh/04-get-started/01-docker.md index cadde10e0c1..c75c8bafd0e 100644 --- a/docs/zh/04-get-started/01-docker.md +++ b/docs/zh/04-get-started/01-docker.md @@ -17,23 +17,24 @@ docker pull tdengine/tdengine:latest 或者指定版本的容器镜像: ```shell -docker pull tdengine/tdengine:3.0.1.4 +docker pull tdengine/tdengine:3.3.3.0 ``` 然后只需执行下面的命令: ```shell -docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine +docker run -d -p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine ``` -注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。 +注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043 为 taosKeeper 使用端口。6044-6049 TCP 端口为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。 +6044 和 6045 UDP 端口为 statsd 和 collectd 格式写入接口,可根据需要选择是否打开。6060 为 taosExplorer 使用端口。具体端口使用情况请参考[网络端口要求](../../operation/planning#网络端口要求)。 如果需要将数据持久化到本机的某一个文件夹,则执行下边的命令: ```shell docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \ -v ~/data/taos/dnode/log:/var/log/taos \ - -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine + -p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine ``` :::note @@ -121,4 +122,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s); ``` -在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。 \ No newline at end of file +在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。 diff --git a/docs/zh/04-get-started/03-package.md b/docs/zh/04-get-started/03-package.md index 2a1f594b4fa..9724c0a1c92 100644 --- a/docs/zh/04-get-started/03-package.md +++ b/docs/zh/04-get-started/03-package.md @@ -14,7 +14,9 @@ TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc) 
为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 Lite 版本的安装包。 -在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,RPM 和 Deb 包不含 `taosdump` 和 TDinsight 安装脚本,这些工具需要通过安装 taosTools 包获得。TDengine 也提供 Windows x64 平台和 macOS x64/m1 平台的安装包。 +在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统,用户可以根据自己的运行环境自行选择。同时我们也提供了 tar.gz 格式安装包,以及 `apt-get` 工具从线上进行安装。 + +此外,TDengine 也提供 macOS x64/m1 平台的 pkg 安装包。 ## 运行环境要求 在linux系统中,运行环境最低要求如下: @@ -317,4 +319,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s); ``` -在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。 \ No newline at end of file +在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。 \ No newline at end of file diff --git a/docs/zh/04-get-started/_07-use.md b/docs/zh/04-get-started/_07-use.md index d206ed41022..8c976e9b55e 100644 --- a/docs/zh/04-get-started/_07-use.md +++ b/docs/zh/04-get-started/_07-use.md @@ -54,4 +54,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s); ``` -在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。 \ No newline at end of file +在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。 \ No newline at end of file diff --git a/docs/zh/05-basic/01-model.md b/docs/zh/05-basic/01-model.md index bcd931df3a5..f49db178929 100644 --- a/docs/zh/05-basic/01-model.md +++ b/docs/zh/05-basic/01-model.md @@ -106,7 +106,7 @@ CREATE DATABASE power PRECISION 'ms' KEEP 3650 DURATION 10 BUFFER 16; ``` 该 SQL 将创建一个名为 `power` 的数据库,各参数说明如下: - `PRECISION 'ms'` :这个数据库的时序数据使用毫秒(ms)精度的时间戳 -- `KEEP 365`:这个库的数据将保留 3650 天,超过 3650 天的数据将被自动删除 +- `KEEP 3650`:这个库的数据将保留 3650 天,超过 3650 
天的数据将被自动删除 - `DURATION 10` :每 10 天的数据放在一个数据文件中 - `BUFFER 16` :写入使用大小为 16MB 的内存池。 @@ -214,4 +214,4 @@ TDengine 支持灵活的数据模型设计,包括多列模型和单列模型 尽管 TDengine 推荐使用多列模型,因为这种模型在写入效率和存储效率方面通常更优,但在某些特定场景下,单列模型可能更为适用。例如,当一个数据采集点的采集量种类经常发生变化时,如果采用多列模型,就需要频繁修改超级表的结构定义,这会增加应用程序的复杂性。在这种情况下,采用单列模型可以简化应用程序的设计和管理,因为它允许独立地管理和扩展每个物理量的超级表。 -总之,TDengine 提供了灵活的数据模型选项,用户可以根据实际需求和场景选择最适合的模型,以优化性能和管理复杂性。 \ No newline at end of file +总之,TDengine 提供了灵活的数据模型选项,用户可以根据实际需求和场景选择最适合的模型,以优化性能和管理复杂性。 diff --git a/docs/zh/05-basic/03-query.md b/docs/zh/05-basic/03-query.md index 6afdba0997b..b6f172829ae 100644 --- a/docs/zh/05-basic/03-query.md +++ b/docs/zh/05-basic/03-query.md @@ -8,7 +8,7 @@ toc_max_heading_level: 4 ## 基本查询 -为了更好的介绍 TDengine 数据查询,使用 如下 taosBenchmark 命令,生成本章内容需要的时序数据。 +为了更好的介绍 TDengine 数据查询,使用如下 taosBenchmark 命令,生成本章内容需要的时序数据。 ```shell taosBenchmark --start-timestamp=1600000000000 --tables=100 --records=10000000 --time-step=10000 @@ -20,21 +20,22 @@ taosBenchmark --start-timestamp=1600000000000 --tables=100 --records=10000000 -- ```sql SELECT * FROM meters -WHERE voltage > 10 +WHERE voltage > 230 ORDER BY ts DESC -LIMIT 5 +LIMIT 5; ``` -上面的 SQL,从超级表 `meters` 中查询出电压 `voltage` 大于 10 的记录,按时间降序排列,且仅输出前 5 行。查询结果如下: +上面的 SQL,从超级表 `meters` 中查询出电压 `voltage` 大于 230V 的记录,按时间降序排列,且仅输出前 5 行。查询结果如下: ```text - ts | current | voltage | phase | groupid | location | -========================================================================================================== -2023-11-14 22:13:10.000 | 1.1294620 | 18 | 0.3531540 | 8 | California.MountainView | -2023-11-14 22:13:10.000 | 1.0294620 | 12 | 0.3631540 | 2 | California.Campbell | -2023-11-14 22:13:10.000 | 1.0294620 | 16 | 0.3531540 | 1 | California.Campbell | -2023-11-14 22:13:10.000 | 1.1294620 | 18 | 0.3531540 | 2 | California.Campbell | -2023-11-14 22:13:10.000 | 1.1294620 | 16 | 0.3431540 | 7 | California.PaloAlto | + ts | current | voltage | phase | groupid | location | 
+=================================================================================================== +2023-11-15 06:13:10.000 | 14.0601978 | 232 | 146.5000000 | 10 | California.Sunnyvale | +2023-11-15 06:13:10.000 | 14.0601978 | 232 | 146.5000000 | 1 | California.LosAngles | +2023-11-15 06:13:10.000 | 14.0601978 | 232 | 146.5000000 | 10 | California.Sunnyvale | +2023-11-15 06:13:10.000 | 14.0601978 | 232 | 146.5000000 | 5 | California.Cupertino | +2023-11-15 06:13:10.000 | 14.0601978 | 232 | 146.5000000 | 4 | California.SanFrancisco | +Query OK, 5 row(s) in set (0.145403s) ``` ## 聚合查询 @@ -48,28 +49,28 @@ TDengine 支持通过 GROUP BY 子句,对数据进行聚合查询。SQL 语句 group by 子句用于对数据进行分组,并为每个分组返回一行汇总信息。在 group by 子句中,可以使用表或视图中的任何列作为分组依据,这些列不需要出现在 select 列表中。此外,用户可以直接在超级表上执行聚合查询,无须预先创建子表。以智能电表的数据模型为例,使用 group by 子句的 SQL 如下: ```sql -SELECT groupid,avg(voltage) +SELECT groupid, avg(voltage) FROM meters WHERE ts >= "2022-01-01T00:00:00+08:00" AND ts < "2023-01-01T00:00:00+08:00" -GROUP BY groupid +GROUP BY groupid; ``` 上面的 SQL,查询超级表 `meters` 中,时间戳大于等于 `2022-01-01T00:00:00+08:00`,且时间戳小于 `2023-01-01T00:00:00+08:00` 的数据,按照 `groupid` 进行分组,求每组的平均电压。查询结果如下: ```text - groupid | avg(voltage) | -========================================== - 8 | 9.104040404040404 | - 5 | 9.078333333333333 | - 1 | 9.087037037037037 | - 7 | 8.991414141414142 | - 9 | 8.789814814814815 | - 6 | 9.051010101010101 | - 4 | 9.135353535353536 | - 10 | 9.213131313131314 | - 2 | 9.008888888888889 | - 3 | 8.783888888888889 | + groupid | avg(voltage) | +====================================== + 8 | 243.961981544901079 | + 5 | 243.961981544901079 | + 1 | 243.961981544901079 | + 7 | 243.961981544901079 | + 9 | 243.961981544901079 | + 6 | 243.961981544901079 | + 4 | 243.961981544901079 | + 10 | 243.961981544901079 | + 2 | 243.961981544901079 | + 3 | 243.961981544901079 | Query OK, 10 row(s) in set (0.042446s) ``` @@ -110,24 +111,24 @@ TDengine 按如下方式处理数据切分子句。 ```sql SELECT location, avg(voltage) FROM meters -PARTITION BY location 
+PARTITION BY location; ``` 上面的示例 SQL 查询超级表 `meters`,将数据按标签 `location` 进行分组,每个分组计算电压的平均值。查询结果如下: ```text - location | avg(voltage) | -========================================================= - California.SantaClara | 8.793334320000000 | - California.SanFrancisco | 9.017645882352941 | - California.SanJose | 9.156112940000000 | - California.LosAngles | 9.036753507692307 | - California.SanDiego | 8.967037053333334 | - California.Sunnyvale | 8.978572085714285 | - California.PaloAlto | 8.936665800000000 | - California.Cupertino | 8.987654066666666 | - California.MountainView | 9.046297266666667 | - California.Campbell | 9.149999028571429 | + location | avg(voltage) | +====================================================== + California.SantaClara | 243.962050000000005 | + California.SanFrancisco | 243.962050000000005 | + California.SanJose | 243.962050000000005 | + California.LosAngles | 243.962050000000005 | + California.SanDiego | 243.962050000000005 | + California.Sunnyvale | 243.962050000000005 | + California.PaloAlto | 243.962050000000005 | + California.Cupertino | 243.962050000000005 | + California.MountainView | 243.962050000000005 | + California.Campbell | 243.962050000000005 | Query OK, 10 row(s) in set (2.415961s) ``` @@ -200,20 +201,20 @@ SLIMIT 2; 上面的 SQL,查询超级表 `meters` 中,时间戳大于等于 `2022-01-01T00:00:00+08:00`,且时间戳小于 `2022-01-01T00:05:00+08:00` 的数据;数据首先按照子表名 `tbname` 进行数据切分,再按照每 1 分钟的时间窗口进行切分,且每个时间窗口向后偏移 5 秒;最后,仅取前 2 个分片的数据作为结果。查询结果如下: ```text - tbname | _wstart | _wend | avg(voltage) | -========================================================================================== - d40 | 2021-12-31 15:59:05.000 | 2021-12-31 16:00:05.000 | 4.000000000000000 | - d40 | 2021-12-31 16:00:05.000 | 2021-12-31 16:01:05.000 | 5.000000000000000 | - d40 | 2021-12-31 16:01:05.000 | 2021-12-31 16:02:05.000 | 8.000000000000000 | - d40 | 2021-12-31 16:02:05.000 | 2021-12-31 16:03:05.000 | 7.666666666666667 | - d40 | 2021-12-31 16:03:05.000 | 2021-12-31 16:04:05.000 | 
9.666666666666666 | - d40 | 2021-12-31 16:04:05.000 | 2021-12-31 16:05:05.000 | 15.199999999999999 | - d41 | 2021-12-31 15:59:05.000 | 2021-12-31 16:00:05.000 | 4.000000000000000 | - d41 | 2021-12-31 16:00:05.000 | 2021-12-31 16:01:05.000 | 7.000000000000000 | - d41 | 2021-12-31 16:01:05.000 | 2021-12-31 16:02:05.000 | 9.000000000000000 | - d41 | 2021-12-31 16:02:05.000 | 2021-12-31 16:03:05.000 | 10.666666666666666 | - d41 | 2021-12-31 16:03:05.000 | 2021-12-31 16:04:05.000 | 8.333333333333334 | - d41 | 2021-12-31 16:04:05.000 | 2021-12-31 16:05:05.000 | 9.600000000000000 | + tbname | _wstart | _wend | avg(voltage) | +====================================================================================== + d2 | 2021-12-31 23:59:05.000 | 2022-01-01 00:00:05.000 | 253.000000000000000 | + d2 | 2022-01-01 00:00:05.000 | 2022-01-01 00:01:05.000 | 244.166666666666657 | + d2 | 2022-01-01 00:01:05.000 | 2022-01-01 00:02:05.000 | 241.833333333333343 | + d2 | 2022-01-01 00:02:05.000 | 2022-01-01 00:03:05.000 | 243.166666666666657 | + d2 | 2022-01-01 00:03:05.000 | 2022-01-01 00:04:05.000 | 240.833333333333343 | + d2 | 2022-01-01 00:04:05.000 | 2022-01-01 00:05:05.000 | 244.800000000000011 | + d26 | 2021-12-31 23:59:05.000 | 2022-01-01 00:00:05.000 | 253.000000000000000 | + d26 | 2022-01-01 00:00:05.000 | 2022-01-01 00:01:05.000 | 244.166666666666657 | + d26 | 2022-01-01 00:01:05.000 | 2022-01-01 00:02:05.000 | 241.833333333333343 | + d26 | 2022-01-01 00:02:05.000 | 2022-01-01 00:03:05.000 | 243.166666666666657 | + d26 | 2022-01-01 00:03:05.000 | 2022-01-01 00:04:05.000 | 240.833333333333343 | + d26 | 2022-01-01 00:04:05.000 | 2022-01-01 00:05:05.000 | 244.800000000000011 | Query OK, 12 row(s) in set (0.021265s) ``` @@ -255,19 +256,19 @@ SLIMIT 1; 上面的 SQL,查询超级表 `meters` 中,时间戳大于等于 `2022-01-01T00:00:00+08:00`,且时间戳小于 `2022-01-01T00:05:00+08:00` 的数据,数据首先按照子表名 `tbname` 进行数据切分,再按照每 1 分钟的时间窗口进行切分,且时间窗口按照 30 秒进行滑动;最后,仅取前 1 个分片的数据作为结果。查询结果如下: ```text - tbname | _wstart | avg(voltage) 
| -================================================================ - d40 | 2021-12-31 15:59:30.000 | 4.000000000000000 | - d40 | 2021-12-31 16:00:00.000 | 5.666666666666667 | - d40 | 2021-12-31 16:00:30.000 | 4.333333333333333 | - d40 | 2021-12-31 16:01:00.000 | 5.000000000000000 | - d40 | 2021-12-31 16:01:30.000 | 9.333333333333334 | - d40 | 2021-12-31 16:02:00.000 | 9.666666666666666 | - d40 | 2021-12-31 16:02:30.000 | 10.000000000000000 | - d40 | 2021-12-31 16:03:00.000 | 10.333333333333334 | - d40 | 2021-12-31 16:03:30.000 | 10.333333333333334 | - d40 | 2021-12-31 16:04:00.000 | 13.000000000000000 | - d40 | 2021-12-31 16:04:30.000 | 15.333333333333334 | + tbname | _wstart | avg(voltage) | +============================================================= + d2 | 2021-12-31 23:59:30.000 | 248.333333333333343 | + d2 | 2022-01-01 00:00:00.000 | 246.000000000000000 | + d2 | 2022-01-01 00:00:30.000 | 244.666666666666657 | + d2 | 2022-01-01 00:01:00.000 | 240.833333333333343 | + d2 | 2022-01-01 00:01:30.000 | 239.500000000000000 | + d2 | 2022-01-01 00:02:00.000 | 243.833333333333343 | + d2 | 2022-01-01 00:02:30.000 | 243.833333333333343 | + d2 | 2022-01-01 00:03:00.000 | 241.333333333333343 | + d2 | 2022-01-01 00:03:30.000 | 241.666666666666657 | + d2 | 2022-01-01 00:04:00.000 | 244.166666666666657 | + d2 | 2022-01-01 00:04:30.000 | 244.666666666666657 | Query OK, 11 row(s) in set (0.013153s) ``` @@ -290,13 +291,13 @@ SLIMIT 1; 上面的 SQL,查询超级表 `meters` 中,时间戳大于等于 `2022-01-01T00:00:00+08:00`,且时间戳小于 `2022-01-01T00:05:00+08:00` 的数据,数据首先按照子表名 `tbname` 进行数据切分,再按照每 1 分钟的时间窗口进行切分,且时间窗口按照 1 分钟进行切分;最后,仅取前 1 个分片的数据作为结果。查询结果如下: ```text - tbname | _wstart | _wend | avg(voltage) | -================================================================================================================= - d28 | 2021-12-31 16:00:00.000 | 2021-12-31 16:01:00.000 | 7.333333333333333 | - d28 | 2021-12-31 16:01:00.000 | 2021-12-31 16:02:00.000 | 8.000000000000000 | - d28 | 2021-12-31 16:02:00.000 | 
2021-12-31 16:03:00.000 | 11.000000000000000 | - d28 | 2021-12-31 16:03:00.000 | 2021-12-31 16:04:00.000 | 6.666666666666667 | - d28 | 2021-12-31 16:04:00.000 | 2021-12-31 16:05:00.000 | 10.000000000000000 | + tbname | _wstart | _wend | avg(voltage) | +====================================================================================== + d2 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:00.000 | 246.000000000000000 | + d2 | 2022-01-01 00:01:00.000 | 2022-01-01 00:02:00.000 | 240.833333333333343 | + d2 | 2022-01-01 00:02:00.000 | 2022-01-01 00:03:00.000 | 243.833333333333343 | + d2 | 2022-01-01 00:03:00.000 | 2022-01-01 00:04:00.000 | 241.333333333333343 | + d2 | 2022-01-01 00:04:00.000 | 2022-01-01 00:05:00.000 | 244.166666666666657 | Query OK, 5 row(s) in set (0.016812s) ``` @@ -342,53 +343,65 @@ SLIMIT 2; 上面的 SQL,查询超级表 `meters` 中,时间戳大于等于 `2022-01-01T00:00:00+08:00`,且时间戳小于 `2022-01-01T00:05:00+08:00` 的数据;数据首先按照子表名 `tbname` 进行数据切分,再按照每 1 分钟的时间窗口进行切分,如果窗口内的数据出现缺失,则使用使用前一个非 NULL 值填充数据;最后,仅取前 2 个分片的数据作为结果。查询结果如下: ```text - tbname | _wstart | _wend | avg(voltage) | -================================================================================================================= - d40 | 2021-12-31 16:00:00.000 | 2021-12-31 16:01:00.000 | 5.666666666666667 | - d40 | 2021-12-31 16:01:00.000 | 2021-12-31 16:02:00.000 | 5.000000000000000 | - d40 | 2021-12-31 16:02:00.000 | 2021-12-31 16:03:00.000 | 9.666666666666666 | - d40 | 2021-12-31 16:03:00.000 | 2021-12-31 16:04:00.000 | 10.333333333333334 | - d40 | 2021-12-31 16:04:00.000 | 2021-12-31 16:05:00.000 | 13.000000000000000 | - d41 | 2021-12-31 16:00:00.000 | 2021-12-31 16:01:00.000 | 5.666666666666667 | - d41 | 2021-12-31 16:01:00.000 | 2021-12-31 16:02:00.000 | 9.333333333333334 | - d41 | 2021-12-31 16:02:00.000 | 2021-12-31 16:03:00.000 | 11.000000000000000 | - d41 | 2021-12-31 16:03:00.000 | 2021-12-31 16:04:00.000 | 7.666666666666667 | - d41 | 2021-12-31 16:04:00.000 | 2021-12-31 16:05:00.000 | 10.000000000000000 | + 
tbname | _wstart | _wend | avg(voltage) | +======================================================================================= + d2 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:00.000 | 246.000000000000000 | + d2 | 2022-01-01 00:01:00.000 | 2022-01-01 00:02:00.000 | 240.833333333333343 | + d2 | 2022-01-01 00:02:00.000 | 2022-01-01 00:03:00.000 | 243.833333333333343 | + d2 | 2022-01-01 00:03:00.000 | 2022-01-01 00:04:00.000 | 241.333333333333343 | + d2 | 2022-01-01 00:04:00.000 | 2022-01-01 00:05:00.000 | 244.166666666666657 | + d26 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:00.000 | 246.000000000000000 | + d26 | 2022-01-01 00:01:00.000 | 2022-01-01 00:02:00.000 | 240.833333333333343 | + d26 | 2022-01-01 00:02:00.000 | 2022-01-01 00:03:00.000 | 243.833333333333343 | + d26 | 2022-01-01 00:03:00.000 | 2022-01-01 00:04:00.000 | 241.333333333333343 | + d26 | 2022-01-01 00:04:00.000 | 2022-01-01 00:05:00.000 | 244.166666666666657 | Query OK, 10 row(s) in set (0.022866s) ``` ### 状态窗口 -使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。TDengine 还支持将 CASE 表达式用在状态量,可以表达某个状态的开始是由满足某个条件而触发,这个状态的结束是由另外一个条件满足而触发的语义。以智能电表为例,电压正常范围是 205V 到 235V,那么可以通过监控电压来判断电路是否正常。 +使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。TDengine 还支持将 CASE 表达式用在状态量,可以表达某个状态的开始是由满足某个条件而触发,这个状态的结束是由另外一个条件满足而触发的语义。以智能电表为例,电压正常范围是 225V 到 235V,那么可以通过监控电压来判断电路是否正常。 ```sql -SELECT tbname, _wstart, _wend,_wduration, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE 0 END status +SELECT tbname, _wstart, _wend,_wduration, CASE WHEN voltage >= 225 and voltage <= 235 THEN 1 ELSE 0 END status FROM meters WHERE ts >= "2022-01-01T00:00:00+08:00" AND ts < "2022-01-01T00:05:00+08:00" PARTITION BY tbname STATE_WINDOW( - CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE 0 END + CASE WHEN voltage >= 225 and voltage <= 235 THEN 1 ELSE 0 END ) -SLIMIT 10; +SLIMIT 2; ``` -以上 SQL,查询超级表 meters 中,时间戳大于等于 2022-01-01T00:00:00+08:00,且时间戳小于 2022-01-01T00:05:00+08:00的数据;数据首先按照子表名 
tbname 进行数据切分;根据电压是否在正常范围内进行状态窗口的划分;最后,取前 10 个分片的数据作为结果。查询结果如下: +以上 SQL,查询超级表 meters 中,时间戳大于等于 2022-01-01T00:00:00+08:00,且时间戳小于 2022-01-01T00:05:00+08:00的数据;数据首先按照子表名 tbname 进行数据切分;根据电压是否在正常范围内进行状态窗口的划分;最后,取前 2 个分片的数据作为结果。查询结果如下:(由于数据是随机生成,结果集包含的数据条数会有不同) ```text - tbname | _wstart | _wend | _wduration | status | -===================================================================================================================================== - d76 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 | - d47 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 | - d37 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 | - d87 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 | - d64 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 | - d35 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 | - d83 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 | - d51 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 | - d63 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 | - d0 | 2021-12-31 16:00:00.000 | 2021-12-31 16:04:50.000 | 290000 | 0 | -Query OK, 10 row(s) in set (0.040495s) + tbname | _wstart | _wend | _wduration | status | +=============================================================================================== + d2 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:20.000 | 80000 | 0 | + d2 | 2022-01-01 00:01:30.000 | 2022-01-01 00:01:30.000 | 0 | 1 | + d2 | 2022-01-01 00:01:40.000 | 2022-01-01 00:01:40.000 | 0 | 0 | + d2 | 2022-01-01 00:01:50.000 | 2022-01-01 00:01:50.000 | 0 | 1 | + d2 | 2022-01-01 00:02:00.000 | 2022-01-01 00:02:20.000 | 20000 | 0 | + d2 | 2022-01-01 00:02:30.000 | 2022-01-01 00:02:30.000 | 0 | 1 | + d2 | 2022-01-01 00:02:40.000 | 2022-01-01 00:03:00.000 | 20000 | 0 | + d2 | 2022-01-01 00:03:10.000 | 2022-01-01 00:03:10.000 | 0 | 1 | + d2 | 2022-01-01 00:03:20.000 | 2022-01-01 00:03:40.000 | 20000 | 0 | + d2 | 
2022-01-01 00:03:50.000 | 2022-01-01 00:03:50.000 | 0 | 1 | + d2 | 2022-01-01 00:04:00.000 | 2022-01-01 00:04:50.000 | 50000 | 0 | + d26 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:20.000 | 80000 | 0 | + d26 | 2022-01-01 00:01:30.000 | 2022-01-01 00:01:30.000 | 0 | 1 | + d26 | 2022-01-01 00:01:40.000 | 2022-01-01 00:01:40.000 | 0 | 0 | + d26 | 2022-01-01 00:01:50.000 | 2022-01-01 00:01:50.000 | 0 | 1 | + d26 | 2022-01-01 00:02:00.000 | 2022-01-01 00:02:20.000 | 20000 | 0 | + d26 | 2022-01-01 00:02:30.000 | 2022-01-01 00:02:30.000 | 0 | 1 | + d26 | 2022-01-01 00:02:40.000 | 2022-01-01 00:03:00.000 | 20000 | 0 | + d26 | 2022-01-01 00:03:10.000 | 2022-01-01 00:03:10.000 | 0 | 1 | + d26 | 2022-01-01 00:03:20.000 | 2022-01-01 00:03:40.000 | 20000 | 0 | + d26 | 2022-01-01 00:03:50.000 | 2022-01-01 00:03:50.000 | 0 | 1 | + d26 | 2022-01-01 00:04:00.000 | 2022-01-01 00:04:50.000 | 50000 | 0 | +Query OK, 22 row(s) in set (0.153403s) ``` ### 会话窗口 @@ -417,18 +430,18 @@ SLIMIT 10; 上面的 SQL,查询超级表 meters 中,时间戳大于等于 2022-01-01T00:00:00+08:00,且时间戳小于 2022-01-01T00:10:00+08:00的数据;数据先按照子表名 tbname 进行数据切分,再根据 10 分钟的会话窗口进行切分;最后,取前 10 个分片的数据作为结果,返回子表名、窗口开始时间、窗口结束时间、窗口宽度、窗口内数据条数。查询结果如下: ```text - tbname | _wstart | _wend | _wduration | count(*) | -===================================================================================================================================== - d76 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 | - d47 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 | - d37 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 | - d87 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 | - d64 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 | - d35 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 | - d83 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 | - d51 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 | - d63 | 2021-12-31 
16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 | - d0 | 2021-12-31 16:00:00.000 | 2021-12-31 16:09:50.000 | 590000 | 60 | + tbname | _wstart | _wend | _wduration | count(*) | +=============================================================================================== + d2 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 | + d26 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 | + d52 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 | + d64 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 | + d76 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 | + d28 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 | + d4 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 | + d88 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 | + d77 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 | + d54 | 2022-01-01 00:00:00.000 | 2022-01-01 00:09:50.000 | 590000 | 60 | Query OK, 10 row(s) in set (0.043489s) ``` @@ -458,26 +471,26 @@ FROM meters WHERE ts >= "2022-01-01T00:00:00+08:00" AND ts < "2022-01-01T00:10:00+08:00" PARTITION BY tbname -EVENT_WINDOW START WITH voltage >= 10 END WITH voltage < 20 -LIMIT 10; +EVENT_WINDOW START WITH voltage >= 225 END WITH voltage < 235 +LIMIT 5; ``` -上面的 SQL,查询超级表meters中,时间戳大于等于2022-01-01T00:00:00+08:00,且时间戳小于2022-01-01T00:10:00+08:00的数据;数据先按照子表名tbname进行数据切分,再根据事件窗口条件:电压大于等于 10V,且小于 20V 进行切分;最后,取前 10 行的数据作为结果,返回子表名、窗口开始时间、窗口结束时间、窗口宽度、窗口内数据条数。查询结果如下: +上面的 SQL,查询超级表meters中,时间戳大于等于2022-01-01T00:00:00+08:00,且时间戳小于2022-01-01T00:10:00+08:00的数据;数据先按照子表名tbname进行数据切分,再根据事件窗口条件:电压大于等于 225V,且小于 235V 进行切分;最后,取每个分片的前 5 行的数据作为结果,返回子表名、窗口开始时间、窗口结束时间、窗口宽度、窗口内数据条数。查询结果如下: ```text - tbname | _wstart | _wend | _wduration | count(*) | -===================================================================================================================================== - d0 | 2021-12-31 16:00:00.000 | 2021-12-31 
16:00:00.000 | 0 | 1 | - d0 | 2021-12-31 16:00:30.000 | 2021-12-31 16:00:30.000 | 0 | 1 | - d0 | 2021-12-31 16:00:40.000 | 2021-12-31 16:00:40.000 | 0 | 1 | - d0 | 2021-12-31 16:01:20.000 | 2021-12-31 16:01:20.000 | 0 | 1 | - d0 | 2021-12-31 16:02:20.000 | 2021-12-31 16:02:20.000 | 0 | 1 | - d0 | 2021-12-31 16:02:30.000 | 2021-12-31 16:02:30.000 | 0 | 1 | - d0 | 2021-12-31 16:03:10.000 | 2021-12-31 16:03:10.000 | 0 | 1 | - d0 | 2021-12-31 16:03:30.000 | 2021-12-31 16:03:30.000 | 0 | 1 | - d0 | 2021-12-31 16:03:40.000 | 2021-12-31 16:03:40.000 | 0 | 1 | - d0 | 2021-12-31 16:03:50.000 | 2021-12-31 16:03:50.000 | 0 | 1 | -Query OK, 10 row(s) in set (0.034127s) + tbname | _wstart | _wend | _wduration | count(*) | +============================================================================================== + d0 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:30.000 | 90000 | 10 | + d0 | 2022-01-01 00:01:40.000 | 2022-01-01 00:02:30.000 | 50000 | 6 | + d0 | 2022-01-01 00:02:40.000 | 2022-01-01 00:03:10.000 | 30000 | 4 | + d0 | 2022-01-01 00:03:20.000 | 2022-01-01 00:07:10.000 | 230000 | 24 | + d0 | 2022-01-01 00:07:20.000 | 2022-01-01 00:07:50.000 | 30000 | 4 | + d1 | 2022-01-01 00:00:00.000 | 2022-01-01 00:01:30.000 | 90000 | 10 | + d1 | 2022-01-01 00:01:40.000 | 2022-01-01 00:02:30.000 | 50000 | 6 | + d1 | 2022-01-01 00:02:40.000 | 2022-01-01 00:03:10.000 | 30000 | 4 | + d1 | 2022-01-01 00:03:20.000 | 2022-01-01 00:07:10.000 | 230000 | 24 | +…… +Query OK, 500 row(s) in set (0.328557s) ``` ### 计数窗口 @@ -492,17 +505,25 @@ sliding_val 是一个常量,表示窗口滑动的数量,类似于 interval select _wstart, _wend, count(*) from meters where ts >= "2022-01-01T00:00:00+08:00" and ts < "2022-01-01T00:30:00+08:00" -count_window(10); +count_window(1000); ``` -上面的 SQL 查询超级表 meters 中时间戳大于等于 2022-01-01T00:00:00+08:00 且时间戳小于 2022-01-01T00:10:00+08:00 的数据。以每 10 条数据为一组,返回每组的开始时间、结束时间和分组条数。查询结果如下。 +上面的 SQL 查询超级表 meters 中时间戳大于等于 2022-01-01T00:00:00+08:00 且时间戳小于 2022-01-01T00:10:00+08:00 的数据。以每 1000 
条数据为一组,返回每组的开始时间、结束时间和分组条数。查询结果如下: ```text -_wstart | _wend |count(*)| -=========================================================== -2021-12-31 16:00:00.000 | 2021-12-31 16:10:00.000 | 10 | -2021-12-31 16:10:00.000 | 2021-12-31 16:20:00.000 | 10 | -2021-12-31 16:20:00.000 | 2021-12-31 16:30:00.000 | 10 | + _wstart | _wend | count(*) | +===================================================================== + 2022-01-01 00:00:00.000 | 2022-01-01 00:01:30.000 | 1000 | + 2022-01-01 00:01:40.000 | 2022-01-01 00:03:10.000 | 1000 | + 2022-01-01 00:03:20.000 | 2022-01-01 00:04:50.000 | 1000 | + 2022-01-01 00:05:00.000 | 2022-01-01 00:06:30.000 | 1000 | + 2022-01-01 00:06:40.000 | 2022-01-01 00:08:10.000 | 1000 | + 2022-01-01 00:08:20.000 | 2022-01-01 00:09:50.000 | 1000 | + 2022-01-01 00:10:00.000 | 2022-01-01 00:11:30.000 | 1000 | + 2022-01-01 00:11:40.000 | 2022-01-01 00:13:10.000 | 1000 | + 2022-01-01 00:13:20.000 | 2022-01-01 00:14:50.000 | 1000 | + 2022-01-01 00:15:00.000 | 2022-01-01 00:16:30.000 | 1000 | +Query OK, 10 row(s) in set (0.062794s) ``` ## 时序数据特有函数 @@ -563,14 +584,14 @@ UNION ALL 上面的 SQL,分别查询:子表 d1 的 1 条数据,子表 d11 的 2 条数据,子表 d21 的 3 条数据,并将结果合并。返回的结果如下: ```text - tbname | ts | current | voltage | phase | -================================================================================================= - d11 | 2020-09-13 12:26:40.000 | 1.0260611 | 6 | 0.3620200 | - d11 | 2020-09-13 12:26:50.000 | 2.9544230 | 8 | 1.0048079 | - d21 | 2020-09-13 12:26:40.000 | 1.0260611 | 2 | 0.3520200 | - d21 | 2020-09-13 12:26:50.000 | 2.9544230 | 2 | 0.9948080 | - d21 | 2020-09-13 12:27:00.000 | -0.0000430 | 12 | 0.0099860 | - d1 | 2020-09-13 12:26:40.000 | 1.0260611 | 10 | 0.3520200 | + tbname | ts | current | voltage | phase | +==================================================================================== + d11 | 2020-09-13 20:26:40.000 | 11.5680809 | 247 | 146.5000000 | + d11 | 2020-09-13 20:26:50.000 | 14.2392311 | 234 | 148.0000000 | + d1 | 2020-09-13 20:26:40.000 
| 11.5680809 | 247 | 146.5000000 | + d21 | 2020-09-13 20:26:40.000 | 11.5680809 | 247 | 146.5000000 | + d21 | 2020-09-13 20:26:50.000 | 14.2392311 | 234 | 148.0000000 | + d21 | 2020-09-13 20:27:00.000 | 10.0999422 | 251 | 146.0000000 | Query OK, 6 row(s) in set (0.006438s) ``` diff --git a/docs/zh/06-advanced/02-cache.md b/docs/zh/06-advanced/02-cache.md index ca1da30dbfc..065adbf50ae 100644 --- a/docs/zh/06-advanced/02-cache.md +++ b/docs/zh/06-advanced/02-cache.md @@ -57,7 +57,7 @@ TDengine 利用这些日志文件实现故障前的状态恢复。在写入 WAL - wal_fsync_period:当 wal_level 设置为 2 时,这个参数控制执行 fsync 的频率。设置为 0 表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。 ```sql -CREATE DATABASE POWER WAL_LEVEL 1 WAL_FSYNC_PERIOD 3000; +CREATE DATABASE POWER WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000; ``` 在创建数据库时可以选择不同的参数类型,来选择性能优先或者可靠性优先。 @@ -119,4 +119,4 @@ taos> select last_row(ts,current) from meters; Query OK, 1 row(s) in set (0.046682s) ``` -可以看到查询的时延从 353/344ms 缩短到了 44ms,提升约 8 倍。 \ No newline at end of file +可以看到查询的时延从 353/344ms 缩短到了 44ms,提升约 8 倍。 diff --git a/docs/zh/06-advanced/03-stream.md b/docs/zh/06-advanced/03-stream.md index a219f86750d..c47831dde37 100644 --- a/docs/zh/06-advanced/03-stream.md +++ b/docs/zh/06-advanced/03-stream.md @@ -116,10 +116,11 @@ create stream if not exists count_history_s fill_history 1 into count_history as ### 流计算的触发模式 -在创建流时,可以通过 TRIGGER 指令指定流计算的触发模式。对于非窗口计算,流计算的触发是实时的,对于窗口计算,目前提供 3 种触发模式,默认为 WINDOW_CLOSE。 +在创建流时,可以通过 TRIGGER 指令指定流计算的触发模式。对于非窗口计算,流计算的触发是实时的,对于窗口计算,目前提供 4 种触发模式,默认为 WINDOW_CLOSE。 1. AT_ONCE:写入立即触发。 2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用)。 3. MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。 +4. 
FORCE_WINDOW_CLOSE:以操作系统当前时间为准,只计算当前关闭窗口的结果,并推送出去。窗口只会在被关闭的时刻计算一次,后续不会再重复计算。该模式当前只支持 INTERVAL 窗口(不支持滑动);FILL_HISTORY 必须为 0,IGNORE EXPIRED 必须为 1,IGNORE UPDATE 必须为 1;FILL 只支持 PREV、NULL、NONE、VALUE。 窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,此时事件时间无法更新,可能导致无法得到最新的计算结果。 @@ -227,4 +228,35 @@ PAUSE STREAM [IF EXISTS] stream_name; RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name; ``` -没有指定 IF EXISTS,如果该 stream 不存在,则报错。如果存在,则恢复流计算。指定了 IF EXISTS,如果 stream 不存在,则返回成功。如果存在,则恢复流计算。如果指定 IGNORE UNTREATED,则恢复流计算时,忽略流计算暂停期间写入的数据。 \ No newline at end of file +没有指定 IF EXISTS,如果该 stream 不存在,则报错。如果存在,则恢复流计算。指定了 IF EXISTS,如果 stream 不存在,则返回成功。如果存在,则恢复流计算。如果指定 IGNORE UNTREATED,则恢复流计算时,忽略流计算暂停期间写入的数据。 + +### 流计算升级故障恢复 + +升级 TDengine 后,如果流计算不兼容,需要删除流计算,然后重新创建流计算。步骤如下: + +1.修改 taos.cfg,添加 disableStream 1 + +2.重启 taosd。如果启动失败,修改 stream 目录的名称,避免 taosd 启动的时候尝试加载 stream 目录下的流计算数据信息。不使用删除操作避免误操作导致的风险。需要修改的文件夹:$dataDir/vnode/vnode*/tq/stream,$dataDir 指 TDengine 存储数据的目录,在 $dataDir/vnode/ 目录下会有多个类似 vnode1 、vnode2...vnode* 的目录,全部需要修改里面的 tq/stream 目录的名字,改为 tq/stream.bk + +3.启动 taos + +```sql +drop stream xxxx; ---- xxx 指 stream name +flush database stream_source_db; ---- 流计算读取数据的超级表所在的 database +flush database stream_dest_db; ---- 流计算写入数据的超级表所在的 database +``` + +举例: + +```sql +create stream streams1 into test1.streamst as select _wstart, count(a) c1 from test.st interval(1s) ; +drop stream streams1; +flush database test; +flush database test1; +``` + +4.关闭 taosd + +5.修改 taos.cfg,去掉 disableStream 1,或将 disableStream 改为 0 + +6.启动 taosd \ No newline at end of file diff --git a/docs/zh/06-advanced/05-data-in/index.md b/docs/zh/06-advanced/05-data-in/index.md index 7e5c4670105..0dfa04db56c 100644 --- a/docs/zh/06-advanced/05-data-in/index.md +++ b/docs/zh/06-advanced/05-data-in/index.md @@ -14,18 +14,26 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosExplorer ## 支持的数据源 -目前 TDengine 支持的数据源如下: - -1. Aveva PI System:一个工业数据管理和分析平台,前身为 OSIsoft PI System,它能够实时采集、整合、分析和可视化工业数据,助力企业实现智能化决策和精细化管理 -2. 
Aveva Historian:一个工业大数据分析软件,前身为 Wonderware Historian,专为工业环境设计,用于存储、管理和分析来自各种工业设备、传感器的实时和历史数据。 -3. OPC DA/UA:OPC 是 Open Platform Communications 的缩写,是一种开放式、标准化的通信协议,用于不同厂商的自动化设备之间进行数据交换。它最初由微软公司开发,旨在解决工业控制领域中不同设备之间互操作性差的问题。OPC 协议最初于 1996 年发布,当时称为 OPC DA (Data Access),主要用于实时数据采集和控制;2006 年,OPC 基金会发布了 OPC UA (Unified Architecture) 标准,它是一种基于服务的面向对象的协议,具有更高的灵活性和可扩展性,已成为 OPC 协议的主流版本。 -4. MQTT:Message Queuing Telemetry Transport 的缩写,一种基于发布/订阅模式的轻量级通讯协议,专为低开销、低带宽占用的即时通讯设计,广泛适用于物联网、小型设备、移动应用等领域。 -5. Kafka:由 Apache 软件基金会开发的一个开源流处理平台,主要用于处理实时数据,并提供一个统一、高通量、低延迟的消息系统。它具备高速度、可伸缩性、持久性和分布式设计等特点,使得它能够在每秒处理数十万次的读写操作,支持上千个客户端,同时保持数据的可靠性和可用性。 -6. OpenTSDB:基于 HBase 的分布式、可伸缩的时序数据库。它主要用于存储、索引和提供从大规模集群(包括网络设备、操作系统、应用程序等)中收集的指标数据,使这些数据更易于访问和图形化展示。 -7. CSV:Comma Separated Values 的缩写,是一种以逗号分隔的纯文本文件格式,通常用于电子表格或数据库软件。 -8. TDengine 2:泛指运行 TDengine 2.x 版本的 TDengine 实例。 -9. TDengine 3:泛指运行 TDengine 3.x 版本的 TDengine 实例。 -10. MySQL, PostgreSQL, Oracle 等关系型数据库。 +目前 TDengine 支持的数据源如下表: + +| 数据源 | 支持版本 | 描述 | +| --- | --- | --- | +| Aveva PI System | PI AF Server Version 2.10.9.593 或以上 | 工业数据管理和分析平台,前身为 OSIsoft PI System,它能够实时采集、整合、分析和可视化工业数据,助力企业实现智能化决策和精细化管理 | +| Aveva Historian | AVEVA Historian 2020 RS SP1 | 工业大数据分析软件,前身为 Wonderware Historian,专为工业环境设计,用于存储、管理和分析来自各种工业设备、传感器的实时和历史数据 | +| OPC DA | Matrikon OPC version: 1.7.2.7433 | OPC 是 Open Platform Communications 的缩写,是一种开放式、标准化的通信协议,用于不同厂商的自动化设备之间进行数据交换。它最初由微软公司开发,旨在解决工业控制领域中不同设备之间互操作性差的问题;OPC 协议最初于 1996 年发布,当时称为 OPC DA (Data Access),主要用于实时数据采集和控制。 | +| OPC UA | KeepWare KEPServerEx 6.5 | 2006 年,OPC 基金会发布了 OPC UA (Unified Architecture) 标准,它是一种基于服务的面向对象的协议,具有更高的灵活性和可扩展性,已成为 OPC 协议的主流版本 | +| MQTT | emqx: 3.0.0 到 5.7.1
hivemq: 4.0.0 到 4.31.0
mosquitto: 1.4.4 到 2.0.18 | Message Queuing Telemetry Transport 的缩写,一种基于发布/订阅模式的轻量级通讯协议,专为低开销、低带宽占用的即时通讯设计,广泛适用于物联网、小型设备、移动应用等领域。 | +| Kafka | 2.11 ~ 3.8.0 | 由 Apache 软件基金会开发的一个开源流处理平台,主要用于处理实时数据,并提供一个统一、高通量、低延迟的消息系统。它具备高速度、可伸缩性、持久性和分布式设计等特点,使得它能够在每秒处理数十万次的读写操作,支持上千个客户端,同时保持数据的可靠性和可用性。 | +| InfluxDB | 1.7、1.8、2.0-2.7 | InfluxDB 是一种流行的开源时间序列数据库,它针对处理大量时间序列数据进行了优化。| +| OpenTSDB | 2.4.1 | 基于 HBase 的分布式、可伸缩的时序数据库。它主要用于存储、索引和提供从大规模集群(包括网络设备、操作系统、应用程序等)中收集的指标数据,使这些数据更易于访问和图形化展示。 | +| MySQL | 5.6,5.7,8.0+ | MySQL是最流行的关系型数据库管理系统之一,由于其体积小、速度快、总体拥有成本低,尤其是开放源码这一特点,一般中小型和大型网站的开发都选择 MySQL 作为网站数据库。 | +| Oracle | 11G/12c/19c | Oracle 数据库系统是世界上流行的关系数据库管理系统,系统可移植性好、使用方便、功能强,适用于各类大、中、小微机环境。它是一种高效率的、可靠性好的、适应高吞吐量的数据库方案。 | +| PostgreSQL | v15.0+ | PostgreSQL 是一个功能非常强大的、源代码开放的客户/服务器关系型数据库管理系统, 有很多在大型商业RDBMS中所具有的特性,包括事务、子选择、触发器、视图、外键引用完整性和复杂锁定功能。| +| SQL Server | 2012/2022 | Microsoft SQL Server 是一种关系型数据库管理系统,由 Microsoft 公司开发,具有使用方便可伸缩性好与相关软件集成程度高等优点。 | +| MongoDB | 3.6+ | MongoDB 是一个介于关系型数据库与非关系型数据库之间的产品,被广泛应用于内容管理系统、移动应用与物联网等众多领域。 | +| CSV | - | Comma Separated Values 的缩写,是一种以逗号分隔的纯文本文件格式,通常用于电子表格或数据库软件。 | +| TDengine 2.x | 2.4 或 2.6+ | TDengine 旧版本,已不再维护,推荐升级到 3.0 最新版本。 | +| TDengine 3.x | 源端版本+ | 使用 TMQ 进行 TDengine 指定从数据库或超级表的订阅。 | ## 数据提取、过滤和转换 diff --git a/docs/zh/06-advanced/06-data-analysis/01-arima.md b/docs/zh/06-advanced/06-data-analysis/01-arima.md new file mode 100644 index 00000000000..b9d63e924f8 --- /dev/null +++ b/docs/zh/06-advanced/06-data-analysis/01-arima.md @@ -0,0 +1,54 @@ +--- +title: "ARIMA" +sidebar_label: "ARIMA" +--- + +本节讲述 ARIMA 算法模型的使用方法。 + +## 功能概述 + +ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。 +ARIMA 模型是一种自回归模型,只需要自变量即可预测后续的值。ARIMA 模型要求时间序列**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。 + +>平稳的时间序列:其性质不随观测时间的变化而变化。具有趋势或季节性的时间序列不是平稳时间序列——趋势和季节性使得时间序列在不同时段呈现不同性质。 + +以下参数可以动态输入,控制预测过程中生成合适的 ARIMA 模型。 + +- p= 自回归模型阶数 +- d= 差分阶数 +- q= 移动平均模型阶数 + + +### 参数 +分析平台中使用自动化的 ARIMA 
模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。 +|参数|说明|必填项| +|---|---|-----| +|period|输入时间序列每个周期包含的数据点个数,如果不设置该参数或该参数设置为 0,将使用非季节性/周期性的 ARIMA 模型预测|选填| +|start_p|自回归模型阶数的起始值,0 开始的整数,不推荐大于 10|选填| +|max_p|自回归模型阶数的结束值,0 开始的整数,不推荐大于 10|选填| +|start_q|移动平均模型阶数的起始值,0 开始的整数,不推荐大于 10|选填| +|max_q|移动平均模型阶数的结束值,0 开始的整数,不推荐大于 10|选填| +|d|差分阶数|选填| + +`start_p`、`max_p` `start_q` `max_q` 四个参数约束了模型在多大的范围内去搜寻合适的最优解。相同输入数据的条件下,参数范围越大,消耗的资源越多,系统响应的时间越长。 + +### 示例及结果 +针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,start_p 起始是 1, 最大拟合是 5,start_q 是 1,最大值是 5,预测结果中返回 95% 置信区间范围边界。 +``` +FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5") +``` + +```json5 +{ +"rows": fc_rows, // 返回结果的行数 +"period": period, // 返回结果的周期性,同输入 +"alpha": alpha, // 返回结果的置信区间,同输入 +"algo": "arima", // 返回结果使用的算法 +"mse": mse, // 拟合输入时间序列时候生成模型的最小均方误差(MSE) +"res": res // 列模式的结果 +} +``` + +### 参考文献 +- https://en.wikipedia.org/wiki/Autoregressive_moving-average_model +- https://baike.baidu.com/item/%E8%87%AA%E5%9B%9E%E5%BD%92%E6%BB%91%E5%8A%A8%E5%B9%B3%E5%9D%87%E6%A8%A1%E5%9E%8B/5023931?fromtitle=ARMA%E6%A8%A1%E5%9E%8B&fromid=8048415 diff --git a/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md b/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md new file mode 100644 index 00000000000..38662ca2b30 --- /dev/null +++ b/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md @@ -0,0 +1,43 @@ +--- +title: "HoltWinters" +sidebar_label: "HoltWinters" +--- + +本节讲述 HoltWinters 算法模型的使用方法。 + +## 功能概述 +HoltWinters 模型又称为多次指数平滑模型(EMA)。适用于含有线性趋势和周期波动的非平稳序列,利用指数平滑法让模型参数不断适应非平稳序列的变化,并对未来趋势进行**短期**预测。 +HoltWinters 有两种不同的季节性组成部分,当季节变化在该时间序列中大致保持不变时,通常选择**加法模型**;而当季节变化与时间序列的水平成比例变化时,通常选择**乘法模型**。 +该模型对于返回数据不提供计算的置信区间范围结果,在 95% 置信区间的上下界结果与预测结果相同。 + + +### 参数 + +分析平台中使用自动化的 HoltWinters 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。 +|参数|说明|必填项| +|---|---|---| +|period|输入时间序列每个周期包含的数据点个数。如果不设置该参数或该参数设置为 0,将使用一次(简单)指数平滑方式进行数据拟合,并据此进行未来数据的预测|选填| +|trend|趋势模型使用加法模型还是乘法模型|选填| +|seasonal|季节性采用加法模型还是乘法模型|选填| + 
+参数 `trend` 和 `seasonal`的均可以选择 `add` (加法模型)或 `mul`(乘法模型)。 + +### 示例及结果 +针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,趋势采用乘法模型,季节采用乘法模型 +``` +FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul") +``` + +```json5 +{ +"rows": rows, // 返回结果的行数 +"period": period, // 返回结果的周期性,该结果与输入的周期性相同,如果没有周期性,该值为 0 +"algo": 'holtwinters' // 返回结果使用的计算模型 +"mse": mse, // 最小均方误差(minmum square error) +"res": res // 具体的结果,按照列形式返回的结果。一般意义上包含了两列 [timestamp][fc_results]。 +} +``` + +### 参考文献 +- https://en.wikipedia.org/wiki/Exponential_smoothing +- https://orangematter.solarwinds.com/2019/12/15/holt-winters-forecasting-simplified/ diff --git a/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md b/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md new file mode 100644 index 00000000000..bdfa455ae36 --- /dev/null +++ b/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md @@ -0,0 +1,46 @@ +--- +title: "Anomaly-detection" +sidebar_label: "Anomaly-detection" +--- + +本节讲述异常检测算法模型的使用方法。 + +## 概述 +分析平台提供了 6 种异常检查模型,6 种异常检查模型分为 3 个类别,分别属于基于统计的异常检测模型、基于数据密度的检测模型、基于深度学习的异常检测模型。在不指定异常检测使用的方法的情况下,默认调用 iqr 的方法进行计算。 + + +### 统计学异常检测方法 + +- k-sigma[1]: 即 ***68–95–99.7 rule*** 。***k***值默认为 3,即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值 K 倍标准差,则该点被视为异常点. + +|参数|说明|是否必选|默认值| +|---|---|---|---| +|k|标准差倍数|选填|3| + + +- IQR[2]:四分位距 (Interquartile range, IQR) 是一种衡量变异性的方法. 
四分位数将一个按等级排序的数据集划分为四个相等的部分。即 Q1(第 1 个四分位数)、Q2(第 2 个四分位数)和 Q3(第 3 个四分位数)。IQR 定义为 Q3–Q1,位于 Q3+1.5。无输入参数。 + +- Grubbs[3]: 又称为 Grubbs' test,即最大标准残差测试。Grubbs 通常用作检验最大值、最小值偏离均值的程度是否为异常,该单变量数据集遵循近似标准正态分布。非正态分布数据集不能使用该方法。无输入参数。 + +- SHESD[4]: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常点比例的上界***k***,最差的情况是至多 49.9%。数据集的异常比例一般不超过 5% + +|参数|说明|是否必选|默认值| +|---|---|---|---| +|k|异常点在输入数据集中占比,范围是$`1\le K \le 49.9`$ |选填|5| + + +### 基于数据密度的检测方法 +LOF[5]: 局部离群因子(LOF,又叫局部异常因子)算法是 Breunig 于 2000 年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子,该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的 top(n) 个点。 + + +### 基于深度学习的检测方法 +使用自动编码器的异常检测模型。可以对具有周期性的数据具有较好的检测结果。但是使用该模型需要针对输入的时序数据进行训练,同时将训练完成的模型部署到服务目录中,才能够运行与使用。 + + +### 参考文献 +1. https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule +2. https://en.wikipedia.org/wiki/Interquartile_range +3. Adikaram, K. K. L. B.; Hussein, M. A.; Effenberger, M.; Becker, T. (2015-01-14). "Data Transformation Technique to Improve the Outlier Detection Power of Grubbs's Test for Data Expected to Follow Linear Relation". Journal of Applied Mathematics. 2015: 1–9. doi:10.1155/2015/708948. +4. Hochenbaum, O. S. Vallis, and A. Kejariwal. 2017. Automatic Anomaly Detection in the Cloud Via Statistical Learning. arXiv preprint arXiv:1704.07706 (2017). +5. Breunig, M. M.; Kriegel, H.-P.; Ng, R. T.; Sander, J. (2000). LOF: Identifying Density-based Local Outliers (PDF). Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data. SIGMOD. pp. 93–104. doi:10.1145/335191.335388. ISBN 1-58113-217-4. 
+ diff --git a/docs/zh/06-advanced/06-data-analysis/addins.md b/docs/zh/06-advanced/06-data-analysis/addins.md new file mode 100644 index 00000000000..c0b89217182 --- /dev/null +++ b/docs/zh/06-advanced/06-data-analysis/addins.md @@ -0,0 +1,170 @@ +--- +title: "addins" +sidebar_label: "addins" +--- + +本节说明如何将自己开发的预测算法和异常检测算法整合到 TDengine 分析平台,并能够通过 SQL 语句进行调用。 + +## 目录结构 + +![数据分析功能架构图](./pic/dir.png) + +|目录|说明| +|---|---| +|taos|Python 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc,单元测试和集成测试目录 test。 algo 目录下 ad 放置异常检测算法代码,fc 放置预测算法代码| +|script|是安装脚本和发布脚本放置目录| +|model|放置针对数据集完成的训练模型| +|cfg|配置文件目录| + +## 约定与限制 + +定义异常检测算法的 Python 代码文件需放在 /taos/algo/ad 目录中,预测算法 Python 代码文件需要放在 /taos/algo/fc 目录中,以确保系统启动的时候能够正常加载对应目录下的 Python 文件。 + + +### 类命名规范 + +算法类的名称需要以下划线开始,以 Service 结尾。例如:_KsigmaService 是 KSigma 异常检测算法的实现类。 + +### 类继承约定 + +- 异常检测算法需要从 `AbstractAnomalyDetectionService` 继承,并实现其核心抽象方法 `execute` +- 预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute` + +### 类属性初始化 +每个算法实现的类需要静态初始化两个类属性,分别是: + +- `name`:触发调用的关键词,全小写英文字母 +- `desc`:算法的描述信息 + +### 核心方法输入与输出约定 + +`execute` 是算法处理的核心方法。调用该方法的时候,`self.list` 已经设置好输入数组。 + +异常检测输出结果 + +`execute` 的返回值是长度与 `self.list` 相同的数组,数组位置为 -1 的即为异常值点。例如:输入数组是 [2, 2, 2, 2, 100], 如果 100 是异常点,那么返回值是 [1, 1, 1, 1, -1]。 + +预测输出结果 + +对于预测算法,`AbstractForecastService` 的对象属性说明如下: + +|属性名称|说明|默认值| +|---|---|---| +|period|输入时间序列的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可| 0| +|start_ts|预测结果的开始时间| 0| +|time_step|预测结果的两个数据点之间时间间隔|0 | +|fc_rows|预测结果的数量| 0 | +|return_conf|预测结果中是否包含置信区间范围,如果不包含置信区间,那么上界和下界与自身相同| 1| +|conf|置信区间分位数 0.05| + + +预测返回结果如下: +```python +return { + "rows": self.fc_rows, # 预测数据行数 + "period": self.period, # 数据周期性,同输入 + "algo": "holtwinters", # 预测使用的算法 + "mse": mse, # 预测算法的 mse + "res": res # 结果数组 [时间戳数组, 预测结果数组, 预测结果执行区间下界数组,预测结果执行区间上界数组] +} +``` + + +## 示例代码 + +```python +import numpy as np +from service import AbstractAnomalyDetectionService + +# 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束,如下 _IqrService 是 IQR 异常检测算法的实现类。 +class 
_IqrService(AbstractAnomalyDetectionService): + """ IQR algorithm 定义类,从 AbstractAnomalyDetectionService 继承,并实现 AbstractAnomalyDetectionService 类的抽象函数 """ + + # 定义算法调用关键词,全小写ASCII码(必须添加) + name = 'iqr' + + # 该算法的描述信息(建议添加) + desc = """found the anomaly data according to the inter-quartile range""" + + def __init__(self): + super().__init__() + + def execute(self): + """ execute 是算法实现逻辑的核心实现,直接修改该实现即可 """ + + # self.list 是输入数值列,list 类型,例如:[1,2,3,4,5]。设置 self.list 的方法在父类中已经进行了定义。实现自己的算法,修改该文件即可,以下代码使用自己的实现替换即可。 + #lower = np.quantile(self.list, 0.25) + #upper = np.quantile(self.list, 0.75) + + #min_val = lower - 1.5 * (upper - lower) + #max_val = upper + 1.5 * (upper - lower) + #threshold = [min_val, max_val] + + # 返回值是与输入数值列长度相同的数据列,异常值对应位置是 -1。例如上述输入数据列,返回数值列是 [1, 1, 1, 1, -1],表示 [5] 是异常值。 + return [-1 if k < threshold[0] or k > threshold[1] else 1 for k in self.list] + + + def set_params(self, params): + """该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑""" + pass +``` + + +## 单元测试 + +在测试文件目录中的 anomaly_test.py 中增加单元测试用例。 + +```python +def test_iqr(self): + """ 测试 _IqrService 类 """ + s = loader.get_service("iqr") + + # 设置需要进行检测的输入数据 + s.set_input_list(AnomalyDetectionTest.input_list) + + # 测试 set_params 的处理逻辑 + try: + s.set_params({"k": 2}) + except ValueError as e: + self.assertEqual(1, 0) + + r = s.execute() + + # 绘制异常检测结果 + draw_ad_results(AnomalyDetectionTest.input_list, r, "iqr") + + # 检查结果 + self.assertEqual(r[-1], -1) + self.assertEqual(len(r), len(AnomalyDetectionTest.input_list)) +``` + +## 需要模型的算法 + +针对特定数据集,进行模型训练的算法,在训练完成后。需要将训练得到的模型保存在 model 目录中。需要注意的是,针对每个算法,需要建立独立的文件夹。例如 auto_encoder 的训练算法在 model 目录下建立 autoencoder 的目录,使用该算法针对不同数据集训练得到的模型,均需要放置在该目录下。 + +训练完成后的模型,使用 joblib 进行保存。 + +并在 model 目录下建立对应的文件夹存放该模型。 + +保存模型的调用,可参考 encoder.py 的方式,用户通过调用 set_params 方法,并指定参数 `{"model": "ad_encoder_keras"}` 的方式,可以调用该模型进行计算。 + +具体的调用方式如下: + +```python +def test_autoencoder_ad(self): + # 获取特定的算法服务 + s = loader.get_service("ac") + data = self.__load_remote_data_for_ad() + + # 
设置异常检查的输入数据 + s.set_input_list(data) + + # 指定调用的模型,该模型是之前针对该数据集进行训练获得 + s.set_params({"model": "ad_encoder_keras"}) + # 执行检查动作,并返回结果 + r = s.execute() + + num_of_error = -(sum(filter(lambda x: x == -1, r))) + self.assertEqual(num_of_error, 109) +``` + diff --git a/docs/zh/06-advanced/06-data-analysis/index.md b/docs/zh/06-advanced/06-data-analysis/index.md new file mode 100644 index 00000000000..2cbea1caba6 --- /dev/null +++ b/docs/zh/06-advanced/06-data-analysis/index.md @@ -0,0 +1,322 @@ +--- +sidebar_label: 数据分析 +title: 数据分析功能 +--- + +## 概述 + +ANode(Analysis Node)是 TDengine 提供数据分析功能的扩展组件,通过 Restful 接口提供分析服务,拓展 TDengine 的功能,支持时间序列高级分析。 +ANode 是无状态的数据分析节点,集群中可以存在多个 ANode 节点,相互之间没有关联。将 ANode 注册到 TDengine 集群以后,通过 SQL 语句即可调用并完成时序分析任务。 +下图是数据分析的技术架构示意图。 + +![数据分析功能架构图](./pic/data-analysis.png) + +## 安装部署 +### 环境准备 +ANode 要求节点上准备有 Python 3.10 及以上版本,以及相应的 Python 包自动安装组件 Pip,同时请确保能够正常连接互联网。 + +### 安装及卸载 +使用专门的 ANode 安装包 TDengine-enterprise-anode-1.x.x.tar.gz 进行 ANode 的安装部署工作,安装过程与 TDengine 的安装流程一致。 + +```bash +tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz +cd TDengine-enterprise-anode-1.0.0 +sudo ./install.sh +``` + +卸载 ANode,执行命令 `rmtaosanode` 即可。 + +### 其他 +为了避免 ANode 安装后影响目标节点现有的 Python 库。 ANode 使用 Python 虚拟环境运行,安装后的默认 Python 目录处于 `/var/lib/taos/taosanode/venv/`。为了避免反复安装虚拟环境带来的开销,卸载 ANode 并不会自动删除该虚拟环境,如果您确认不需要 Python 的虚拟环境,可以手动删除。 + +## 启动及停止服务 +安装 ANode 以后,可以使用 `systemctl` 来管理 ANode 的服务。使用如下命令可以启动/停止/检查状态。 + +```bash +systemctl start taosanoded +systemctl stop taosanoded +systemctl status taosanoded +``` + +## 目录及配置说明 +|目录/文件|说明| +|---------------|------| +|/usr/local/taos/taosanode/bin|可执行文件目录| +|/usr/local/taos/taosanode/resource|资源文件目录,链接到文件夹 /var/lib/taos/taosanode/resource/| +|/usr/local/taos/taosanode/lib|库文件目录| +|/var/lib/taos/taosanode/model/|模型文件目录,链接到文件夹 /var/lib/taos/taosanode/model| +|/var/log/taos/taosanode/|日志文件目录| +|/etc/taos/taosanode.ini|配置文件| + +### 配置说明 + +Anode 提供的 RestFul 服务使用 uWSGI 驱动,因此 ANode 和 uWSGI 的配置信息存放在同一个配置文件中,具体如下: + +```ini +[uwsgi] 
+# charset +env = LC_ALL = en_US.UTF-8 + +# ip:port +http = 127.0.0.1:6050 + +# the local unix socket file than communicate to Nginx +#socket = 127.0.0.1:8001 +#socket-timeout = 10 + +# base directory +chdir = /usr/local/taos/taosanode/lib + +# initialize python file +wsgi-file = /usr/local/taos/taosanode/lib/taos/app.py + +# call module of uWSGI +callable = app + +# auto remove unix Socket and pid file when stopping +vacuum = true + +# socket exec model +#chmod-socket = 664 + +# uWSGI pid +uid = root + +# uWSGI gid +gid = root + +# main process +master = true + +# the number of worker processes +processes = 2 + +# pid file +pidfile = /usr/local/taos/taosanode/taosanode.pid + +# enable threads +enable-threads = true + +# the number of threads for each process +threads = 4 + +# memory useage report +memory-report = true + +# smooth restart +reload-mercy = 10 + +# conflict with systemctl, so do NOT uncomment this +# daemonize = /var/log/taos/taosanode/taosanode.log + +# log directory +logto = /var/log/taos/taosanode/taosanode.log + +# wWSGI monitor port +stats = 127.0.0.1:8387 + +# python virtual environment directory +virtualenv = /usr/local/taos/taosanode/venv/ + +[taosanode] +# default app log file +app-log = /var/log/taos/taosanode/taosanode.app.log + +# model storage directory +model-dir = /usr/local/taos/taosanode/model/ + +# default log level +log-level = DEBUG + +# draw the query results +draw-result = 0 +``` + +**提示** +请勿设置 `daemonize` 参数,该参数会导致 uWSGI 与 systemctl 冲突,从而无法正常启动。 + + +## ANode 基本操作 +### 管理 ANode +#### 创建 ANode +```sql +CREATE ANODE {node_url} +``` +node_url 是提供服务的 ANode 的 IP 和 PORT, 例如:`create anode 'http://localhost:6050'`。启动 ANode 以后如果不注册到 TDengine 集群中,则无法提供正常的服务。不建议 ANode 注册到两个或多个集群中。 + +#### 查看 ANode +列出集群中所有的数据分析节点,包括其 `FQDN`, `PORT`, `STATUS`。 +```sql +SHOW ANODES; +``` + +#### 查看提供的时序数据分析服务 + +```SQL +SHOW ANODES FULL; +``` + +#### 强制刷新集群中的分析算法缓存 +```SQL +UPDATE ANODE {node_id} +UPDATE ALL ANODES +``` + +#### 删除 ANode +```sql +DROP ANODE 
{anode_id} +``` +删除 ANode 只是将 ANode 从 TDengine 集群中删除,管理 ANode 的启停仍然需要使用`systemctl`命令。 + +### 时序数据分析功能 + +#### 白噪声检查 + +分析平台提供的 Restful 服务要求输入的时间序列不能是白噪声时间序列(White Noise Data, WND)和随机数序列 , 因此针对所有数据均默认进行白噪声检查。当前白噪声检查采用通行的 `Ljung-Box` 检验,`Ljung-Box` 统计量检查过程需要遍历整个输入序列并进行计算。 +如果用户能够明确输入序列一定不是白噪声序列,那么可以通过输入参数,指定预测之前忽略该检查,从而节省分析过程的 CPU 计算资源。 +同时支持独立地针对输入序列进行白噪声检测(该检测功能暂不独立对外开放)。 + + +#### 数据重采样和时间戳对齐 + +分析平台支持将输入数据进行重采样预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别: + +- 数据时间戳对齐。由于真实数据可能并非严格按照查询指定的时间戳输入。此时分析平台会自动将数据的时间间隔按照指定的时间间隔进行对齐。例如输入时间序列 [11, 22, 29, 41],用户指定时间间隔为 10,该序列将被对齐重整为以下序列 [10, 20, 30, 40]。 +- 数据时间重采样。用户输入时间序列的采样频率超过了输出结果的频率,例如输入时间序列的采样频率是 5,输出结果的频率是 10,输入时间序列 [0, 5, 10, 15, 20, 25, 30] 将被重采用为间隔 为 10 的序列 [0, 10, 20,30],[5, 15, 25] 处的数据将被丢弃。 + +需要注意的是,数据输入平台不支持缺失数据补齐后进行的预测分析,如果输入时间序列数据 [11, 22, 29, 49],并且用户要求的时间间隔为 10,重整对齐后的序列是 [10, 20, 30, 50] 那么该序列进行预测分析将返回错误。 + + +#### 时序数据异常检测 +异常检测是针对输入的时序数据,使用预设或用户指定的算法确定时间序列中**可能**出现异常的时间序列点,对于时间序列中若干个连续的异常点,将自动合并成为一个连续的(闭区间)异常窗口。对于只有单个点的场景,异常窗口窗口退化成为一个起始时间和结束时间相同的点。 +异常检测生成的异常窗口受检测算法和算法参数的共同影响,对于异常窗口范围内的数据,可以应用 TDengine 提供的聚合和标量函数进行查询或变换处理。 +对于输入时间序列 (1, 20), (2, 22), (3, 91), (4, 120), (5, 18), (6, 19)。系统检测到 (3, 91), (4, 120) 为异常点,那么返回的异常窗口是闭区间 [3, 4]。 + + +##### 语法 + +```SQL +ANOMALY_WINDOW(column_name, option_expr) + +option_expr: {" +algo=expr1 +[,wncheck=1|0] +[,expr2] +"} +``` + +1. `column`:进行时序数据异常检测的输入数据列,当前只支持单列,且只能是数值类型,不能是字符类型(例如:`NCHAR` `VARCHAR` `VARBINARY`等类型),**不支持函数表达式**。 +2. `options`:字符串。其中使用 K=V 调用异常检测算法及与算法相关的参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。例如:`algo=ksigma,k=2` 表示进行异常检测的算法是 ksigma,该算法接受的输入参数是 2。 +3. 异常检测的结果可以作为外层查询的子查询输入,在 `SELECT` 子句中使用的聚合函数或标量函数与其他类型的窗口查询相同。 +4. 输入数据默认进行白噪声检查,如果输入数据是白噪声,将不会有任何(异常)窗口信息返回。 + +**参数说明** +|参数|含义|默认值| +|---|---|---| +|algo|异常检测调用的算法|iqr| +|wncheck|对输入数据列是否进行白噪声检查|取值为 0 或者 1,默认值为 1,表示进行白噪声检查| + +异常检测的返回结果以窗口形式呈现,因此窗口查询相关的伪列在这种场景下仍然可用。可以使用的伪列如下: +1. `_WSTART`: 异常窗口开始时间戳 +2. `_WEND`:异常窗口结束时间戳 +3. 
`_WDURATION`:异常窗口持续时间 + +**示例** +```SQL +--- 使用 iqr 算法进行异常检测,检测列 i32 列。 +SELECT _wstart, _wend, SUM(i32) +FROM ai.atb +ANOMALY_WINDOW(i32, "algo=iqr"); + +--- 使用 ksigma 算法进行异常检测,输入参数 k 值为 2,检测列 i32 列 +SELECT _wstart, _wend, SUM(i32) +FROM ai.atb +ANOMALY_WINDOW(i32, "algo=ksigma,k=2"); +``` + +``` +taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMAYL_WINDOW(i32); + _wstart | _wend | count(*) | +==================================================================== + 2020-01-01 00:00:16.000 | 2020-01-01 00:00:16.001 | 1 | +Query OK, 1 row(s) in set (0.028946s) +``` + + +**可用异常检测算法** +- iqr +- ksigma +- grubbs +- lof +- shesd +- tac + + +#### 时序数据预测 +数据预测以一段训练数据作为输入,预测接下来一个连续时间区间内,时序数据的趋势。 + +##### 语法 +```SQL +FORECAST(column_expr, option_expr) + +option_expr: {" +algo=expr1 +[,wncheck=1|0] +[,conf=conf_val] +[,every=every_val] +[,rows=rows_val] +[,start=start_ts_val] +[,expr2] +"} + +``` +1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型输入。 +2. `options`:异常检测函数的参数,使用规则与 anomaly_window 相同。预测还支持 `conf`, `every`, `rows`, `start`, `rows` 几个参数,其含义如下: + +**参数说明** + +|参数|含义|默认值| +|---|---|---| +|algo|预测分析使用的算法|holtwinters| +|wncheck|白噪声(white noise data)检查|默认值为 1,0 表示不进行检查| +|conf|预测数据的置信区间范围 ,取值范围 [0, 100]|95| +|every|预测数据的采样间隔|输入数据的采样间隔| +|start|预测结果的开始时间戳|输入数据最后一个时间戳加上一个采样时间段| +|rows|预测结果的记录数|10| + +1. 预测查询结果新增了三个伪列,具体如下:`_FROWTS`:预测结果的时间戳、`_FLOW`:置信区间下界、`_FHIGH`:置信区间上界, 对于没有置信区间的预测算法,其置信区间同预测结果 +2. 更改参数 `START`:返回预测结果的起始时间,改变起始时间不会影响返回的预测数值,只影响起始时间。 +3. `EVERY`:可以与输入数据的采样频率不同。采样频率只能低于或等于输入数据采样频率,不能**高于**输入数据的采样频率。 +4. 对于某些不需要计算置信区间的算法,即使指定了置信区间,返回的结果中其上下界退化成为一个点。 + +**示例** + +```SQL +--- 使用 arima 算法进行预测,预测结果是 10 条记录(默认值),数据进行白噪声检查,默认置信区间 95%. +SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima") +FROM ai.ftb; + +--- 使用 arima 算法进行预测,输入数据的是周期数据,每 10 个采样点是一个周期。返回置信区间是 95%. 
+SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10") +FROM ai.ftb; +``` +``` +taos> select _flow, _fhigh, _frowts, forecast(i32) from ai.ftb; + _flow | _fhigh | _frowts | forecast(i32) | +======================================================================================== + 10.5286684 | 41.8038254 | 2020-01-01 00:01:35.001 | 26 | + -21.9861946 | 83.3938904 | 2020-01-01 00:01:36.001 | 30 | + -78.5686035 | 144.6729126 | 2020-01-01 00:01:37.001 | 33 | + -154.9797363 | 230.3057709 | 2020-01-01 00:01:38.001 | 37 | + -253.9852905 | 337.6083984 | 2020-01-01 00:01:39.001 | 41 | + -375.7857971 | 466.4594727 | 2020-01-01 00:01:40.001 | 45 | + -514.8043823 | 622.4426270 | 2020-01-01 00:01:41.001 | 53 | + -680.6343994 | 796.2861328 | 2020-01-01 00:01:42.001 | 57 | + -868.4956665 | 992.8603516 | 2020-01-01 00:01:43.001 | 62 | + -1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.001 | 69 | +``` + + +**可用预测算法** +- arima +- holtwinters diff --git a/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png b/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png new file mode 100644 index 00000000000..44fd82832f3 Binary files /dev/null and b/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png differ diff --git a/docs/zh/06-advanced/06-data-analysis/pic/dir.png b/docs/zh/06-advanced/06-data-analysis/pic/dir.png new file mode 100644 index 00000000000..d5aafb44274 Binary files /dev/null and b/docs/zh/06-advanced/06-data-analysis/pic/dir.png differ diff --git a/docs/zh/08-operation/02-planning.md b/docs/zh/08-operation/02-planning.md index 66da1df8bfa..89f4c42fe81 100644 --- a/docs/zh/08-operation/02-planning.md +++ b/docs/zh/08-operation/02-planning.md @@ -53,7 +53,7 @@ M = (T × S × 3 + (N / 4096) + 100) 与 WebSocket 连接方式相比,RESTful 连接方式在内存占用上更大,除了缓冲区所需的内存以外,还需要考虑每个连接响应结果的内存开销。这种内存开销与响应结果的JSON 数据大小密切相关,特别是在查询数据量很大时,会占用大量内存。 -由于 RESTful 连接方式不支持分批获取查询数据,这就导致在查询获取超大结果集时,可能会占用特别大的内存,从而导致内存溢出,因此,在大型项目中,建议打开batchfetch=true 选项,以启用 WebSocket 
连接方式,实现流式结果集返回,从而避免内存溢出的风险 +由于 RESTful 连接方式不支持分批获取查询数据,这就导致在查询获取超大结果集时,可能会占用特别大的内存,从而导致内存溢出,因此,在大型项目中,建议使用 WebSocket 连接方式,实现流式结果集返回,从而避免内存溢出的风险 **注意** - 建议采用 RESTful/WebSocket 连接方式来访问 TDengine 集群,而不采用taosc 原生连接方式。 @@ -146,11 +146,17 @@ TDengine 的多级存储功能在使用上还具备以下优点。 下表列出了 TDengine 的一些接口或组件的常用端口,这些端口均可以通过配置文件中的参数进行修改。 -|接口或组件 | 端口 | -|:---------------------------:|:---------:| -|原生接口(taosc) | 6030 | -|RESTful 接口 | 6041 | -|WebSocket 接口 |6041 | -|taosKeeper | 6043 | -|taosX | 6050, 6055 | -|taosExplorer | 6060 | \ No newline at end of file +| 接口或组件名称 | 端口 | 协议 | +|:-----------------------------------------:|:----------:|:--------:| +| 原生接口(taosc) | 6030 | TCP | +| RESTful 接口 | 6041 | TCP | +| WebSocket 接口 | 6041 | TCP | +| taosKeeper | 6043 | TCP | +| statsd 格式写入接口 | 6044 | TCP/UDP | +| collectd 格式写入接口 | 6045 | TCP/UDP | +| openTSDB Telnet 格式写入接口 | 6046 | TCP | +| collectd 使用 openTSDB Telnet 格式写入接口 | 6047 | TCP | +| icinga2 使用 openTSDB Telnet 格式写入接口 | 6048 | TCP | +| tcollector 使用 openTSDB Telnet 格式写入接口 | 6049 | TCP | +| taosX | 6050, 6055 | TCP | +| taosExplorer | 6060 | TCP | diff --git a/docs/zh/08-operation/03-deployment.md b/docs/zh/08-operation/03-deployment.md index 2e0c2a79893..e549e8613d1 100644 --- a/docs/zh/08-operation/03-deployment.md +++ b/docs/zh/08-operation/03-deployment.md @@ -368,6 +368,18 @@ spec: labels: app: "tdengine" spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - tdengine + topologyKey: kubernetes.io/hostname containers: - name: "tdengine" image: "tdengine/tdengine:3.2.3.0" @@ -837,4 +849,4 @@ Helm 管理下,清理操作也变得简单: helm uninstall tdengine ``` -但 Helm 也不会自动移除 PVC,需要手动获取 PVC 然后删除掉。 \ No newline at end of file +但 Helm 也不会自动移除 PVC,需要手动获取 PVC 然后删除掉。 diff --git a/docs/zh/08-operation/12-multi.md b/docs/zh/08-operation/12-multi.md index a5608ad5fad..1e81a7ff1e3 100644 --- 
a/docs/zh/08-operation/12-multi.md +++ b/docs/zh/08-operation/12-multi.md @@ -76,7 +76,7 @@ dataDir /mnt/data6 2 0 |s3UploadDelaySec | data 文件持续多长时间不再变动后上传至 s3,单位:秒。最小值:1;最大值:2592000 (30天),默认值 60 秒 | |s3PageCacheSize |s3 page cache 缓存页数目,单位:页。最小值:4;最大值:1024*1024\*1024。 ,默认值 4096| |s3MigrateIntervalSec | 本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600 | -|s3MigrateEnabled | 是否自动进行 S3 迁移,默认值为 1,表示开启自动 S3 迁移,可配置为 0。 | +|s3MigrateEnabled | 是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1。 | ### 检查配置参数可用性 @@ -108,9 +108,37 @@ s3migrate database ; | # | 参数 | 默认值 | 最小值 | 最大值 | 描述 | | :--- | :----------- | :----- | :----- | :------ | :----------------------------------------------------------- | -| 1 | s3_keeplocal | 3650 | 1 | 365000 | 数据在本地保留的天数,即 data 文件在本地磁盘保留多长时间后可以上传到 S3。默认单位:天,支持 m(分钟)、h(小时)和 d(天)三个单位 | -| 2 | s3_chunksize | 262144 | 131072 | 1048576 | 上传对象的大小阈值,与 TSDB_PAGESIZE 参数一样,不可修改,单位为 TSDB 页 | -| 3 | s3_compact | 0 | 0 | 1 | TSDB 文件组首次上传 S3 时,是否自动进行 compact 操作。 | +| 1 | s3_keeplocal | 365 | 1 | 365000 | 数据在本地保留的天数,即 data 文件在本地磁盘保留多长时间后可以上传到 S3。默认单位:天,支持 m(分钟)、h(小时)和 d(天)三个单位 | +| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | 上传对象的大小阈值,与 tsdb_pagesize 参数一样,不可修改,单位为 TSDB 页 | +| 3 | s3_compact | 1 | 0 | 1 | TSDB 文件组首次上传 S3 时,是否自动进行 compact 操作。 | + +### 对象存储读写次数估算 + +对象存储服务的使用成本与存储的数据量及请求次数相关,下面分别介绍数据的上传及下载过程。 + +#### 数据上传 + +当 TSDB 时序数据超过 `s3_keeplocal` 参数指定的时间,相关的数据文件会被切分成多个文件块,每个文件块的默认大小是 512M 字节 (`s3_chunkpages * tsdb_pagesize`)。除了最后一个文件块保留在本地文件系统外,其余的文件块会被上传到对象存储服务。 + +```math +上传次数 = 数据文件大小 / (s3_chunkpages * tsdb_pagesize) - 1 +``` + +在创建数据库时,可以通过 `s3_chunkpages` 参数调整每个文件块的大小,从而控制每个数据文件的上传次数。 + +其它类型的文件如 head, stt, sma 等,保留在本地文件系统,以加速预计算相关查询。 + +#### 数据下载 + +在查询操作中,如果需要访问对象存储中的数据,TSDB 不会下载整个数据文件,而是计算所需数据在文件中的位置,只下载相应的数据到 TSDB 页缓存中,然后将数据返回给查询执行引擎。后续查询首先检查页缓存,查看数据是否已被缓存。如果数据已缓存,则直接使用缓存中的数据,而无需重复从对象存储下载,从而有效降低从对象存储下载数据的次数。 + +相邻的多个数据页会作为一个数据块从对象存储下载一次,以减少从对象存储下载的次数。每个数据页的大小,在创建数据库时,通过 `tsdb_pagesize` 参数指定,默认 4K 字节。 + +```math +下载次数 = 查询需要的数据块数量 - 已缓存的数据块数量 +``` 
+ +页缓存是内存缓存,节点重启后,再次查询需要重新下载数据。缓存采用 LRU (Least Recently Used) 策略,当缓存空间不足时,最近最少使用的数据将被淘汰。缓存的大小可以通过 `s3PageCacheSize` 参数进行调整,通常来说,缓存越大,下载次数越少。 ## Azure Blob 存储 本节介绍在 TDengine Enterprise 如何使用微软 Azure Blob 对象存储。本功能是上一小节‘对象存储’功能的扩展,需额外依赖 Flexify 服务提供的 S3 网关。通过适当的参数配置,可以把大部分较冷的时序数据存储到 Azure Blob 服务中。 @@ -135,3 +163,15 @@ s3BucketName td-test - 认为全部 S3 服务均指向同一数据源,对各个 S3 服务操作完全等价 - 在某一 S3 服务上操作失败后会切换至其他服务,全部服务都失败后将返回最后产生的错误码 - 最大支持的 S3 服务配置数为 10 + +### 不依赖 Flexify 服务 + +用户界面同 S3,不同的地方在于下面三个参数的配置: + +| # | 参数 | 示例值 | 描述 | +| :--- | :----------- | :--------------------------------------- | :----------------------------------------------------------- | +| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL | +| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | 冒号分隔的用户 accountId:accountKey | +| 3 | s3BucketName | test-container | Container name | + +其中 fd2d01c73 是账户 ID;微软 Blob 存储服务只支持 Https 协议,不支持 Http。 diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index fbf086bf6bc..89a97b108d0 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -33,7 +33,7 @@ taosd 命令行参数如下 | secondEp | taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,缺省值:无 | | fqdn | 启动 taosd 后所监听的服务地址,缺省值:所在服务器上配置的第一个 hostname | | serverPort | 启动 taosd 后所监听的端口,缺省值:6030 | -| numOfRpcSessions | 允许一个客户端能创建的最大连接数,取值范围 100-100000,缺省值:30000 | +| numOfRpcSessions | 允许一个 dnode 能发起的最大连接数,取值范围 100-100000,缺省值:30000 | | timeToGetAvailableConn | 获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值:500000 | ### 监控相关 @@ -156,14 +156,14 @@ charset 的有效值是 UTF-8。 ### 内存相关 | 参数名称 | 参数说明 | | :----------------: | :---------------------------------------------: | -| rpcQueueMemoryAllowed | 一个 dnode 允许的 rpc 消息占用的内存最大值,单位 bytes,取值范围:10485760-INT64_MAX,缺省值:服务器内存的 1/10 | -| syncLogBufferMemoryAllowed | 一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围:10485760-INT64_MAX,缺省值:服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 
| +| rpcQueueMemoryAllowed | 一个 dnode 允许的 rpc 消息占用的内存最大值,单位 bytes,取值范围:104857600-INT64_MAX,缺省值:服务器内存的 1/10 | +| syncLogBufferMemoryAllowed | 一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围:104857600-INT64_MAX,缺省值:服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 | ### 性能调优 | 参数名称 | 参数说明 | | :----------------: | :---------------------------------------------: | -| numOfCommitThreads | 写入线程的最大数量,取值范围 0-1024,缺省值为 4 | +| numOfCommitThreads | 落盘线程的最大数量,取值范围 0-1024,缺省值为 4 | ### 日志相关 @@ -180,6 +180,7 @@ charset 的有效值是 UTF-8。 | tmrDebugFlag | 定时器模块的日志开关,取值范围同上 | | uDebugFlag | 共用功能模块的日志开关,取值范围同上 | | rpcDebugFlag | rpc 模块的日志开关,取值范围同上 | +| cDebugFlag | 客户端模块的日志开关,取值范围同上 | | jniDebugFlag | jni 模块的日志开关,取值范围同上 | | qDebugFlag | query 模块的日志开关,取值范围同上 | | dDebugFlag | dnode 模块的日志开关,取值范围同上,缺省值 135 | @@ -223,16 +224,16 @@ lossyColumns float|double | 参数名称 | 参数说明 | | :--------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| enableCoreFile | crash 时是否生成 core 文件;0: 不生成,1:生成;默认值 为 1; 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下
2、手动启动,就在 taosd 执行目录下。 | -| udf | 是否启动 UDF 服务;0: 不启动,1:启动;默认值 为 0 | -| ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变; 0: 不改变,1:改变 ;默认值 为 | -| tmqMaxTopicNum | 订阅最多可建立的 topic 数量; 取值范围 1-10000;缺省值 为20 | -| maxTsmaNum | 集群内可创建的TSMA个数;取值范围:0-3;缺省值: 3 | +| enableCoreFile | crash 时是否生成 core 文件;0: 不生成,1:生成;默认值为 1; 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下
2、手动启动,就在 taosd 执行目录下。 | +| udf | 是否启动 UDF 服务;0: 不启动,1:启动;默认值为 0 | +| ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变; 0: 不改变,1:改变;默认值为 0 | +| tmqMaxTopicNum | 订阅最多可建立的 topic 数量; 取值范围 1-10000;缺省值为20 | +| maxTsmaNum | 集群内可创建的TSMA个数;取值范围:0-3;缺省值为 3 | ## taosd 监控指标 -taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。 +taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。 ### taosd\_cluster\_basic 表 @@ -458,4 +459,4 @@ TDengine 的日志文件主要包括普通日志和慢日志两种类型。 3. 多个客户端的日志存储在相应日志路径下的同一个 taosSlowLog.yyyy.mm.dd 文件里。 4. 慢日志文件不自动删除,不压缩。 5. 使用和普通日志文件相同的三个参数 logDir, minimalLogDirGB, asyncLog。另外两个参数 numOfLogLines,logKeepDays 不适用于慢日志。 - \ No newline at end of file + diff --git a/docs/zh/14-reference/01-components/02-taosc.md b/docs/zh/14-reference/01-components/02-taosc.md index 5f22ebe8d55..bd1e700041e 100755 --- a/docs/zh/14-reference/01-components/02-taosc.md +++ b/docs/zh/14-reference/01-components/02-taosc.md @@ -10,7 +10,7 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在 | 参数名称 | 参数含义 | |:-----------:|:----------------------------------------------------------:| -|firstEp | taos 启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:${hostname}:6030,若无法获取 ${hostname},则赋值为 localhost | +|firstEp | taos 启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:hostname:6030,若无法获取该服务器的 hostname,则赋值为 localhost | |secondEp | 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值 | |numOfRpcSessions | 一个客户端能创建的最大连接数,取值范围:10-50000000(单位为毫秒);缺省值:500000 | |telemetryReporting | 是否上传 telemetry,0: 不上传,1: 上传;缺省值:1 | @@ -35,6 +35,7 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在 |smlAutoChildTableNameDelimiter | schemaless tag之间的连接符,连起来作为子表名,无缺省值 | |smlTagName | schemaless tag 为空时默认的 tag 名字, 缺省值 "_tag_null" | |smlTsDefaultName | schemaless自动建表的时间列名字通过该配置设置, 缺省值 "_ts" | +|smlDot2Underline | schemaless 把超级表名中的 dot 转成下划线 | |enableCoreFile | crash 时是否生成 core 文件,0: 不生成, 1: 生成;缺省值:1 | |enableScience | 是否开启科学计数法显示浮点数; 0: 不开始, 1: 开启;缺省值:1 | |compressMsgSize | 是否对 RPC 
消息进行压缩; -1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩; 缺省值 -1| diff --git a/docs/zh/14-reference/01-components/06-taoskeeper.md b/docs/zh/14-reference/01-components/06-taoskeeper.md index c3d22d25f18..00b1f1ee51c 100644 --- a/docs/zh/14-reference/01-components/06-taoskeeper.md +++ b/docs/zh/14-reference/01-components/06-taoskeeper.md @@ -13,7 +13,7 @@ taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的 taosKeeper 有两种安装方式: -- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper, 详情请参考[ TDengine 安装](../../../get-started/)。 +- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper, 详情请参考[TDengine 安装](../../../get-started/)。 - 单独编译 taosKeeper 并安装,详情请参考 [taosKeeper](https://github.com/taosdata/taoskeeper) 仓库。 @@ -22,55 +22,64 @@ taosKeeper 有两种安装方式: taosKeeper 需要在操作系统终端执行,该工具支持三种配置方式:命令行参数、环境变量 和 配置文件。优先级为:命令行参数、环境变量、配置文件参数。 一般我们推荐使用配置文件。 ### 命令行参数和环境变量 + 命令行参数 和 环境变量说明可以参考命令 `taoskeeper --help` 的输出。下面是一个例子: + ```shell -Usage of taosKeeper v3.3.2.0: - --debug enable debug mode. Env "TAOS_KEEPER_DEBUG" - -P, --port int http port. Env "TAOS_KEEPER_PORT" (default 6043) - --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_KEEPER_LOG_LEVEL" (default "info") - --gopoolsize int coroutine size. Env "TAOS_KEEPER_POOL_SIZE" (default 50000) - -R, --RotationInterval string interval for refresh metrics, such as "300ms", Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Env "TAOS_KEEPER_ROTATION_INTERVAL" (default "15s") - --tdengine.host string TDengine server's ip. Env "TAOS_KEEPER_TDENGINE_HOST" (default "127.0.0.1") - --tdengine.port int TDengine REST server(taosAdapter)'s port. Env "TAOS_KEEPER_TDENGINE_PORT" (default 6041) - --tdengine.username string TDengine server's username. Env "TAOS_KEEPER_TDENGINE_USERNAME" (default "root") - --tdengine.password string TDengine server's password. Env "TAOS_KEEPER_TDENGINE_PASSWORD" (default "taosdata") - --tdengine.usessl TDengine server use ssl or not. 
Env "TAOS_KEEPER_TDENGINE_USESSL" - --metrics.prefix string prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX" - --metrics.database.name string database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE" (default "log") - --metrics.tables stringArray export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES" - --environment.incgroup whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP" - --log.path string log path. Env "TAOS_KEEPER_LOG_PATH" (default "/var/log/taos") - --log.rotationCount uint log rotation count. Env "TAOS_KEEPER_LOG_ROTATION_COUNT" (default 5) - --log.rotationTime duration log rotation time. Env "TAOS_KEEPER_LOG_ROTATION_TIME" (default 24h0m0s) - --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE" (default "100000000") - -c, --config string config path default /etc/taos/taoskeeper.toml - -V, --version Print the version and exit - -h, --help Print this help message and exit +Usage of taoskeeper v3.3.3.0: + -R, --RotationInterval string interval for refresh metrics, such as "300ms", Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Env "TAOS_KEEPER_ROTATION_INTERVAL" (default "15s") + -c, --config string config path default /etc/taos/taoskeeper.toml + --drop string run taoskeeper in command mode, only support old_taosd_metric_stables. + --environment.incgroup whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP" + --fromTime string parameter of transfer, example: 2020-01-01T00:00:00+08:00 (default "2020-01-01T00:00:00+08:00") + --gopoolsize int coroutine size. Env "TAOS_KEEPER_POOL_SIZE" (default 50000) + -h, --help Print this help message and exit + --instanceId int instance ID. Env "TAOS_KEEPER_INSTANCE_ID" (default 64) + --log.compress whether to compress old log. Env "TAOS_KEEPER_LOG_COMPRESS" + --log.keepDays uint log retention days, must be a positive integer. 
Env "TAOS_KEEPER_LOG_KEEP_DAYS" (default 30) + --log.level string log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL" (default "info") + --log.path string log path. Env "TAOS_KEEPER_LOG_PATH" (default "/var/log/taos") + --log.reservedDiskSize string reserved disk size for log dir (KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE" (default "1GB") + --log.rotationCount uint log rotation count. Env "TAOS_KEEPER_LOG_ROTATION_COUNT" (default 5) + --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE" (default "1GB") + --log.rotationTime duration deprecated: log rotation time always 24 hours. Env "TAOS_KEEPER_LOG_ROTATION_TIME" (default 24h0m0s) + --logLevel string log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL" (default "info") + --metrics.database.name string database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE" (default "log") + --metrics.database.options.buffer int database option buffer for audit database. Env "TAOS_KEEPER_METRICS_BUFFER" (default 64) + --metrics.database.options.cachemodel string database option cachemodel for audit database. Env "TAOS_KEEPER_METRICS_CACHEMODEL" (default "both") + --metrics.database.options.keep int database option buffer for audit database. Env "TAOS_KEEPER_METRICS_KEEP" (default 90) + --metrics.database.options.vgroups int database option vgroups for audit database. Env "TAOS_KEEPER_METRICS_VGROUPS" (default 1) + --metrics.prefix string prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX" + --metrics.tables stringArray export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES" + -P, --port int http port. Env "TAOS_KEEPER_PORT" (default 6043) + --tdengine.host string TDengine server's ip. Env "TAOS_KEEPER_TDENGINE_HOST" (default "127.0.0.1") + --tdengine.password string TDengine server's password. 
Env "TAOS_KEEPER_TDENGINE_PASSWORD" (default "taosdata") + --tdengine.port int TDengine REST server(taosAdapter)'s port. Env "TAOS_KEEPER_TDENGINE_PORT" (default 6041) + --tdengine.username string TDengine server's username. Env "TAOS_KEEPER_TDENGINE_USERNAME" (default "root") + --tdengine.usessl TDengine server use ssl or not. Env "TAOS_KEEPER_TDENGINE_USESSL" + --transfer string run taoskeeper in command mode, only support old_taosd_metric. transfer old metrics data to new tables and exit + -V, --version Print the version and exit ``` - - ### 配置文件 -taosKeeper 支持用 `taoskeeper -c ` 命令来指定配置文件。 -若不指定配置文件,taosKeeper 会使用默认配置文件,其路径为: `/etc/taos/taoskeeper.toml` 。 +taosKeeper 支持用 `taoskeeper -c ` 命令来指定配置文件。 +若不指定配置文件,taosKeeper 会使用默认配置文件,其路径为: `/etc/taos/taoskeeper.toml` 。 若既不指定 taosKeeper 配置文件,且 `/etc/taos/taoskeeper.toml` 也不存在,将使用默认配置。 **下面是配置文件的示例:** + ```toml -# Start with debug middleware for gin -debug = false +# The ID of the currently running taoskeeper instance, default is 64. +instanceId = 64 -# Listen port, default is 6043 +# Listening port, default is 6043. port = 6043 -# log level -loglevel = "info" - -# go pool size +# Go pool size gopoolsize = 50000 -# interval for metrics +# Interval for metrics RotationInterval = "15s" [tdengine] @@ -81,20 +90,21 @@ password = "taosdata" usessl = false [metrics] -# metrics prefix in metrics names. +# Metrics prefix in metrics names. prefix = "taos" -# export some tables that are not super table +# Export some tables that are not super table. tables = [] -# database for storing metrics data +# Database for storing metrics data. [metrics.database] name = "log" -# database options for db storing metrics data + +# Database options for db storing metrics data. 
[metrics.database.options] vgroups = 1 buffer = 64 -KEEP = 90 +keep = 90 cachemodel = "both" [environment] @@ -102,9 +112,19 @@ cachemodel = "both" incgroup = false [log] -rotationCount = 5 -rotationTime = "24h" -rotationSize = 100000000 +# The directory where log files are stored. +# path = "/var/log/taos" +level = "info" +# Number of log file rotations before deletion. +rotationCount = 30 +# The number of days to retain log files. +keepDays = 30 +# The maximum size of a log file before rotation. +rotationSize = "1GB" +# If set to true, log files will be compressed. +compress = false +# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit. +reservedDiskSize = "1GB" ``` ## 启动 @@ -118,7 +138,6 @@ monitorFqdn localhost # taoskeeper 服务的 FQDN TDengine 监控配置相关,具体请参考:[TDengine 监控配置](../../../operation/monitor)。 - @@ -188,8 +207,7 @@ Active: inactive (dead) - -## 健康检查 +## 健康检查 可以访问 taosKeeper 的 `check_health` 接口来判断服务是否存活,如果服务正常则会返回 HTTP 200 状态码: @@ -208,7 +226,6 @@ Content-Length: 21 {"version":"3.3.2.3"} ``` - ## 数据收集与监控 taosKeeper 作为 TDengine 监控指标的导出工具,可以将 TDengine 产生的监控数据记录在指定数据库中(默认的监控数据是 `log`),这些监控数据可以用来配置 TDengine 监控。 @@ -216,6 +233,7 @@ taosKeeper 作为 TDengine 监控指标的导出工具,可以将 TDengine 产 ### 查看监控数据 可以查看 `log` 库下的超级表,每个超级表都对应一组监控指标,具体指标不再赘述。 + ```shell taos> use log; Database changed. 
@@ -251,17 +269,14 @@ taos> select last_row(*) from taosd_dnodes_info; Query OK, 1 row(s) in set (0.003168s) ``` - ### 使用 TDInsight 配置监控 -收集到监控数据以后,就可以使用 TDInsight 来配置 TDengine 的监控,具体请参考 [TDinsight 参考手册](../tdinsight/) - +收集到监控数据以后,就可以使用 TDInsight 来配置 TDengine 的监控,具体请参考 [TDinsight 参考手册](../tdinsight/)。 ## 集成 Prometheus taoskeeper 提供了 `/metrics` 接口,返回了 Prometheus 格式的监控数据,Prometheus 可以从 taoskeeper 抽取监控数据,实现通过 Prometheus 监控 TDengine 的目的。 - ### 导出监控指标 下面通过 `curl` 命令展示 `/metrics` 接口返回的数据格式: @@ -298,9 +313,11 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 #### taosd 集群 ##### 监控信息支持的标签 + - `cluster_id`: 集群 id ##### 相关指标及其含义 + | 指标名称 | 类型 | 含义 | | ----------------------------------- | ------- | ------------------------------------- | | taos_cluster_info_connections_total | counter | 总连接数 | @@ -328,11 +345,13 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 #### dnode ##### 监控信息支持的标签 + - `cluster_id`: 集群 id - `dnode_ep`: dnode 端点 - `dnode_id`:dnode id ##### 相关指标及其含义 + | 指标名称 | 类型 | 含义 | | ------------------------------ | ------- | ---------------------------------------------------------------------------------------- | | taos_d_info_status | gauge | dnode 状态,标签 value 表示状态, ready 表示正常, offline 表示下线, unknown 表示未知。 | @@ -361,13 +380,15 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 #### 数据目录 ##### 监控信息支持的标签 + - `cluster_id`: 集群 id - `dnode_ep`: dnode 端点 - `dnode_id`:dnode id - `data_dir_name`:数据目录名 -- `data_dir_level`:数据目录级别 +- `data_dir_level`:数据目录级别 ##### 相关指标及其含义 + | 指标名称 | 类型 | 含义 | | --------------------------------- | ----- | -------------------- | | taos_taosd_dnodes_data_dirs_avail | gauge | 可用空间(单位 Byte) | @@ -377,12 +398,14 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 #### 日志目录 ##### 监控信息支持的标签 + - `cluster_id`: 集群 id - `dnode_ep`: dnode 端点 - `dnode_id`:dnode id - `log_dir_name`:日志目录名 ##### 相关指标及其含义 + | 指标名称 | 类型 | 含义 | | -------------------------------- | 
----- | -------------------- | | taos_taosd_dnodes_log_dirs_avail | gauge | 可用空间(单位 Byte) | @@ -392,11 +415,13 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 #### 日志数量 ##### 监控信息支持的标签 + - `cluster_id`: 集群 id - `dnode_ep`: dnode 端点 - `dnode_id`:dnode id ##### 相关指标及其含义 + | 指标名称 | 类型 | 含义 | | ---------------------- | ------- | ------------ | | taos_log_summary_debug | counter | 调试日志数量 | @@ -404,14 +429,15 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 | taos_log_summary_info | counter | 信息日志数量 | | taos_log_summary_trace | counter | 跟踪日志数量 | - #### taosadapter ##### 监控信息支持的标签 + - `endpoint`:端点 - `req_type`:请求类型,0 表示 rest,1 表示 websocket ##### 相关指标及其含义 + | 指标名称 | 类型 | 含义 | | -------------------------------------- | ------- | -------------------- | | taos_adapter_requests_fail | counter | 失败的请求数 | @@ -433,9 +459,11 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 #### taoskeeper ##### 监控信息支持的标签 + - `identify`: 节点 endpoint ##### 相关指标及其含义 + | 指标名称 | 类型 | 含义 | | ----------------------- | ----- | ------------------------------------- | | taos_keeper_monitor_cpu | gauge | taoskeeper CPU 使用率(取值范围 0~1) | @@ -444,6 +472,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 #### 其他 taosd 集群监控项 ##### taos_m_info_role + - **标签**: - `cluster_id`: 集群 id - `mnode_ep`: mnode 端点 @@ -453,6 +482,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 - **含义**: mnode 角色 ##### taos_taos_sql_req_count + - **标签**: - `cluster_id`: 集群 id - `result`: 请求结果(取值范围: Success, Failed) @@ -462,6 +492,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 - **含义**: SQL 请求数量 ##### taos_taosd_sql_req_count + - **标签**: - `cluster_id`: 集群 id - `dnode_ep`: dnode 端点 @@ -474,6 +505,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 - **含义**: SQL 请求数量 ##### taos_taosd_vgroups_info_status + - **标签**: - `cluster_id`: 集群 id - `database_name`: 数据库名称 @@ 
-482,6 +514,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 - **含义**: 虚拟组状态。 0 为 unsynced,表示没有leader选出;1 为 ready。 ##### taos_taosd_vgroups_info_tables_num + - **标签**: - `cluster_id`: 集群 id - `database_name`: 数据库名称 @@ -490,6 +523,7 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 - **含义**: 虚拟组表数量 ##### taos_taosd_vnodes_info_role + - **标签**: - `cluster_id`: 集群 id - `database_name`: 数据库名称 @@ -499,7 +533,6 @@ taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1 - **类型**: gauge - **含义**: 虚拟节点角色 - ### 抽取配置 Prometheus 提供了 `scrape_configs` 配置如何从 endpoint 抽取监控数据,通常只需要修改 `static_configs` 中的 targets 配置为 taoskeeper 的 endpoint 地址,更多配置信息请参考 [Prometheus 配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config)。 @@ -521,8 +554,6 @@ scrape_configs: 在 Grafana Dashboard 菜单点击 `import`,dashboard ID 填写 `18587`,点击 `Load` 按钮即可导入 `TaosKeeper Prometheus Dashboard for 3.x` dashboard。 - - ## taosKeeper 监控指标 taosKeeper 也会将自己采集的监控数据写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。 diff --git a/docs/zh/14-reference/01-components/07-explorer.md b/docs/zh/14-reference/01-components/07-explorer.md index 499fb3697c8..eab4aef15b3 100644 --- a/docs/zh/14-reference/01-components/07-explorer.md +++ b/docs/zh/14-reference/01-components/07-explorer.md @@ -8,7 +8,7 @@ taosExplorer 是一个为用户提供 TDengine 实例的可视化管理交互工 ## 安装 -taosEexplorer 无需单独安装,从 TDengine 3.3.0.0 版本开始,它随着 TDengine 安装包一起发布,安装完成后,就可以看到 `taos-explorer` 服务。如果按照 GitHub 里步骤自己编译 TDengine 源代码生成的安装包不包含 taosExplorer。 +taosExplorer 无需单独安装,从 TDengine 3.3.0.0 版本开始,它随着 TDengine 安装包一起发布,安装完成后,就可以看到 `taos-explorer` 服务。如果按照 GitHub 里步骤自己编译 TDengine 源代码生成的安装包不包含 taosExplorer。 ## 配置 diff --git a/docs/zh/14-reference/02-tools/10-taosbenchmark.md b/docs/zh/14-reference/02-tools/10-taosbenchmark.md index 3c43c589151..d6552905778 100644 --- a/docs/zh/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/zh/14-reference/02-tools/10-taosbenchmark.md @@ -364,6 +364,8 @@ 
taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **max** : 数据类型的 列/标签 的最大值。生成的值将小于最小值。 +- **scalingFactor** : 浮点数精度增强因子,仅当数据类型是 float/double 时生效,有效值范围为 1 至 1000000 的正整数。用于增强生成浮点数的精度,特别是在 min 或 max 值较小的情况下。此属性按 10 的幂次增强小数点后的精度:scalingFactor 为 10 表示增强 1 位小数精度,100 表示增强 2 位,依此类推。 + - **fun** : 此列数据以函数填充,目前只支持 sin 和 cos 两函数,输入参数为时间戳换算成角度值,换算公式: 角度 x = 输入的时间列ts值 % 360。同时支持系数调节,随机波动因子调节,以固定格式的表达式展现,如 fun=“10\*sin(x)+100\*random(5)” , x 表示角度,取值 0 ~ 360度,增长步长与时间列步长一致。10 表示乘的系数,100 表示加或减的系数,5 表示波动幅度在 5% 的随机范围内。目前支持的数据类型为 int, bigint, float, double 四种数据类型。注意:表达式为固定模式,不可前后颠倒。 - **values** : nchar/binary 列/标签的值域,将从值中随机选择。 diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md index 7d040a2c44f..91b39976a1a 100644 --- a/docs/zh/14-reference/03-taos-sql/02-database.md +++ b/docs/zh/14-reference/03-taos-sql/02-database.md @@ -30,6 +30,7 @@ database_option: { | SINGLE_STABLE {0 | 1} | TABLE_PREFIX value | TABLE_SUFFIX value + | DNODES value | TSDB_PAGESIZE value | WAL_LEVEL {1 | 2} | WAL_FSYNC_PERIOD value @@ -63,19 +64,20 @@ database_option: { - MAXROWS:文件块中记录的最大条数,默认为 4096 条。 - MINROWS:文件块中记录的最小条数,默认为 100 条。 - KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于3倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。 -- STT_TRIGGER:表示落盘文件触发文件合并的个数。默认为 1,范围 1 到 16。对于少表高频场景,此参数建议使用默认配置,或较小的值;而对于多表低频场景,此参数建议配置较大的值。 +- STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。 - SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。 - 0:表示可以创建多张超级表。 - 1:表示只可以创建一张超级表。 - TABLE_PREFIX:当其为正值时,在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的前缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的前缀;例如,假定表名为 "v30001",当 
TSDB_PREFIX = 2 时 使用 "0001" 来决定分配到哪个 vgroup ,当 TSDB_PREFIX = -2 时使用 "v3" 来决定分配到哪个 vgroup - TABLE_SUFFIX:当其为正值时,在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的后缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的后缀;例如,假定表名为 "v30001",当 TSDB_SUFFIX = 2 时 使用 "v300" 来决定分配到哪个 vgroup ,当 TSDB_SUFFIX = -2 时使用 "01" 来决定分配到哪个 vgroup。 - TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB到 16 MB。 +- DNODES:指定 VNODE 所在的 DNODE 列表,如 '1,2,3',以逗号区分且字符间不能有空格,仅企业版支持。 - WAL_LEVEL:WAL 级别,默认为 1。 - 1:写 WAL,但不执行 fsync。 - 2:写 WAL,而且执行 fsync。 - WAL_FSYNC_PERIOD:当 WAL_LEVEL 参数设置为 2 时,用于设置落盘的周期。默认为 3000,单位毫秒。最小为 0,表示每次写入立即落盘;最大为 180000,即三分钟。 -- WAL_RETENTION_PERIOD: 为了数据订阅消费,需要WAL日志文件额外保留的最大时长策略。WAL日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 3600,表示在 WAL 保留最近 3600 秒的数据,请根据数据订阅的需要修改这个参数为适当值。 -- WAL_RETENTION_SIZE:为了数据订阅消费,需要WAL日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。 +- WAL_RETENTION_PERIOD: 为了数据订阅消费,需要 WAL 日志文件额外保留的最大时长策略。WAL 日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 3600,表示在 WAL 保留最近 3600 秒的数据,请根据数据订阅的需要修改这个参数为适当值。 +- WAL_RETENTION_SIZE:为了数据订阅消费,需要 WAL 日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。 ### 创建数据库示例 ```sql @@ -120,6 +122,7 @@ alter_database_option: { | KEEP value | WAL_RETENTION_PERIOD value | WAL_RETENTION_SIZE value + | MINROWS value } ``` diff --git a/docs/zh/14-reference/03-taos-sql/03-table.md b/docs/zh/14-reference/03-taos-sql/03-table.md index cad9190bd90..81ad60e3d23 100644 --- a/docs/zh/14-reference/03-taos-sql/03-table.md +++ b/docs/zh/14-reference/03-taos-sql/03-table.md @@ -42,12 +42,12 @@ table_option: { **使用说明** 1. 表(列)名命名规则参见[名称命名规则](./19-limit.md#名称命名规则)。 -1. 表名最大长度为 192。 -1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键。 -1. 除时间戳主键列之外,还可以通过 PRIMARY KEY 关键字指定第二列为额外的主键列。被指定为主键列的第二列必须为整型或字符串类型(varchar)。 -1. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB);(注意:每个 BINARY/NCHAR/GEOMETRY 类型的列还会额外占用 2 个字节的存储位置)。 -1. 使用数据类型 BINARY/NCHAR/GEOMETRY,需指定其最长的字节数,如 BINARY(20),表示 20 字节。 -1. 关于 `ENCODE` 和 `COMPRESS` 的使用,请参考[按列压缩](../compress) +2. 表名最大长度为 192。 +3. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键。 +4. 
除时间戳主键列之外,还可以通过 PRIMARY KEY 关键字指定第二列为额外的主键列。被指定为主键列的第二列必须为整型或字符串类型(VARCHAR)。 +5. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB);(注意:每个 VARCHAR/NCHAR/GEOMETRY 类型的列还会额外占用 2 个字节的存储位置)。 +6. 使用数据类型 VARCHAR/NCHAR/GEOMETRY,需指定其最长的字节数,如 VARCHAR(20),表示 20 字节。 +7. 关于 `ENCODE` 和 `COMPRESS` 的使用,请参考[按列压缩](../compress) **参数说明** @@ -87,7 +87,7 @@ CREATE TABLE [IF NOT EXISTS] USING [db_name.]stb_name (field1_name [, field2_nam **参数说明** -1. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。CSV 文件中应仅包含 table name 与 tag 值。如需插入数据,请参考数据写入章节。 +1. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。CSV 文件中应仅包含 table name 与 tag 值。如需插入数据,请参考'数据写入'章节。 2. 为指定的 stb_name 创建子表,该超级表必须已经存在。 3. field_name 列表顺序与 CSV 文件各列内容顺序一致。列表中不允许出现重复项,且必须包含 `tbname`,可包含零个或多个超级表中已定义的标签列。未包含在列表中的标签值将被设置为 NULL。 diff --git a/docs/zh/14-reference/03-taos-sql/05-insert.md b/docs/zh/14-reference/03-taos-sql/05-insert.md index b2c34f4c558..40f8e95006c 100644 --- a/docs/zh/14-reference/03-taos-sql/05-insert.md +++ b/docs/zh/14-reference/03-taos-sql/05-insert.md @@ -1,7 +1,7 @@ --- sidebar_label: 数据写入 title: 数据写入 -description: 写入数据的详细语法 +description: 写入数据的详细语法 --- ## 写入语法 @@ -25,9 +25,9 @@ INSERT INTO tb_name [(field1_name, ...)] subquery ### 超级表语法 ```sql INSERT INTO - stb1_name [(field1_name, ...)] + stb1_name [(field1_name, ...)] VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path - [stb2_name [(field1_name, ...)] + [stb2_name [(field1_name, ...)] VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path ...]; ``` @@ -47,7 +47,7 @@ INSERT INTO 2. VALUES 语法表示了要插入的一行或多行数据。 -3. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。 +3. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。如仅需创建子表,请参考'表'章节。 4. `INSERT ... VALUES` 语句和 `INSERT ... 
FILE` 语句均可以在一条 INSERT 语句中同时向多个表插入数据。 @@ -154,12 +154,20 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv' d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; ``` -## 超级表语法 +## 向超级表插入数据并自动创建子表 -自动建表, 表名通过tbname列指定 +自动建表, 表名通过 tbname 列指定 ```sql -INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) - values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32) +INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) + VALUES ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33) - ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33) + ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33) +``` +## 通过 CSV 文件向超级表插入数据并自动创建子表 + +根据 csv 文件内容,为 超级表创建子表,并填充相应 column 与 tag + +```sql +INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) + FILE '/tmp/csvfile_21002.csv' ``` diff --git a/docs/zh/14-reference/03-taos-sql/07-tag-index.md b/docs/zh/14-reference/03-taos-sql/07-tag-index.md index 383c5b2a1f0..364d465ba39 100644 --- a/docs/zh/14-reference/03-taos-sql/07-tag-index.md +++ b/docs/zh/14-reference/03-taos-sql/07-tag-index.md @@ -11,7 +11,7 @@ description: 使用标签索引提升查询性能 创建索引的语法如下 ```sql -CREATE INDEX index_name ON tbl_name (tagColName) +CREATE INDEX index_name ON tbl_name (tagColName) ``` 其中 `index_name` 为索引名称, `tbl_name` 为超级表名称,`tagColName` 为要在其上建立索引的 tag 列的名称。`tagColName` 的类型不受限制,即任何类型的 tag 列都可以建立索引。 diff --git a/docs/zh/14-reference/03-taos-sql/10-function.md b/docs/zh/14-reference/03-taos-sql/10-function.md index 8c882b32372..bbc6a0b81a1 100644 --- a/docs/zh/14-reference/03-taos-sql/10-function.md +++ b/docs/zh/14-reference/03-taos-sql/10-function.md @@ -1569,7 +1569,7 @@ COUNT({* | expr}) 
ELAPSED(ts_primary_key [, time_unit]) ``` -**功能说明**:elapsed函数表达了统计周期内连续的时间长度,和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有INTERVAL子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED返回的并不是时间范围的绝对值,而是绝对值除以time_unit所得到的单位个数。 +**功能说明**:elapsed 函数表达了统计周期内连续的时间长度,和 twa 函数配合使用可以计算统计曲线下的面积。在通过 INTERVAL 子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有 INTERVAL 子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED 返回的并不是时间范围的绝对值,而是绝对值除以 time_unit 所得到的单位个数。流计算仅在 FORCE_WINDOW_CLOSE 模式下支持该函数。 **返回结果类型**:DOUBLE。 @@ -1578,15 +1578,15 @@ ELAPSED(ts_primary_key [, time_unit]) **适用于**: 表,超级表,嵌套查询的外层查询 **说明**: -- ts_primary_key参数只能是表的第一列,即 TIMESTAMP 类型的主键列。 -- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。支持的时间单位 time_unit 如下: +- ts_primary_key 参数只能是表的第一列,即 TIMESTAMP 类型的主键列。 +- 按 time_unit 参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。支持的时间单位 time_unit 如下: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 -- 可以和interval组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。 -- order by asc/desc不影响差值的计算结果。 -- 对于超级表,需要和group by tbname子句组合使用,不可以直接使用。 -- 对于普通表,不支持和group by子句组合使用。 -- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如select elapsed(ts) from (select diff(value) from sub1)语句,diff函数会让内层查询输出隐式时间戳列,此为主键列,可以用于elapsed函数的第一个参数。相反,例如select elapsed(ts) from (select * from sub1) 语句,ts列输出到外层时已经没有了主键列的含义,无法使用elapsed函数。此外,elapsed函数作为一个与时间线强依赖的函数,形如select elapsed(ts) from (select diff(value) from st group by tbname)尽管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。 -- 不支持与leastsquares、diff、derivative、top、bottom、last_row、interp等函数混合使用。 +- 可以和 interval 组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。 +- order by asc/desc 不影响差值的计算结果。 +- 对于超级表,需要和 group by tbname 子句组合使用,不可以直接使用。 +- 对于普通表,不支持和 group by 子句组合使用。 +- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如 select elapsed(ts) from (select diff(value) from sub1) 语句,diff 函数会让内层查询输出隐式时间戳列,此为主键列,可以用于 elapsed 函数的第一个参数。相反,例如 select elapsed(ts) from (select * from sub1) 语句,ts 列输出到外层时已经没有了主键列的含义,无法使用 elapsed 函数。此外,elapsed 函数作为一个与时间线强依赖的函数,形如 
select elapsed(ts) from (select diff(value) from st group by tbname)尽 管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。 +- 不支持与 leastsquares、diff、derivative、top、bottom、last_row、interp 等函数混合使用。 ### LEASTSQUARES @@ -1829,14 +1829,14 @@ ignore_null_values: { - INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。 - INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。 -- INTERP 需要同时与 RANGE,EVERY 和 FILL 关键字一起使用。 -- INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2)字段来指定,需满足 timestamp1 \<= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。 +- INTERP SQL 查询需要同时与 RANGE,EVERY 和 FILL 关键字一起使用;流计算不能使用 RANGE,需要 EVERY 和 FILL 关键字一起使用。 +- INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2) 字段来指定,需满足 timestamp1 \<= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。 - INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值. - INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句) - INTERP 可以在 RANGE 字段中只指定唯一的时间戳对单个时间点进行插值,在这种情况下,EVERY 字段可以省略。例如:SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear). 
- INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。 -- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。 -- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。 +- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0 版本以后支持)。 +- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0 版本以后支持)。 - INTERP 对于带复合主键的表的查询,若存在相同时间戳的数据,则只有对应的复合主键最小的数据参与运算。 ### LAST @@ -2180,7 +2180,7 @@ STATEDURATION(expr, oper, val, unit) TWA(expr) ``` -**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。对于存在复合主键的表的查询,若时间戳相同的数据存在多条,则只有对应的复合主键最小的数据参与运算。 +**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。对于存在复合主键的表的查询,若时间戳相同的数据存在多条,则只有对应的复合主键最小的数据参与运算。流计算仅在 FORCE_WINDOW_CLOSE 模式下支持该函数。 **返回数据类型**:DOUBLE。 diff --git a/docs/zh/14-reference/03-taos-sql/12-distinguished.md b/docs/zh/14-reference/03-taos-sql/12-distinguished.md index e149c2c82e9..d7696b18598 100644 --- a/docs/zh/14-reference/03-taos-sql/12-distinguished.md +++ b/docs/zh/14-reference/03-taos-sql/12-distinguished.md @@ -76,7 +76,7 @@ window_clause: { FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种: 1. 不进行填充:NONE(默认填充模式)。 -2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。这里需要注意,最终填充的值受由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1, 若查询列表中有多列需要FILL, 则需要给每一个FILL列指定VALUE, 如`SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`。 +2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。这里需要注意,最终填充的值受由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1, 若查询列表中有多列需要 FILL, 则需要给每一个 FILL 列指定 VALUE, 如 `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`, 注意, SELECT 表达式中只有包含普通列时才需要指定 FILL VALUE, 如 `_wstart`, `_wstart+1a`, `now`, `1+1` 以及使用 partition by 时的 partition key (如 tbname)都不需要指定 VALUE, 如 `timediff(last(ts), _wstart)` 则需要指定VALUE。 3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。 4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。 5. 
LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。 diff --git a/docs/zh/14-reference/03-taos-sql/14-stream.md b/docs/zh/14-reference/03-taos-sql/14-stream.md index cd5c76a4ad3..0470ebf630b 100644 --- a/docs/zh/14-reference/03-taos-sql/14-stream.md +++ b/docs/zh/14-reference/03-taos-sql/14-stream.md @@ -143,13 +143,14 @@ SELECT * from information_schema.`ins_streams`; 在创建流时,可以通过 TRIGGER 指令指定流式计算的触发模式。 -对于非窗口计算,流式计算的触发是实时的;对于窗口计算,目前提供 3 种触发模式,默认为 WINDOW_CLOSE: +对于非窗口计算,流式计算的触发是实时的;对于窗口计算,目前提供 4 种触发模式,默认为 WINDOW_CLOSE: 1. AT_ONCE:写入立即触发 2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用) 3. MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。 +4. FORCE_WINDOW_CLOSE:以操作系统当前时间为准,只计算当前关闭窗口的结果,并推送出去。窗口只会在被关闭的时刻计算一次,后续不会再重复计算。该模式当前只支持 INTERVAL 窗口(不支持滑动);FILL_HISTORY 必须为 0,IGNORE EXPIRED 必须为 1,IGNORE UPDATE 必须为 1;FILL 只支持 PREV 、NULL、NONE、VALUE。 由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。 @@ -212,11 +213,11 @@ TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项 ```sql [field1_name,...] ``` -用来指定stb_name的列与subquery输出结果的对应关系。如果stb_name的列与subquery输出结果的位置、数量全部匹配,则不需要显示指定对应关系。如果stb_name的列与subquery输出结果的数据类型不匹配,会把subquery输出结果的类型转换成对应的stb_name的列的类型。 +在本页文档顶部的 [field1_name,...] 是用来指定 stb_name 的列与 subquery 输出结果的对应关系的。如果 stb_name 的列与 subquery 输出结果的位置、数量全部匹配,则不需要显示指定对应关系。如果 stb_name 的列与 subquery 输出结果的数据类型不匹配,会把 subquery 输出结果的类型转换成对应的 stb_name 的列的类型。 对于已经存在的超级表,检查列的schema信息 -1. 检查列的schema信息是否匹配,对于不匹配的,则自动进行类型转换,当前只有数据长度大于4096byte时才报错,其余场景都能进行类型转换。 -2. 检查列的个数是否相同,如果不同,需要显示的指定超级表与subquery的列的对应关系,否则报错;如果相同,可以指定对应关系,也可以不指定,不指定则按位置顺序对应。 +1. 检查列的 schema 信息是否匹配,对于不匹配的,则自动进行类型转换,当前只有数据长度大于 4096byte 时才报错,其余场景都能进行类型转换。 +2. 
检查列的个数是否相同,如果不同,需要显示的指定超级表与 subquery 的列的对应关系,否则报错;如果相同,可以指定对应关系,也可以不指定,不指定则按位置顺序对应。 ## 自定义TAG @@ -291,3 +292,4 @@ RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name; CREATE SNODE ON DNODE [id] ``` 其中的 id 是集群中的 dnode 的序号。请注意选择的dnode,流计算的中间状态将自动在其上进行备份。 +从 3.3.4.0 版本开始,在多副本环境中创建流会进行 snode 的**存在性检查**,要求首先创建 snode。如果 snode 不存在,无法创建流。 diff --git a/docs/zh/14-reference/03-taos-sql/17-json.md b/docs/zh/14-reference/03-taos-sql/17-json.md index 18c25cfe230..6e1f568bac4 100644 --- a/docs/zh/14-reference/03-taos-sql/17-json.md +++ b/docs/zh/14-reference/03-taos-sql/17-json.md @@ -33,7 +33,7 @@ description: 对 JSON 类型如何使用的详细说明 ## 支持的操作 -1. 在 where 条件中时,支持函数 match/nmatch/between and/like/and/or/is null/is no null,不支持 in +1. 在 where 条件中时,支持函数 match/nmatch/between and/like/and/or/is null/is not null,不支持 in ``` select * from s1 where info->'k1' match 'v*'; diff --git a/docs/zh/14-reference/03-taos-sql/21-node.md b/docs/zh/14-reference/03-taos-sql/21-node.md index 967cb511273..e3a672790c5 100644 --- a/docs/zh/14-reference/03-taos-sql/21-node.md +++ b/docs/zh/14-reference/03-taos-sql/21-node.md @@ -27,11 +27,15 @@ SHOW DNODES; ## 删除数据节点 ```sql -DROP DNODE dnode_id +DROP DNODE dnode_id [force] [unsafe] ``` 注意删除 dnode 不等于停止相应的进程。实际中推荐先将一个 dnode 删除之后再停止其所对应的进程。 +只有在线节点可以被删除。如果要强制删除离线节点,需要执行强制删除操作, 即指定force选项。 + +当节点上存在单副本,并且节点处于离线,如果要强制删除该节点,需要执行非安全删除,即制定unsafe,并且数据不可再恢复。 + ## 修改数据节点配置 ```sql diff --git a/docs/zh/14-reference/05-connector/35-node.mdx b/docs/zh/14-reference/05-connector/35-node.mdx index 6ac34d24717..d9512eae78d 100644 --- a/docs/zh/14-reference/05-connector/35-node.mdx +++ b/docs/zh/14-reference/05-connector/35-node.mdx @@ -26,6 +26,7 @@ Node.js 连接器目前仅支持 WebSocket 连接器, 其通过 taosAdapter | Node.js 连接器 版本 | 主要变化 | TDengine 版本 | | :------------------: | :----------------------: | :----------------: | +| 3.1.1 | 优化了数据传输性能 | 3.3.2.0 及更高版本 | | 3.1.0 | 新版本发布,支持 WebSocket 连接 | 3.2.0.0 及更高版本 | ## 处理异常 diff --git a/docs/zh/14-reference/05-connector/50-odbc.mdx 
b/docs/zh/14-reference/05-connector/50-odbc.mdx index 8518d2ffd73..63837fd282d 100644 --- a/docs/zh/14-reference/05-connector/50-odbc.mdx +++ b/docs/zh/14-reference/05-connector/50-odbc.mdx @@ -3,11 +3,14 @@ sidebar_label: ODBC title: TDengine ODBC --- -TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库的数据表/视图。 +TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)以及用户自定义开发的应用程序,通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库。 -TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。 +TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。 -注意:TDengine ODBC 支持 32/64 位 Windows 系统,调用 TDengine ODBC 需要通过相应位数的 ODBC 驱动管理器进行。在 32 位 Windows 系统或者 64 位 Windows 系统的 32 位应用程序中,仅支持使用 WebSocket 连接方式访问 TDengine 数据库。 +TDengine ODBC 提供 64 位 和 32 位 两种驱动程序。但 32 位仅企业版支持,且仅支持 WebSocket 连接方式。 +**注意:** +- 驱动管理器:确保使用与应用程序架构匹配的 ODBC 驱动管理器。32 位应用程序需要使用 32 位 ODBC 驱动管理器,64 位应用程序需要使用 64 位 ODBC 驱动管理器。 +- 数据源名称(DSN):32 位和 64 位 ODBC 驱动管理器都可以看到所有 DSN,用户 DSN 标签页下的 DSN 如果名字相同会共用,因此需要在 DSN 名称上去区分。 想更多了解 TDengine 时序时序数据库的使用,可访问 [TDengine官方文档](https://docs.taosdata.com/intro/)。 @@ -24,17 +27,17 @@ TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连 ### 数据源连接类型与区别 -TDengine ODBC 支持两种连接 TDengine 数据库方式:Websocket 连接与 Native 连接,其区别如下: +TDengine ODBC 支持两种连接 TDengine 数据库方式:WebSocket 连接与 Native 连接,其区别如下: -1. 访问云服务仅支持使用 Websocket 连接方式。 +1. 访问云服务仅支持使用 WebSocket 连接方式。 2. 32 位应用程序仅支持使用 WebSocket 连接方式。 -3. Websocket 连接的兼容性更好,一般不需要随着 TDengine 数据库服务端升级而升级客户端的库。 +3. WebSocket 连接的兼容性更好,一般不需要随着 TDengine 数据库服务端升级而升级客户端的库。 4. Native 连接通常性能更好一点,但是必须与 TDengine 数据库服务端的版本保持一致。 -5. 对于一般用户,建议使用 **Websocket** 连接方式,性能与 Native 差别不大,兼容性更好。 +5. 对于一般用户,建议使用 **WebSocket** 连接方式,性能与 Native 差别不大,兼容性更好。 ### WebSocket 连接 @@ -46,11 +49,11 @@ TDengine ODBC 支持两种连接 TDengine 数据库方式:Websocket 连接与 4. 
点击完成,进入 TDengine ODBC 数据源配置页面,填写如下必要信息 - ![ODBC websocket connection config](./assets/odbc-ws-config-zh.webp) + ![ODBC WebSocket connection config](./assets/odbc-ws-config-zh.webp) 4.1 【DSN】:Data Source Name 必填,为新添加的 ODBC 数据源命名 - 4.2【连接类型】 : 必选,选择连接类型,这里选择 【Websocket】 + 4.2【连接类型】 : 必选,选择连接类型,这里选择 【WebSocket】 4.3【URL】必填,ODBC 数据源 URL,示例: `http://localhost:6041`, 云服务的 url 示例: `https://gw.cloud.taosdata.com?token=your_token` @@ -111,7 +114,7 @@ WebSocket 连接方式除此之外还支持 Windows X64系统上运行的 32 位 | taos_odbc 版本 | 主要变化 | TDengine 版本 | | :----------- | :-------------------------------------------------------------------------------------------------- | :---------------- | -| v1.1.0 | 1. 支持视图功能;
2. 支持 VARBINARY/GEOMETRY 数据类型; | 3.3.3.0及更高版本 | +| v1.1.0 | 1. 支持视图功能;
2. 支持 VARBINARY/GEOMETRY 数据类型;
3. 支持 ODBC 32 位 WebSocket 连接方式(仅企业版支持);
4. 支持 ODBC 数据源配置对话框设置对工业软件 KingSCADA、Kepware 等的兼容性适配选项(仅企业版支持); | 3.3.3.0及更高版本 | | v1.0.2 | 支持 CP1252 字符编码; | 3.2.3.0及更高版本 | | v1.0.1 | 1. 支持 DSN 设置 BI 模式,在 BI 模式下 TDengine 数据库不返回系统数据库和超级表子表信息;
2. 重构字符集转换模块,提升读写性能;
3. ODBC 数据源配置对话框中默认修改默认连接方式为“WebSocket”;
4. ODBC 数据源配置对话框增加“测试连接”控件;
5. ODBC 数据源配置支持中文/英文界面; | - | | v1.0.0.0 | 发布初始版本,支持与Tdengine数据库交互以读写数据,具体请参考“API 参考”一节 | 3.2.2.0及更高版本 | diff --git a/docs/zh/14-reference/05-connector/index.md b/docs/zh/14-reference/05-connector/index.md index bd2cff6a3d2..4142d111e01 100644 --- a/docs/zh/14-reference/05-connector/index.md +++ b/docs/zh/14-reference/05-connector/index.md @@ -34,8 +34,8 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器 | **3.0.0.0 及以上** | 3.0.2以上 | 当前版本 | 3.0 分支 | 3.0.0 | 3.1.0 | 当前版本 | 与 TDengine 相同版本 | | **2.4.0.14 及以上** | 2.0.38 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 | 与 TDengine 相同版本 | | **2.4.0.4 - 2.4.0.13** | 2.0.37 | 当前版本 | develop 分支 | 1.0.2 - 1.0.6 | 2.0.10 - 2.0.12 | 当前版本 | 与 TDengine 相同版本 | -| **2.2.x.x ** | 2.0.36 | 当前版本 | master 分支 | n/a | 2.0.7 - 2.0.9 | 当前版本 | 与 TDengine 相同版本 | -| **2.0.x.x ** | 2.0.34 | 当前版本 | master 分支 | n/a | 2.0.1 - 2.0.6 | 当前版本 | 与 TDengine 相同版本 | +| **2.2.x.x** | 2.0.36 | 当前版本 | master 分支 | n/a | 2.0.7 - 2.0.9 | 当前版本 | 与 TDengine 相同版本 | +| **2.0.x.x** | 2.0.34 | 当前版本 | master 分支 | n/a | 2.0.1 - 2.0.6 | 当前版本 | 与 TDengine 相同版本 | ## 功能特性 diff --git a/docs/zh/14-reference/07-supported.md b/docs/zh/14-reference/07-supported.md index 10ca237653c..5115bd81888 100644 --- a/docs/zh/14-reference/07-supported.md +++ b/docs/zh/14-reference/07-supported.md @@ -6,15 +6,28 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表" ## TDengine 服务端支持的平台列表 -| | **Windows server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 以上** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **macOS** | -| ------------ | ---------------------------- | ----------------- | ---------------- | ------------------ | ------------ | ----------------- | ---------------- | --------- | -| X64 | ●/E | ●/E | ● | ● | ●/E | ●/E | ●/E | ● | -| 树莓派 ARM64 | | | ● | | | | | | -| 华为云 ARM64 | | | | ● | | | | | -| M1 | | | | | | | | ● | +| | **版本** | **X64 64bit** | **ARM64** | +| ----------------------|----------------| ------------- | --------- | +| **CentOS** | **7.9 以上** | ● | ● | 
+| **Ubuntu** | **18 以上** | ● | ● | +| **RedHat** | **RHEL 7 以上** | ● | ● | +| **Debian** | **6.0 以上** | ● | ● | +| **FreeBSD** | **12 以上** | ● | ● | +| **OpenSUSE** | **全部版本** | ● | ● | +| **SUSE Linux** | **11 以上** | ● | ● | +| **Fedora** | **21 以上** | ● | ● | +| **Windows Server** | **2016 以上** | ●/E | | +| **Windows** | **10/11** | ●/E | | +| **银河麒麟** | **V10 以上** | ●/E | ●/E | +| **中标麒麟** | **V7.0 以上** | ●/E | ●/E | +| **统信 UOS** | **V20 以上** | ●/E | | +| **凝思磐石** | **V8.0 以上** | ●/E | | +| **华为欧拉 openEuler** | **V20.03 以上** | ●/E | | +| **龙蜥 Anolis OS** | **V8.6 以上** | ●/E | | +| **macOS** | **11.0 以上** | | ● | 注:1) ● 表示经过官方测试验证, ○ 表示非官方测试验证,E 表示仅企业版支持。 - 2) 社区版仅支持主流操作系统的较新版本,包括 Ubuntu 18+/CentOS 7+/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS 等。如果有其他操作系统及版本的需求,请联系企业版支持。 + 2) 社区版仅支持主流操作系统的较新版本,包括 Ubuntu 18+/CentOS 7+/CentOS Stream/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS 等。如果有其他操作系统及版本的需求,请联系企业版支持。 ## TDengine 客户端和连接器支持的平台列表 @@ -22,16 +35,16 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表" 对照矩阵如下: -| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** | **X64 64bit** | **ARM64** | -| ----------- | ------------- | ------------- | --------- | ------------- | --------- | -| **OS** | **Linux** | **Win64** | **Linux** | **macOS** | **macOS** | -| **C/C++** | ● | ● | ● | ● | ● | -| **JDBC** | ● | ● | ● | ○ | ○ | -| **Python** | ● | ● | ● | ● | ● | -| **Go** | ● | ● | ● | ● | ● | -| **NodeJs** | ● | ● | ● | ○ | ○ | -| **C#** | ● | ● | ○ | ○ | ○ | -| **Rust** | ● | ● | ○ | ● | ● | -| **RESTful** | ● | ● | ● | ● | ● | +| **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **ARM64** | **ARM64** | +| ----------- | ------------- | ------------- | ------------- | --------- | --------- | +| **OS** | **Linux** | **Win64** | **macOS** | **Linux** | **macOS** | +| **C/C++** | ● | ● | ● | ● | ● | +| **JDBC** | ● | ● | ● | ● | ● | +| **Python** | ● | ● | ● | ● | ● | +| **Go** | ● | ● | ● | ● | ● | +| **NodeJs** | ● | ● | ● | ● | ● | +| 
**C#** | ● | ● | ○ | ● | ○ | +| **Rust** | ● | ● | ● | ○ | ● | +| **RESTful** | ● | ● | ● | ● | ● | 注:● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。 diff --git a/docs/zh/26-tdinternal/01-arch.md b/docs/zh/26-tdinternal/01-arch.md index 04e47797a86..8aa69e45d5d 100644 --- a/docs/zh/26-tdinternal/01-arch.md +++ b/docs/zh/26-tdinternal/01-arch.md @@ -178,7 +178,7 @@ TDengine 集群可以容纳单个、多个甚至几千个数据节点。应用 TDengine 存储的数据包括采集的时序数据以及库、表相关的元数据、标签数据等,这些数据具体分为三部分: -- 时序数据:TDengine 的核心存储对象,存放于 vnode 里,由 data、head 和 last 三个文件组成,数据量大,查询量取决于应用场景。允许乱序写入,但暂时不支持删除操作,并且仅在 update 参数设置为 1 时允许更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。 +- 时序数据:时序数据是 TDengine 的核心存储对象,它们被存储在 vnode 中。时序数据由 data、head、sma 和 stt 4 类文件组成,这些文件共同构成了时序数据的完整存储结构。由于时序数据的特点是数据量大且查询需求取决于具体应用场景,因此 TDengine 采用了“一个数据采集点一张表”的模型来优化存储和查询性能。在这种模型下,一个时间段内的数据是连续存储的,对单张表的写入是简单的追加操作,一次读取可以获取多条记录。这种设计确保了单个数据采集点的写入和查询操作都能达到最优性能。 - 数据表元数据:包含标签信息和 Table Schema 信息,存放于 vnode 里的 meta 文件,支持增删改查四个标准操作。数据量很大,有 N 张表,就有 N 条记录,因此采用 LRU 存储,支持标签数据的索引。TDengine 支持多核多线程并发查询。只要计算内存足够,元数据全内存存储,千万级别规模的标签数据过滤结果能毫秒级返回。在内存资源不足的情况下,仍然可以支持数千万张表的快速查询。 - 数据库元数据:存放于 mnode 里,包含系统节点、用户、DB、STable Schema 等信息,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。 @@ -321,4 +321,4 @@ TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化 此外,TDengine 还提供了数据分级存储的功能,允许用户将不同时间段的数据存储在不同存储设备的目录中,以此实现将“热”数据和“冷”数据分开存储。这样做可以充分利用各种存储资源,同时节约成本。例如,对于最新采集且需要频繁访问的数据,由于其读取性能要求较高,用户可以配置将这些数据存储在高性能的固态硬盘上。而对于超过一定期限、查询需求较低的数据,则可以将其存储在成本相对较低的机械硬盘上。 -为了进一步降低存储成本,TDengine 还支持将时序数据存储在对象存储系统中。通过其创新性的设计,在大多数情况下,从对象存储系统中查询时序数据的性能接近本地硬盘的一半,而在某些场景下,性能甚至可以与本地硬盘相媲美。同时,TDengine 还允许用户对存储在对象存储中的时序数据执行删除和更新操作。 \ No newline at end of file +为了进一步降低存储成本,TDengine 还支持将时序数据存储在对象存储系统中。通过其创新性的设计,在大多数情况下,从对象存储系统中查询时序数据的性能接近本地硬盘的一半,而在某些场景下,性能甚至可以与本地硬盘相媲美。同时,TDengine 还允许用户对存储在对象存储中的时序数据执行删除和更新操作。 diff --git a/include/client/taos.h b/include/client/taos.h index 80dbe27c471..924d0ff66e4 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -81,6 +81,13 @@ typedef 
enum { TSDB_SML_TIMESTAMP_NANO_SECONDS, } TSDB_SML_TIMESTAMP_TYPE; +typedef enum TAOS_FIELD_T { + TAOS_FIELD_COL = 1, + TAOS_FIELD_TAG, + TAOS_FIELD_QUERY, + TAOS_FIELD_TBNAME, +} TAOS_FIELD_T; + typedef struct taosField { char name[65]; int8_t type; @@ -95,6 +102,15 @@ typedef struct TAOS_FIELD_E { int32_t bytes; } TAOS_FIELD_E; +typedef struct TAOS_FIELD_STB { + char name[65]; + int8_t type; + uint8_t precision; + uint8_t scale; + int32_t bytes; + TAOS_FIELD_T field_type; +} TAOS_FIELD_STB; + #ifdef WINDOWS #define DLL_EXPORT __declspec(dllexport) #else @@ -195,13 +211,6 @@ DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt); typedef void TAOS_STMT2; -typedef enum TAOS_FIELD_T { - TAOS_FIELD_COL = 1, - TAOS_FIELD_TAG, - TAOS_FIELD_QUERY, - TAOS_FIELD_TBNAME, -} TAOS_FIELD_T; - typedef struct TAOS_STMT2_OPTION { int64_t reqid; bool singleStbInsert; @@ -232,7 +241,9 @@ DLL_EXPORT int taos_stmt2_exec(TAOS_STMT2 *stmt, int *affected_rows); DLL_EXPORT int taos_stmt2_close(TAOS_STMT2 *stmt); DLL_EXPORT int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert); DLL_EXPORT int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields); +DLL_EXPORT int taos_stmt2_get_stb_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_STB **fields); DLL_EXPORT void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields); +DLL_EXPORT void taos_stmt2_free_stb_fields(TAOS_STMT2 *stmt, TAOS_FIELD_STB *fields); DLL_EXPORT TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt); DLL_EXPORT char *taos_stmt2_error(TAOS_STMT2 *stmt); @@ -251,17 +262,17 @@ DLL_EXPORT int64_t taos_affected_rows64(TAOS_RES *res); DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); DLL_EXPORT int taos_select_db(TAOS *taos, const char *db); DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); -DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); -DLL_EXPORT void 
taos_stop_query(TAOS_RES *res); -DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); -DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows); -DLL_EXPORT bool taos_is_update_query(TAOS_RES *res); -DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); -DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows); -DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData); -DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex); -DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); -DLL_EXPORT void taos_reset_current_db(TAOS *taos); +DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); +DLL_EXPORT void taos_stop_query(TAOS_RES *res); +DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); +DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows); +DLL_EXPORT bool taos_is_update_query(TAOS_RES *res); +DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); +DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows); +DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData); +DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex); +DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); +DLL_EXPORT void taos_reset_current_db(TAOS *taos); DLL_EXPORT int *taos_fetch_lengths(TAOS_RES *res); DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res); diff --git a/include/common/tanal.h b/include/common/tanalytics.h similarity index 96% rename from include/common/tanal.h rename to include/common/tanalytics.h index 69d110d161d..85eb963129f 100644 --- a/include/common/tanal.h +++ b/include/common/tanalytics.h @@ -36,7 +36,7 @@ typedef struct { int32_t anode; int32_t urlLen; char *url; -} SAnalUrl; +} SAnalyticsUrl; typedef enum { 
ANAL_BUF_TYPE_JSON = 0, @@ -53,18 +53,18 @@ typedef struct { TdFilePtr filePtr; char fileName[TSDB_FILENAME_LEN + 10]; int64_t numOfRows; -} SAnalColBuf; +} SAnalyticsColBuf; typedef struct { EAnalBufType bufType; TdFilePtr filePtr; char fileName[TSDB_FILENAME_LEN]; int32_t numOfCols; - SAnalColBuf *pCols; + SAnalyticsColBuf *pCols; } SAnalBuf; -int32_t taosAnalInit(); -void taosAnalCleanup(); +int32_t taosAnalyticsInit(); +void taosAnalyticsCleanup(); SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf); int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen); diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 3910ea6745b..ea764e6760f 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -154,6 +154,7 @@ typedef enum EStreamType { STREAM_TRANS_STATE, STREAM_MID_RETRIEVE, STREAM_PARTITION_DELETE_DATA, + STREAM_GET_RESULT, } EStreamType; #pragma pack(push, 1) @@ -383,6 +384,10 @@ typedef struct STUidTagInfo { #define TABLE_NAME_COLUMN_INDEX 6 #define PRIMARY_KEY_COLUMN_INDEX 7 +//steam get result block column +#define DATA_TS_COLUMN_INDEX 0 +#define DATA_VERSION_COLUMN_INDEX 1 + // stream create table block column #define UD_TABLE_NAME_COLUMN_INDEX 0 #define UD_GROUPID_COLUMN_INDEX 1 diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 99cdb531037..1103b89ccb0 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -189,7 +189,12 @@ static FORCE_INLINE void colDataSetDouble(SColumnInfoData* pColumnInfoData, uint int32_t getJsonValueLen(const char* data); +// For the VAR_DATA_TYPE type, new data is inserted strictly according to the position of SVarColAttr.length. +// If the same row is inserted repeatedly, data holes will result. 
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); +// For the VAR_DATA_TYPE type, if a row already has data before inserting it (judged by offset != -1), +// it will be inserted at the original position and the old data will be overwritten. +int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData); int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue); @@ -233,7 +238,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo); * @brief find how many rows already in order start from first row */ int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo); -void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk); +int32_t blockDataCheck(const SSDataBlock* pDataBlock); int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload); int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows); @@ -266,7 +271,7 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId) int32_t bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index, SColumnInfoData** pColInfoData); int32_t blockGetEncodeSize(const SSDataBlock* pBlock); -int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols); +int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataLen, int32_t numOfCols); int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos); // for debug diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 9b6d4eae5e7..94c1320ea3a 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -151,6 +151,7 @@ extern bool tsMonitorForceV2; // audit extern bool tsEnableAudit; extern bool 
tsEnableAuditCreateTable; +extern bool tsEnableAuditDelete; extern int32_t tsAuditInterval; // telem @@ -162,6 +163,12 @@ extern bool tsEnableCrashReport; extern char *tsTelemUri; extern char *tsClientCrashReportUri; extern char *tsSvrCrashReportUri; +extern int8_t tsSafetyCheckLevel; +enum { + TSDB_SAFETY_CHECK_LEVELL_NEVER = 0, + TSDB_SAFETY_CHECK_LEVELL_NORMAL = 1, + TSDB_SAFETY_CHECK_LEVELL_BYROW = 2, +}; // query buffer management extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing @@ -203,10 +210,10 @@ extern int32_t tsMinIntervalTime; extern int32_t tsMaxInsertBatchRows; // build info -extern char version[]; -extern char compatible_version[]; -extern char gitinfo[]; -extern char buildinfo[]; +extern char td_version[]; +extern char td_compatible_version[]; +extern char td_gitinfo[]; +extern char td_buildinfo[]; // lossy extern char tsLossyColumns[]; diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 01808d4f2f8..7ff70b243a4 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -421,7 +421,7 @@ typedef enum ENodeType { // physical plan node QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100, QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN, - QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN, + QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN, // INACTIVE QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN, @@ -435,7 +435,7 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_SORT, QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT, QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL, - QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL, + QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL, // INACTIVE QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL, @@ -467,9 +467,11 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT, QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT, 
QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL, + QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL, QUERY_NODE_PHYSICAL_PLAN_MERGE_ANOMALY, QUERY_NODE_PHYSICAL_PLAN_STREAM_ANOMALY, QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC, + QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC, } ENodeType; typedef struct { @@ -1022,6 +1024,7 @@ typedef struct { char sDetailVer[128]; int64_t whiteListVer; SMonitorParas monitorParas; + int8_t enableAuditDelete; } SConnectRsp; int32_t tSerializeSConnectRsp(void* buf, int32_t bufLen, SConnectRsp* pRsp); @@ -1215,6 +1218,7 @@ typedef struct { int32_t bytes; int8_t type; uint8_t pk; + bool noData; } SColumnInfo; typedef struct STimeWindow { @@ -1337,6 +1341,7 @@ typedef struct { char* sql; int8_t withArbitrator; int8_t encryptAlgorithm; + char dnodeListStr[TSDB_DNODE_LIST_LEN]; } SCreateDbReq; int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq); @@ -1825,6 +1830,17 @@ int32_t tSerializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq); int32_t tDeserializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq); void tFreeSStatisReq(SStatisReq* pReq); +typedef struct { + char db[TSDB_DB_FNAME_LEN]; + char table[TSDB_TABLE_NAME_LEN]; + char operation[AUDIT_OPERATION_LEN]; + int32_t sqlLen; + char* pSql; +} SAuditReq; +int32_t tSerializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq); +int32_t tDeserializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq); +void tFreeSAuditReq(SAuditReq* pReq); + typedef struct { int32_t dnodeId; int64_t clusterId; @@ -2813,9 +2829,11 @@ typedef struct { int32_t code; } STaskDropRsp; -#define STREAM_TRIGGER_AT_ONCE 1 -#define STREAM_TRIGGER_WINDOW_CLOSE 2 -#define STREAM_TRIGGER_MAX_DELAY 3 +#define STREAM_TRIGGER_AT_ONCE 1 +#define STREAM_TRIGGER_WINDOW_CLOSE 2 +#define STREAM_TRIGGER_MAX_DELAY 3 +#define STREAM_TRIGGER_FORCE_WINDOW_CLOSE 4 + #define STREAM_DEFAULT_IGNORE_EXPIRED 1 #define STREAM_FILL_HISTORY_ON 1 #define STREAM_FILL_HISTORY_OFF 0 @@ -3413,6 +3431,7 @@ typedef 
struct { int32_t svrTimestamp; SArray* rsps; // SArray SMonitorParas monitorParas; + int8_t enableAuditDelete; } SClientHbBatchRsp; static FORCE_INLINE uint32_t hbKeyHashFunc(const char* key, uint32_t keyLen) { return taosIntHash_64(key, keyLen); } @@ -4104,18 +4123,16 @@ void tDeleteMqMetaRsp(SMqMetaRsp* pRsp); #define MQ_DATA_RSP_VERSION 100 typedef struct { - struct { - SMqRspHead head; - STqOffsetVal rspOffset; - STqOffsetVal reqOffset; - int32_t blockNum; - int8_t withTbName; - int8_t withSchema; - SArray* blockDataLen; - SArray* blockData; - SArray* blockTbName; - SArray* blockSchema; - }; + SMqRspHead head; + STqOffsetVal rspOffset; + STqOffsetVal reqOffset; + int32_t blockNum; + int8_t withTbName; + int8_t withSchema; + SArray* blockDataLen; + SArray* blockData; + SArray* blockTbName; + SArray* blockSchema; union{ struct{ diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 2c797e39bf6..c22a3da5ade 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -259,6 +259,7 @@ TD_DEF_MSG_TYPE(TDMT_MND_STREAM_DROP_ORPHANTASKS, "stream-drop-orphan-tasks", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_STREAM_TASK_RESET, "stream-reset-tasks", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_UPDATE_DNODE_INFO, "update-dnode-info", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_AUDIT, "audit", NULL, NULL) TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG) TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8 diff --git a/include/common/ttokendef.h b/include/common/ttokendef.h index b86830869cc..e5e40f225ed 100644 --- a/include/common/ttokendef.h +++ b/include/common/ttokendef.h @@ -16,394 +16,7 @@ #ifndef _TD_COMMON_TOKEN_H_ #define _TD_COMMON_TOKEN_H_ -#define TK_OR 1 -#define TK_AND 2 -#define TK_UNION 3 -#define TK_ALL 4 -#define TK_MINUS 5 -#define TK_EXCEPT 6 -#define TK_INTERSECT 7 -#define TK_NK_BITAND 8 -#define TK_NK_BITOR 9 -#define TK_NK_LSHIFT 10 -#define TK_NK_RSHIFT 11 -#define TK_NK_PLUS 12 -#define TK_NK_MINUS 13 -#define TK_NK_STAR 14 -#define TK_NK_SLASH 15 -#define TK_NK_REM 16 
-#define TK_NK_CONCAT 17 -#define TK_CREATE 18 -#define TK_ACCOUNT 19 -#define TK_NK_ID 20 -#define TK_PASS 21 -#define TK_NK_STRING 22 -#define TK_ALTER 23 -#define TK_PPS 24 -#define TK_TSERIES 25 -#define TK_STORAGE 26 -#define TK_STREAMS 27 -#define TK_QTIME 28 -#define TK_DBS 29 -#define TK_USERS 30 -#define TK_CONNS 31 -#define TK_STATE 32 -#define TK_NK_COMMA 33 -#define TK_HOST 34 -#define TK_IS_IMPORT 35 -#define TK_NK_INTEGER 36 -#define TK_CREATEDB 37 -#define TK_USER 38 -#define TK_ENABLE 39 -#define TK_SYSINFO 40 -#define TK_ADD 41 -#define TK_DROP 42 -#define TK_GRANT 43 -#define TK_ON 44 -#define TK_TO 45 -#define TK_REVOKE 46 -#define TK_FROM 47 -#define TK_SUBSCRIBE 48 -#define TK_READ 49 -#define TK_WRITE 50 -#define TK_NK_DOT 51 -#define TK_WITH 52 -#define TK_ENCRYPT_KEY 53 -#define TK_ANODE 54 -#define TK_UPDATE 55 -#define TK_ANODES 56 -#define TK_DNODE 57 -#define TK_PORT 58 -#define TK_DNODES 59 -#define TK_RESTORE 60 -#define TK_NK_IPTOKEN 61 -#define TK_FORCE 62 -#define TK_UNSAFE 63 -#define TK_CLUSTER 64 -#define TK_LOCAL 65 -#define TK_QNODE 66 -#define TK_BNODE 67 -#define TK_SNODE 68 -#define TK_MNODE 69 -#define TK_VNODE 70 -#define TK_DATABASE 71 -#define TK_USE 72 -#define TK_FLUSH 73 -#define TK_TRIM 74 -#define TK_S3MIGRATE 75 -#define TK_COMPACT 76 -#define TK_IF 77 -#define TK_NOT 78 -#define TK_EXISTS 79 -#define TK_BUFFER 80 -#define TK_CACHEMODEL 81 -#define TK_CACHESIZE 82 -#define TK_COMP 83 -#define TK_DURATION 84 -#define TK_NK_VARIABLE 85 -#define TK_MAXROWS 86 -#define TK_MINROWS 87 -#define TK_KEEP 88 -#define TK_PAGES 89 -#define TK_PAGESIZE 90 -#define TK_TSDB_PAGESIZE 91 -#define TK_PRECISION 92 -#define TK_REPLICA 93 -#define TK_VGROUPS 94 -#define TK_SINGLE_STABLE 95 -#define TK_RETENTIONS 96 -#define TK_SCHEMALESS 97 -#define TK_WAL_LEVEL 98 -#define TK_WAL_FSYNC_PERIOD 99 -#define TK_WAL_RETENTION_PERIOD 100 -#define TK_WAL_RETENTION_SIZE 101 -#define TK_WAL_ROLL_PERIOD 102 -#define TK_WAL_SEGMENT_SIZE 103 
-#define TK_STT_TRIGGER 104 -#define TK_TABLE_PREFIX 105 -#define TK_TABLE_SUFFIX 106 -#define TK_S3_CHUNKSIZE 107 -#define TK_S3_KEEPLOCAL 108 -#define TK_S3_COMPACT 109 -#define TK_KEEP_TIME_OFFSET 110 -#define TK_ENCRYPT_ALGORITHM 111 -#define TK_NK_COLON 112 -#define TK_BWLIMIT 113 -#define TK_START 114 -#define TK_TIMESTAMP 115 -#define TK_END 116 -#define TK_TABLE 117 -#define TK_NK_LP 118 -#define TK_NK_RP 119 -#define TK_USING 120 -#define TK_FILE 121 -#define TK_STABLE 122 -#define TK_COLUMN 123 -#define TK_MODIFY 124 -#define TK_RENAME 125 -#define TK_TAG 126 -#define TK_SET 127 -#define TK_NK_EQ 128 -#define TK_TAGS 129 -#define TK_BOOL 130 -#define TK_TINYINT 131 -#define TK_SMALLINT 132 -#define TK_INT 133 -#define TK_INTEGER 134 -#define TK_BIGINT 135 -#define TK_FLOAT 136 -#define TK_DOUBLE 137 -#define TK_BINARY 138 -#define TK_NCHAR 139 -#define TK_UNSIGNED 140 -#define TK_JSON 141 -#define TK_VARCHAR 142 -#define TK_MEDIUMBLOB 143 -#define TK_BLOB 144 -#define TK_VARBINARY 145 -#define TK_GEOMETRY 146 -#define TK_DECIMAL 147 -#define TK_COMMENT 148 -#define TK_MAX_DELAY 149 -#define TK_WATERMARK 150 -#define TK_ROLLUP 151 -#define TK_TTL 152 -#define TK_SMA 153 -#define TK_DELETE_MARK 154 -#define TK_FIRST 155 -#define TK_LAST 156 -#define TK_SHOW 157 -#define TK_FULL 158 -#define TK_PRIVILEGES 159 -#define TK_DATABASES 160 -#define TK_TABLES 161 -#define TK_STABLES 162 -#define TK_MNODES 163 -#define TK_QNODES 164 -#define TK_ARBGROUPS 165 -#define TK_FUNCTIONS 166 -#define TK_INDEXES 167 -#define TK_ACCOUNTS 168 -#define TK_APPS 169 -#define TK_CONNECTIONS 170 -#define TK_LICENCES 171 -#define TK_GRANTS 172 -#define TK_LOGS 173 -#define TK_MACHINES 174 -#define TK_ENCRYPTIONS 175 -#define TK_QUERIES 176 -#define TK_SCORES 177 -#define TK_TOPICS 178 -#define TK_VARIABLES 179 -#define TK_BNODES 180 -#define TK_SNODES 181 -#define TK_TRANSACTIONS 182 -#define TK_DISTRIBUTED 183 -#define TK_CONSUMERS 184 -#define TK_SUBSCRIPTIONS 185 -#define 
TK_VNODES 186 -#define TK_ALIVE 187 -#define TK_VIEWS 188 -#define TK_VIEW 189 -#define TK_COMPACTS 190 -#define TK_NORMAL 191 -#define TK_CHILD 192 -#define TK_LIKE 193 -#define TK_TBNAME 194 -#define TK_QTAGS 195 -#define TK_AS 196 -#define TK_SYSTEM 197 -#define TK_TSMA 198 -#define TK_INTERVAL 199 -#define TK_RECURSIVE 200 -#define TK_TSMAS 201 -#define TK_FUNCTION 202 -#define TK_INDEX 203 -#define TK_COUNT 204 -#define TK_LAST_ROW 205 -#define TK_META 206 -#define TK_ONLY 207 -#define TK_TOPIC 208 -#define TK_CONSUMER 209 -#define TK_GROUP 210 -#define TK_DESC 211 -#define TK_DESCRIBE 212 -#define TK_RESET 213 -#define TK_QUERY 214 -#define TK_CACHE 215 -#define TK_EXPLAIN 216 -#define TK_ANALYZE 217 -#define TK_VERBOSE 218 -#define TK_NK_BOOL 219 -#define TK_RATIO 220 -#define TK_NK_FLOAT 221 -#define TK_OUTPUTTYPE 222 -#define TK_AGGREGATE 223 -#define TK_BUFSIZE 224 -#define TK_LANGUAGE 225 -#define TK_REPLACE 226 -#define TK_STREAM 227 -#define TK_INTO 228 -#define TK_PAUSE 229 -#define TK_RESUME 230 -#define TK_PRIMARY 231 -#define TK_KEY 232 -#define TK_TRIGGER 233 -#define TK_AT_ONCE 234 -#define TK_WINDOW_CLOSE 235 -#define TK_IGNORE 236 -#define TK_EXPIRED 237 -#define TK_FILL_HISTORY 238 -#define TK_SUBTABLE 239 -#define TK_UNTREATED 240 -#define TK_KILL 241 -#define TK_CONNECTION 242 -#define TK_TRANSACTION 243 -#define TK_BALANCE 244 -#define TK_VGROUP 245 -#define TK_LEADER 246 -#define TK_MERGE 247 -#define TK_REDISTRIBUTE 248 -#define TK_SPLIT 249 -#define TK_DELETE 250 -#define TK_INSERT 251 -#define TK_NK_BIN 252 -#define TK_NK_HEX 253 -#define TK_NULL 254 -#define TK_NK_QUESTION 255 -#define TK_NK_ALIAS 256 -#define TK_NK_ARROW 257 -#define TK_ROWTS 258 -#define TK_QSTART 259 -#define TK_QEND 260 -#define TK_QDURATION 261 -#define TK_WSTART 262 -#define TK_WEND 263 -#define TK_WDURATION 264 -#define TK_IROWTS 265 -#define TK_ISFILLED 266 -#define TK_FLOW 267 -#define TK_FHIGH 268 -#define TK_FROWTS 269 -#define TK_CAST 270 -#define 
TK_POSITION 271 -#define TK_IN 272 -#define TK_FOR 273 -#define TK_NOW 274 -#define TK_TODAY 275 -#define TK_RAND 276 -#define TK_SUBSTR 277 -#define TK_SUBSTRING 278 -#define TK_BOTH 279 -#define TK_TRAILING 280 -#define TK_LEADING 281 -#define TK_TIMEZONE 282 -#define TK_CLIENT_VERSION 283 -#define TK_SERVER_VERSION 284 -#define TK_SERVER_STATUS 285 -#define TK_CURRENT_USER 286 -#define TK_PI 287 -#define TK_CASE 288 -#define TK_WHEN 289 -#define TK_THEN 290 -#define TK_ELSE 291 -#define TK_BETWEEN 292 -#define TK_IS 293 -#define TK_NK_LT 294 -#define TK_NK_GT 295 -#define TK_NK_LE 296 -#define TK_NK_GE 297 -#define TK_NK_NE 298 -#define TK_MATCH 299 -#define TK_NMATCH 300 -#define TK_CONTAINS 301 -#define TK_JOIN 302 -#define TK_INNER 303 -#define TK_LEFT 304 -#define TK_RIGHT 305 -#define TK_OUTER 306 -#define TK_SEMI 307 -#define TK_ANTI 308 -#define TK_ASOF 309 -#define TK_WINDOW 310 -#define TK_WINDOW_OFFSET 311 -#define TK_JLIMIT 312 -#define TK_SELECT 313 -#define TK_NK_HINT 314 -#define TK_DISTINCT 315 -#define TK_WHERE 316 -#define TK_PARTITION 317 -#define TK_BY 318 -#define TK_SESSION 319 -#define TK_STATE_WINDOW 320 -#define TK_EVENT_WINDOW 321 -#define TK_COUNT_WINDOW 322 -#define TK_ANOMALY_WINDOW 323 -#define TK_SLIDING 324 -#define TK_FILL 325 -#define TK_VALUE 326 -#define TK_VALUE_F 327 -#define TK_NONE 328 -#define TK_PREV 329 -#define TK_NULL_F 330 -#define TK_LINEAR 331 -#define TK_NEXT 332 -#define TK_HAVING 333 -#define TK_RANGE 334 -#define TK_EVERY 335 -#define TK_ORDER 336 -#define TK_SLIMIT 337 -#define TK_SOFFSET 338 -#define TK_LIMIT 339 -#define TK_OFFSET 340 -#define TK_ASC 341 -#define TK_NULLS 342 -#define TK_ABORT 343 -#define TK_AFTER 344 -#define TK_ATTACH 345 -#define TK_BEFORE 346 -#define TK_BEGIN 347 -#define TK_BITAND 348 -#define TK_BITNOT 349 -#define TK_BITOR 350 -#define TK_BLOCKS 351 -#define TK_CHANGE 352 -#define TK_COMMA 353 -#define TK_CONCAT 354 -#define TK_CONFLICT 355 -#define TK_COPY 356 -#define TK_DEFERRED 
357 -#define TK_DELIMITERS 358 -#define TK_DETACH 359 -#define TK_DIVIDE 360 -#define TK_DOT 361 -#define TK_EACH 362 -#define TK_FAIL 363 -#define TK_GLOB 364 -#define TK_ID 365 -#define TK_IMMEDIATE 366 -#define TK_IMPORT 367 -#define TK_INITIALLY 368 -#define TK_INSTEAD 369 -#define TK_ISNULL 370 -#define TK_MODULES 371 -#define TK_NK_BITNOT 372 -#define TK_NK_SEMI 373 -#define TK_NOTNULL 374 -#define TK_OF 375 -#define TK_PLUS 376 -#define TK_PRIVILEGE 377 -#define TK_RAISE 378 -#define TK_RESTRICT 379 -#define TK_ROW 380 -#define TK_STAR 381 -#define TK_STATEMENT 382 -#define TK_STRICT 383 -#define TK_STRING 384 -#define TK_TIMES 385 -#define TK_VALUES 386 -#define TK_VARIABLE 387 -#define TK_WAL 388 +#include "ttokenauto.h" #define TK_NK_SPACE 600 #define TK_NK_COMMENT 601 diff --git a/include/common/ttypes.h b/include/common/ttypes.h index 3934553b1c0..c50ce7ba73a 100644 --- a/include/common/ttypes.h +++ b/include/common/ttypes.h @@ -238,12 +238,26 @@ typedef struct { case TSDB_DATA_TYPE_UBIGINT: \ snprintf(_output, (int32_t)(_outputBytes), "%" PRIu64, *(uint64_t *)(_input)); \ break; \ - case TSDB_DATA_TYPE_FLOAT: \ - snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \ + case TSDB_DATA_TYPE_FLOAT: { \ + int32_t n = snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \ + if (n >= (_outputBytes)) { \ + n = snprintf(_output, (int32_t)(_outputBytes), "%.7e", *(float *)(_input)); \ + if (n >= (_outputBytes)) { \ + snprintf(_output, (int32_t)(_outputBytes), "%f", *(float *)(_input)); \ + } \ + } \ break; \ - case TSDB_DATA_TYPE_DOUBLE: \ - snprintf(_output, (int32_t)(_outputBytes), "%f", *(double *)(_input)); \ + } \ + case TSDB_DATA_TYPE_DOUBLE: { \ + int32_t n = snprintf(_output, (int32_t)(_outputBytes), "%f", *(double *)(_input)); \ + if (n >= (_outputBytes)) { \ + snprintf(_output, (int32_t)(_outputBytes), "%.15e", *(double *)(_input)); \ + if (n >= (_outputBytes)) { \ + snprintf(_output, (int32_t)(_outputBytes), "%f", 
*(double *)(_input)); \ + } \ + } \ break; \ + } \ case TSDB_DATA_TYPE_UINT: \ snprintf(_output, (int32_t)(_outputBytes), "%u", *(uint32_t *)(_input)); \ break; \ @@ -284,6 +298,7 @@ typedef struct { #define IS_VALID_UINT64(_t) ((_t) >= 0 && (_t) <= UINT64_MAX) #define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX) #define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX) +#define IS_INVALID_TYPE(_t) ((_t) < TSDB_DATA_TYPE_NULL || (_t) >= TSDB_DATA_TYPE_MAX) #define IS_CONVERT_AS_SIGNED(_t) \ (IS_SIGNED_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP)) diff --git a/include/libs/audit/audit.h b/include/libs/audit/audit.h index 2e786ab2b3b..f5710256e99 100644 --- a/include/libs/audit/audit.h +++ b/include/libs/audit/audit.h @@ -29,7 +29,6 @@ extern "C" { #endif #define AUDIT_DETAIL_MAX 65472 -#define AUDIT_OPERATION_LEN 20 typedef struct { const char *server; diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index bc29ef8284c..b6f32b76bd4 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -151,8 +151,9 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, * @param tversion * @return */ -int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, - int32_t* tversion, int32_t idx, bool* tbGet); +int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, int32_t dbNameBuffLen, char* tableName, + int32_t tbaleNameBuffLen, int32_t* sversion, int32_t* tversion, int32_t idx, + bool* tbGet); /** * The main task execution function, including query on both table and multiple tables, @@ -210,7 +211,7 @@ SMqBatchMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo); const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo); -const char* qExtractTbnameFromTask(qTaskInfo_t tinfo); +const char* qExtractTbnameFromTask(qTaskInfo_t tinfo); void* 
qExtractReaderFromStreamScanner(void* scanner); @@ -222,8 +223,8 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan int32_t qStreamRecoverFinish(qTaskInfo_t tinfo); bool qStreamScanhistoryFinished(qTaskInfo_t tinfo); int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo); -void resetTaskInfo(qTaskInfo_t tinfo); - +void qResetTaskInfoCode(qTaskInfo_t tinfo); +int32_t qGetStreamIntervalExecInfo(qTaskInfo_t tinfo, int64_t* pWaterMark, SInterval* pInterval, STimeWindow* pLastWindow); int32_t qStreamOperatorReleaseState(qTaskInfo_t tInfo); int32_t qStreamOperatorReloadState(qTaskInfo_t tInfo); diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h index 8e88a1a278b..db0d6339c81 100644 --- a/include/libs/executor/storageapi.h +++ b/include/libs/executor/storageapi.h @@ -39,8 +39,10 @@ extern "C" { #define META_READER_LOCK 0x0 #define META_READER_NOLOCK 0x1 -#define STREAM_STATE_BUFF_HASH 1 -#define STREAM_STATE_BUFF_SORT 2 +#define STREAM_STATE_BUFF_HASH 1 +#define STREAM_STATE_BUFF_SORT 2 +#define STREAM_STATE_BUFF_HASH_SORT 3 +#define STREAM_STATE_BUFF_HASH_SEARCH 4 typedef struct SMeta SMeta; typedef TSKEY (*GetTsFun)(void*); @@ -325,6 +327,9 @@ typedef struct { int64_t number; void* pStreamFileState; int32_t buffIndex; + int32_t hashIter; + void* pHashData; + int64_t minGpId; } SStreamStateCur; typedef struct SStateStore { @@ -337,6 +342,8 @@ typedef struct SStateStore { void (*streamStateReleaseBuf)(SStreamState* pState, void* pVal, bool used); void (*streamStateClearBuff)(SStreamState* pState, void* pVal); void (*streamStateFreeVal)(void* val); + int32_t (*streamStateGetPrev)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, + int32_t* pVLen, int32_t* pWinCode); int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* 
pWinCode); @@ -349,8 +356,15 @@ typedef struct SStateStore { int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen); int32_t (*streamStateFillPut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); - int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); + int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, + int32_t* pWinCode); + int32_t (*streamStateFillAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, + int32_t* pWinCode); void (*streamStateFillDel)(SStreamState* pState, const SWinKey* key); + int32_t (*streamStateFillGetNext)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, + int32_t* pVLen, int32_t* pWinCode); + int32_t (*streamStateFillGetPrev)(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, + int32_t* pVLen, int32_t* pWinCode); void (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur); void (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur); @@ -361,9 +375,12 @@ typedef struct SStateStore { SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key); void (*streamStateFreeCur)(SStreamStateCur* pCur); - int32_t (*streamStateGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); + int32_t (*streamStateFillGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); + void (*streamStateSetFillInfo)(SStreamState* pState); + void (*streamStateClearExpiredState)(SStreamState* pState); + int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen, int32_t* pWinCode); int32_t (*streamStateSessionPut)(SStreamState* pState, 
const SSessionKey* key, void* value, int32_t vLen); @@ -400,8 +417,8 @@ typedef struct SStateStore { SUpdateInfo** ppInfo); void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo); void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo); - int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo, int32_t* pLen); - int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo); + int32_t (*updateInfoSerialize)(SEncoder* pEncoder, const SUpdateInfo* pInfo); + int32_t (*updateInfoDeserialize)(SDecoder* pDeCoder, SUpdateInfo* pInfo); SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key); SStreamStateCur* (*streamStateCountSeekKeyPrev)(SStreamState* pState, const SSessionKey* pKey, COUNT_TYPE count); @@ -411,6 +428,11 @@ typedef struct SStateStore { int32_t (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark, const char* id, int64_t ckId, int8_t type, struct SStreamFileState** ppFileState); + + int32_t (*streamStateGroupPut)(SStreamState* pState, int64_t groupId, void* value, int32_t vLen); + SStreamStateCur* (*streamStateGroupGetCur)(SStreamState* pState); + void (*streamStateGroupCurNext)(SStreamStateCur* pCur); + int32_t (*streamStateGroupGetKVByCur)(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen); void (*streamFileStateDestroy)(struct SStreamFileState* pFileState); void (*streamFileStateClear)(struct SStreamFileState* pFileState); diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index e5bacf85b29..f71c2210bef 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -26,6 +26,8 @@ extern "C" { #define FUNC_AGGREGATE_UDF_ID 5001 #define FUNC_SCALAR_UDF_ID 5002 +extern const int32_t funcMgtBuiltinsNum; + typedef enum EFunctionType { // aggregate function FUNCTION_TYPE_APERCENTILE = 1, @@ -290,6 +292,7 @@ 
bool fmIsElapsedFunc(int32_t funcId); void getLastCacheDataType(SDataType* pType, int32_t pkBytes); int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNode** pFunc); +int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** pFunc); int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMidFunc, SFunctionNode** pMergeFunc); diff --git a/include/libs/monitor/clientMonitor.h b/include/libs/monitor/clientMonitor.h index 0085173ecd7..b09a1ac11ce 100644 --- a/include/libs/monitor/clientMonitor.h +++ b/include/libs/monitor/clientMonitor.h @@ -24,6 +24,7 @@ extern "C" { #include "thash.h" #include "query.h" #include "tqueue.h" +#include "clientInt.h" typedef enum { SQL_RESULT_SUCCESS = 0, @@ -81,6 +82,8 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name, void monitorCounterInc(int64_t clusterId, const char* counterName, const char** label_values); const char* monitorResultStr(SQL_RESULT_CODE code); int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data); + +void clientOperateReport(SRequestObj* pRequest); #ifdef __cplusplus } #endif diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index bbf28892898..514eddbc249 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -72,6 +72,7 @@ typedef struct SDatabaseOptions { int8_t compressionLevel; int8_t encryptAlgorithm; int32_t daysPerFile; + char dnodeListStr[TSDB_DNODE_LIST_LEN]; char encryptAlgorithmStr[TSDB_ENCRYPT_ALGO_STR_LEN]; SValueNode* pDaysPerFile; int32_t fsyncPeriod; diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 8e4a3ea32b0..cfd9c1a4226 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -194,14 +194,26 @@ typedef struct SIndefRowsFuncLogicNode { bool isTimeLineFunc; } SIndefRowsFuncLogicNode; +typedef struct SStreamNodeOption { + 
int8_t triggerType; + int64_t watermark; + int64_t deleteMark; + int8_t igExpired; + int8_t igCheckUpdate; + int8_t destHasPrimaryKey; +} SStreamNodeOption; + typedef struct SInterpFuncLogicNode { - SLogicNode node; - SNodeList* pFuncs; - STimeWindow timeRange; - int64_t interval; - EFillMode fillMode; - SNode* pFillValues; // SNodeListNode - SNode* pTimeSeries; // SColumnNode + SLogicNode node; + SNodeList* pFuncs; + STimeWindow timeRange; + int64_t interval; + int8_t intervalUnit; + int8_t precision; + EFillMode fillMode; + SNode* pFillValues; // SNodeListNode + SNode* pTimeSeries; // SColumnNode + SStreamNodeOption streamNodeOption; } SInterpFuncLogicNode; typedef struct SForecastFuncLogicNode { @@ -333,6 +345,7 @@ typedef struct SFillLogicNode { SNode* pWStartTs; SNode* pValues; // SNodeListNode STimeWindow timeRange; + SNodeList* pFillNullExprs; } SFillLogicNode; typedef struct SSortLogicNode { @@ -504,17 +517,21 @@ typedef struct SIndefRowsFuncPhysiNode { } SIndefRowsFuncPhysiNode; typedef struct SInterpFuncPhysiNode { - SPhysiNode node; - SNodeList* pExprs; - SNodeList* pFuncs; - STimeWindow timeRange; - int64_t interval; - int8_t intervalUnit; - EFillMode fillMode; - SNode* pFillValues; // SNodeListNode - SNode* pTimeSeries; // SColumnNode + SPhysiNode node; + SNodeList* pExprs; + SNodeList* pFuncs; + STimeWindow timeRange; + int64_t interval; + int8_t intervalUnit; + int8_t precision; + EFillMode fillMode; + SNode* pFillValues; // SNodeListNode + SNode* pTimeSeries; // SColumnNode + SStreamNodeOption streamNodeOption; } SInterpFuncPhysiNode; +typedef SInterpFuncPhysiNode SStreamInterpFuncPhysiNode; + typedef struct SForecastFuncPhysiNode { SPhysiNode node; SNodeList* pExprs; @@ -649,7 +666,7 @@ typedef struct SWindowPhysiNode { int64_t watermark; int64_t deleteMark; int8_t igExpired; - int8_t destHasPrimayKey; + int8_t destHasPrimaryKey; bool mergeDataBlock; } SWindowPhysiNode; @@ -677,6 +694,7 @@ typedef struct SFillPhysiNode { SNode* pWStartTs; // 
SColumnNode SNode* pValues; // SNodeListNode STimeWindow timeRange; + SNodeList* pFillNullExprs; } SFillPhysiNode; typedef SFillPhysiNode SStreamFillPhysiNode; @@ -786,9 +804,9 @@ typedef struct SDataDeleterNode { char tableFName[TSDB_TABLE_NAME_LEN]; char tsColName[TSDB_COL_NAME_LEN]; STimeWindow deleteTimeRange; - SNode* pAffectedRows; - SNode* pStartTs; - SNode* pEndTs; + SNode* pAffectedRows; // usless + SNode* pStartTs; // usless + SNode* pEndTs; // usless } SDataDeleterNode; typedef struct SSubplan { diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 4763077ed91..763882ab3a9 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -457,6 +457,7 @@ typedef struct SSelectStmt { bool hasCountFunc; bool hasUdaf; bool hasStateKey; + bool hasTwaOrElapsedFunc; bool onlyHasKeepOrderFunc; bool groupSort; bool tagScan; diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index 7271da8ff69..0fb6261ac83 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -65,7 +65,7 @@ typedef struct SParseCsvCxt { const char* pLastSqlPos; // the location of the last parsed sql } SParseCsvCxt; -typedef void(*setQueryFn)(int64_t); +typedef void (*setQueryFn)(int64_t); typedef struct SParseContext { uint64_t requestId; @@ -147,6 +147,7 @@ int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, c int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen, int32_t colIdx, int32_t rowNum); int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields); +int32_t qBuildStmtStbColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_STB** fields); int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields); int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName, TAOS_MULTI_BIND* bind, char* 
msgBuf, int32_t msgBufLen); @@ -176,8 +177,8 @@ int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsS STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int32_t msgBufLen); int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash); -int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD* fields, - int numFields, bool needChangeLength, char* errstr, int32_t errstrLen); +int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* fields, + int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw); int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray); int32_t serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap, SArray** pOut); diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index 81a39524631..d2f714f4008 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -364,7 +364,7 @@ void* getTaskPoolWorkerCb(); #define NEED_CLIENT_REFRESH_VG_ERROR(_code) \ ((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID) #define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) \ - ((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER || (_code) == TSDB_CODE_MND_INVALID_SCHEMA_VER) + ((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER || (_code) == TSDB_CODE_MND_INVALID_SCHEMA_VER || (_code) == TSDB_CODE_SCH_DATA_SRC_EP_MISS) #define NEED_CLIENT_HANDLE_ERROR(_code) \ (NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \ NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code)) diff --git a/include/libs/scalar/scalar.h b/include/libs/scalar/scalar.h index fd936dd087d..4b89a6a439f 100644 --- a/include/libs/scalar/scalar.h +++ b/include/libs/scalar/scalar.h @@ -105,6 +105,7 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara int32_t 
timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); +int32_t timeZoneStrLen(); int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t weekdayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t dayofweekFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h index b98170f1685..af8deff1a02 100644 --- a/include/libs/scheduler/scheduler.h +++ b/include/libs/scheduler/scheduler.h @@ -76,8 +76,6 @@ int32_t schedulerExecJob(SSchedulerReq* pReq, int64_t* pJob); int32_t schedulerFetchRows(int64_t jobId, SSchedulerReq* pReq); -void schedulerFetchRowsA(int64_t job, schedulerFetchFp fp, void* param); - int32_t schedulerGetTasksStatus(int64_t job, SArray* pSub); void schedulerStopQueryHb(void* pTrans); @@ -100,6 +98,8 @@ void schedulerFreeJob(int64_t* job, int32_t errCode); void schedulerDestroy(void); +int32_t schedulerValidatePlan(SQueryPlan* pPlan); + #ifdef __cplusplus } #endif diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h index f9469a449d2..a50451c3ebe 100644 --- a/include/libs/stream/streamState.h +++ b/include/libs/stream/streamState.h @@ -49,6 +49,8 @@ void streamStateClear(SStreamState* pState); void streamStateSetNumber(SStreamState* pState, int32_t number, int32_t tsIdex); void streamStateSaveInfo(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen); int32_t streamStateGetInfo(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen); +int32_t streamStateGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen, + int32_t* pWinCode); // session window int32_t 
streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen, @@ -75,8 +77,14 @@ int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, ch // fill int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); -int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); +int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode); +int32_t streamStateFillAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, + int32_t* pWinCode); void streamStateFillDel(SStreamState* pState, const SWinKey* key); +int32_t streamStateFillGetNext(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen, + int32_t* pWinCode); +int32_t streamStateFillGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen, + int32_t* pWinCode); int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode); @@ -96,15 +104,25 @@ SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey* void streamStateFreeCur(SStreamStateCur* pCur); void streamStateResetCur(SStreamStateCur* pCur); -int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); +int32_t streamStateFillGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); +// twa +void streamStateSetFillInfo(SStreamState* pState); +void streamStateClearExpiredState(SStreamState* pState); + void streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur); void streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur); int32_t streamStatePutParName(SStreamState* pState, int64_t 
groupId, const char* tbname); int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal, bool onlyCache, int32_t* pWinCode); +// group id +int32_t streamStateGroupPut(SStreamState* pState, int64_t groupId, void* value, int32_t vLen); +SStreamStateCur* streamStateGroupGetCur(SStreamState* pState); +void streamStateGroupCurNext(SStreamStateCur* pCur); +int32_t streamStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen); + void streamStateReloadInfo(SStreamState* pState, TSKEY ts); void streamStateCopyBackend(SStreamState* src, SStreamState* dst); diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index a189cee0bbb..de10d6844e0 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -70,7 +70,8 @@ typedef struct SActiveCheckpointInfo SActiveCheckpointInfo; #define SSTREAM_TASK_NEED_CONVERT_VER 2 #define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 -extern int32_t streamMetaId; +extern int32_t streamMetaRefPool; +extern int32_t streamTaskRefPool; enum { STREAM_STATUS__NORMAL = 0, @@ -113,7 +114,7 @@ enum { enum { TASK_TRIGGER_STATUS__INACTIVE = 1, - TASK_TRIGGER_STATUS__ACTIVE, + TASK_TRIGGER_STATUS__MAY_ACTIVE, }; typedef enum { @@ -258,6 +259,7 @@ typedef struct STaskId { typedef struct SStreamTaskId { int64_t streamId; int32_t taskId; + int64_t refId; const char* idStr; } SStreamTaskId; @@ -291,12 +293,12 @@ typedef struct SStreamStatus { int8_t schedStatus; int8_t statusBackup; int32_t schedIdleTime; // idle time before invoke again - int32_t timerActive; // timer is active int64_t lastExecTs; // last exec time stamp int32_t inScanHistorySentinel; - bool appendTranstateBlock; // has append the transfer state data block already + bool appendTranstateBlock; // has appended the transfer state data block already bool removeBackendFiles; // remove backend files on disk when free stream tasks SConsenChkptInfo consenChkptInfo; + STimeWindow latestForceWindow; // latest 
generated time window, only valid in } SStreamStatus; typedef struct SDataRange { @@ -305,14 +307,16 @@ typedef struct SDataRange { } SDataRange; typedef struct SSTaskBasicInfo { - int32_t nodeId; // vgroup id or snode id - SEpSet epSet; - SEpSet mnodeEpset; // mnode epset for send heartbeat - int32_t selfChildId; - int32_t totalLevel; - int8_t taskLevel; - int8_t fillHistory; // is fill history task or not - int64_t delaySchedParam; // in msec + int32_t nodeId; // vgroup id or snode id + SEpSet epSet; + SEpSet mnodeEpset; // mnode epset for send heartbeat + int32_t selfChildId; + int32_t trigger; + int8_t taskLevel; + int8_t fillHistory; // is fill history task or not + int64_t delaySchedParam; // in msec + int64_t watermark; // extracted from operators + SInterval interval; } SSTaskBasicInfo; typedef struct SStreamRetrieveReq SStreamRetrieveReq; @@ -454,7 +458,6 @@ struct SStreamTask { // the followings attributes don't be serialized SScanhistorySchedInfo schedHistoryInfo; - int32_t refCnt; int32_t transferStateAlignCnt; struct SStreamMeta* pMeta; SSHashObj* pNameMap; @@ -544,9 +547,10 @@ typedef struct STaskUpdateEntry { typedef int32_t (*__state_trans_user_fn)(SStreamTask*, void* param); -int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam, - SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** pTask); -void tFreeStreamTask(SStreamTask* pTask); +int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int32_t trigger, + int64_t triggerParam, SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, + SStreamTask** pTask); +void tFreeStreamTask(void* pTask); int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask); int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask); int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver); @@ -664,6 +668,8 @@ void 
streamTaskResetStatus(SStreamTask* pTask); void streamTaskSetStatusReady(SStreamTask* pTask); ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask); const char* streamTaskGetExecType(int32_t type); +int32_t streamTaskAllocRefId(SStreamTask* pTask, int64_t** pRefId); +void streamTaskFreeRefId(int64_t* pRefId); bool streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList); void streamTaskResetUpstreamStageInfo(SStreamTask* pTask); @@ -752,16 +758,15 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); +int32_t streamMetaAcquireTaskUnsafe(SStreamMeta* pMeta, STaskId* pId, SStreamTask** pTask); int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); -int32_t streamMetaAcquireOneTask(SStreamTask* pTask); void streamMetaClear(SStreamMeta* pMeta); void streamMetaInitBackend(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta); int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta); void streamMetaNotifyClose(SStreamMeta* pMeta); void streamMetaStartHb(SStreamMeta* pMeta); -bool streamMetaTaskInTimer(SStreamMeta* pMeta); int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, int64_t endTs, bool ready); int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo); diff --git a/include/libs/stream/tstreamFileState.h b/include/libs/stream/tstreamFileState.h index a265ae7e600..4a696d97980 100644 --- a/include/libs/stream/tstreamFileState.h +++ b/include/libs/stream/tstreamFileState.h @@ -16,8 +16,6 @@ #ifndef _STREAM_FILE_STATE_H_ #define _STREAM_FILE_STATE_H_ -#include "os.h" - #include 
"storageapi.h" #include "tarray.h" #include "tdef.h" @@ -37,7 +35,7 @@ typedef void (*_state_buff_cleanup_fn)(void* pRowBuff); typedef void* (*_state_buff_create_statekey_fn)(SRowBuffPos* pPos, int64_t num); typedef int32_t (*_state_file_remove_fn)(SStreamFileState* pFileState, const void* pKey); -typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void* data, int32_t* pDataLen); +typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void** data, int32_t* pDataLen); typedef int32_t (*_state_file_clear_fn)(SStreamState* pState); typedef int32_t (*_state_fun_get_fn)(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, @@ -45,6 +43,8 @@ typedef int32_t (*_state_fun_get_fn)(SStreamFileState* pFileState, void* pKey, i typedef int32_t (*range_cmpr_fn)(const SSessionKey* pWin1, const SSessionKey* pWin2); +typedef int (*__session_compare_fn_t)(const void* pWin, const void* pDatas, int pos); + int32_t streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark, const char* taskId, int64_t checkpointId, int8_t type, struct SStreamFileState** ppFileState); @@ -54,6 +54,8 @@ bool needClearDiskBuff(SStreamFileState* pFileState); void streamFileStateReleaseBuff(SStreamFileState* pFileState, SRowBuffPos* pPos, bool used); void streamFileStateClearBuff(SStreamFileState* pFileState, SRowBuffPos* pPos); +int32_t addRowBuffIfNotExist(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen, + int32_t* pWinCode); int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen, int32_t* pWinCode); void deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen); @@ -71,9 +73,11 @@ int32_t streamFileStateGetSelectRowSize(SStreamFileState* pFileState); void streamFileStateReloadInfo(SStreamFileState* pFileState, TSKEY ts); void* 
getRowStateBuff(SStreamFileState* pFileState); +void* getSearchBuff(SStreamFileState* pFileState); void* getStateFileStore(SStreamFileState* pFileState); bool isDeteled(SStreamFileState* pFileState, TSKEY ts); bool isFlushedState(SStreamFileState* pFileState, TSKEY ts, TSKEY gap); +TSKEY getFlushMark(SStreamFileState* pFileState); SRowBuffPos* getNewRowPosForWrite(SStreamFileState* pFileState); int32_t getRowStateRowSize(SStreamFileState* pFileState); @@ -94,6 +98,7 @@ int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId); void sessionWinStateClear(SStreamFileState* pFileState); void sessionWinStateCleanup(void* pBuff); +SStreamStateCur* createStateCursor(SStreamFileState* pFileState); SStreamStateCur* sessionWinStateSeekKeyCurrentPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey); SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, const SSessionKey* pWinKey); SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const SSessionKey* pWinKey); @@ -103,6 +108,8 @@ void sessionWinStateMoveToNext(SStreamStateCur* pCur); int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey, range_cmpr_fn cmpFn); +int32_t binarySearch(void* keyList, int num, const void* key, __session_compare_fn_t cmpFn); + // state window int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, char* pKeyData, int32_t keyDataLen, state_key_cmpr_fn fn, void** pVal, int32_t* pVLen, int32_t* pWinCode); @@ -117,6 +124,34 @@ int32_t getSessionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyL int32_t* pWinCode); int32_t getFunctionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen); +// time slice +int32_t getHashSortRowBuff(SStreamFileState* pFileState, const SWinKey* pKey, void** pVal, int32_t* pVLen, + int32_t* pWinCode); +int32_t hashSortFileGetFn(SStreamFileState* pFileState, void* pKey, void** 
data, int32_t* pDataLen); +int32_t hashSortFileRemoveFn(SStreamFileState* pFileState, const void* pKey); +void clearSearchBuff(SStreamFileState* pFileState); +int32_t getHashSortNextRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, + int32_t* pVLen, int32_t* pWinCode); +int32_t getHashSortPrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal, + int32_t* pVLen, int32_t* pWinCode); +int32_t recoverFillSnapshot(SStreamFileState* pFileState, int64_t ckId); +void deleteHashSortRowBuff(SStreamFileState* pFileState, const SWinKey* pKey); + +//group +int32_t streamFileStateGroupPut(SStreamFileState* pFileState, int64_t groupId, void* value, int32_t vLen); +void streamFileStateGroupCurNext(SStreamStateCur* pCur); +int32_t streamFileStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen); +SSHashObj* getGroupIdCache(SStreamFileState* pFileState); +int fillStateKeyCompare(const void* pWin1, const void* pDatas, int pos); +int32_t getRowStatePrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal, + int32_t* pVLen, int32_t* pWinCode); +int32_t addSearchItem(SStreamFileState* pFileState, SArray* pWinStates, const SWinKey* pKey); + +//twa +void setFillInfo(SStreamFileState* pFileState); +void clearExpiredState(SStreamFileState* pFileState); +int32_t addArrayBuffIfNotExist(SSHashObj* pSearchBuff, uint64_t groupId, SArray** ppResStates); + #ifdef __cplusplus } #endif diff --git a/include/libs/stream/tstreamUpdate.h b/include/libs/stream/tstreamUpdate.h index 06465e79e58..32712736c2f 100644 --- a/include/libs/stream/tstreamUpdate.h +++ b/include/libs/stream/tstreamUpdate.h @@ -36,8 +36,8 @@ bool updateInfoIsTableInserted(SUpdateInfo* pInfo, int64_t tbUid); void updateInfoDestroy(SUpdateInfo* pInfo); void updateInfoAddCloseWindowSBF(SUpdateInfo* pInfo); void updateInfoDestoryColseWinSBF(SUpdateInfo* pInfo); -int32_t updateInfoSerialize(void* buf, 
int32_t bufLen, const SUpdateInfo* pInfo, int32_t* pLen); -int32_t updateInfoDeserialize(void* buf, int32_t bufLen, SUpdateInfo* pInfo); +int32_t updateInfoSerialize(SEncoder* pEncoder, const SUpdateInfo* pInfo); +int32_t updateInfoDeserialize(SDecoder* pDeCoder, SUpdateInfo* pInfo); void windowSBfDelete(SUpdateInfo* pInfo, uint64_t count); int32_t windowSBfAdd(SUpdateInfo* pInfo, uint64_t count); bool isIncrementalTimeStamp(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts, void* pPkVal, int32_t len); diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 4af3ca58e13..c8131c9c4c6 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -210,6 +210,7 @@ int32_t taosGetErrSize(); #define TSDB_CODE_TSC_COMPRESS_PARAM_ERROR TAOS_DEF_ERROR_CODE(0, 0X0233) #define TSDB_CODE_TSC_COMPRESS_LEVEL_ERROR TAOS_DEF_ERROR_CODE(0, 0X0234) #define TSDB_CODE_TSC_FAIL_GENERATE_JSON TAOS_DEF_ERROR_CODE(0, 0X0235) +#define TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR TAOS_DEF_ERROR_CODE(0, 0X0236) #define TSDB_CODE_TSC_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0X02FF) // mnode-common @@ -354,6 +355,8 @@ int32_t taosGetErrSize(); #define TSDB_CODE_MND_INVALID_SYS_TABLENAME TAOS_DEF_ERROR_CODE(0, 0x039A) #define TSDB_CODE_MND_ENCRYPT_NOT_ALLOW_CHANGE TAOS_DEF_ERROR_CODE(0, 0x039B) #define TSDB_CODE_MND_INVALID_WAL_LEVEL TAOS_DEF_ERROR_CODE(0, 0x039C) +#define TSDB_CODE_MND_INVALID_DNODE_LIST_FMT TAOS_DEF_ERROR_CODE(0, 0x039D) +#define TSDB_CODE_MND_DNODE_LIST_REPEAT TAOS_DEF_ERROR_CODE(0, 0x039E) // mnode-node #define TSDB_CODE_MND_MNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03A0) @@ -776,6 +779,7 @@ int32_t taosGetErrSize(); #define TSDB_CODE_SCH_TIMEOUT_ERROR TAOS_DEF_ERROR_CODE(0, 0x2504) #define TSDB_CODE_SCH_JOB_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x2505) #define TSDB_CODE_SCH_JOB_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x2506) +#define TSDB_CODE_SCH_DATA_SRC_EP_MISS TAOS_DEF_ERROR_CODE(0, 0x2507) //parser #define TSDB_CODE_PAR_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x2600) 
@@ -922,6 +926,11 @@ int32_t taosGetErrSize(); #define TSDB_CODE_FUNC_INVALID_RES_LENGTH TAOS_DEF_ERROR_CODE(0, 0x280E) #define TSDB_CODE_FUNC_HISTOGRAM_ERROR TAOS_DEF_ERROR_CODE(0, 0x280F) #define TSDB_CODE_FUNC_PERCENTILE_ERROR TAOS_DEF_ERROR_CODE(0, 0x2810) +#define TSDB_CODE_FUNC_FUNTION_PARA_RANGE TAOS_DEF_ERROR_CODE(0, 0x2811) +#define TSDB_CODE_FUNC_FUNTION_PARA_PRIMTS TAOS_DEF_ERROR_CODE(0, 0x2812) +#define TSDB_CODE_FUNC_FUNTION_PARA_PK TAOS_DEF_ERROR_CODE(0, 0x2813) +#define TSDB_CODE_FUNC_FUNTION_PARA_HAS_COL TAOS_DEF_ERROR_CODE(0, 0x2814) +#define TSDB_CODE_FUNC_FUNCTION_HISTO_TYPE TAOS_DEF_ERROR_CODE(0, 0x2815) //udf diff --git a/include/util/tcompression.h b/include/util/tcompression.h index 1f09b750cbe..140b7fe392a 100644 --- a/include/util/tcompression.h +++ b/include/util/tcompression.h @@ -152,15 +152,12 @@ int32_t tsDecompressBigint(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int // for internal usage int32_t getWordLength(char type); -#ifdef __AVX2__ int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type); int32_t tsDecompressFloatImpAvx2(const char *input, int32_t nelements, char *output); int32_t tsDecompressDoubleImpAvx2(const char *input, int32_t nelements, char *output); -#endif -#ifdef __AVX512VL__ -void tsDecompressTimestampAvx2(const char *input, int32_t nelements, char *output, bool bigEndian); -void tsDecompressTimestampAvx512(const char *const input, const int32_t nelements, char *const output, bool bigEndian); -#endif +int32_t tsDecompressTimestampAvx2(const char *input, int32_t nelements, char *output, bool bigEndian); +int32_t tsDecompressTimestampAvx512(const char *const input, const int32_t nelements, char *const output, + bool bigEndian); /************************************************************************* * REGULAR COMPRESSION 2 diff --git a/include/util/tdef.h b/include/util/tdef.h index 486cb19200e..64e0a6a9ebd 100644 --- a/include/util/tdef.h +++ 
b/include/util/tdef.h @@ -41,6 +41,7 @@ extern const int32_t TYPE_BYTES[21]; #define FLOAT_BYTES sizeof(float) #define DOUBLE_BYTES sizeof(double) #define POINTER_BYTES sizeof(void *) +#define M256_BYTES 32 #define TSDB_KEYSIZE sizeof(TSKEY) #define TSDB_NCHAR_SIZE sizeof(TdUcs4) @@ -188,6 +189,47 @@ typedef enum EOperatorType { OP_TYPE_ASSIGN = 200 } EOperatorType; +static const EOperatorType OPERATOR_ARRAY[] = { + OP_TYPE_ADD, + OP_TYPE_SUB, + OP_TYPE_MULTI, + OP_TYPE_DIV, + OP_TYPE_REM, + + OP_TYPE_MINUS, + + OP_TYPE_BIT_AND, + OP_TYPE_BIT_OR, + + OP_TYPE_GREATER_THAN, + OP_TYPE_GREATER_EQUAL, + OP_TYPE_LOWER_THAN, + OP_TYPE_LOWER_EQUAL, + OP_TYPE_EQUAL, + OP_TYPE_NOT_EQUAL, + OP_TYPE_IN, + OP_TYPE_NOT_IN, + OP_TYPE_LIKE, + OP_TYPE_NOT_LIKE, + OP_TYPE_MATCH, + OP_TYPE_NMATCH, + + OP_TYPE_IS_NULL, + OP_TYPE_IS_NOT_NULL, + OP_TYPE_IS_TRUE, + OP_TYPE_IS_FALSE, + OP_TYPE_IS_UNKNOWN, + OP_TYPE_IS_NOT_TRUE, + OP_TYPE_IS_NOT_FALSE, + OP_TYPE_IS_NOT_UNKNOWN, + //OP_TYPE_COMPARE_MAX_VALUE, + + OP_TYPE_JSON_GET_VALUE, + OP_TYPE_JSON_CONTAINS, + + OP_TYPE_ASSIGN +}; + #define OP_TYPE_CALC_MAX OP_TYPE_BIT_OR typedef enum ELogicConditionType { @@ -411,6 +453,7 @@ typedef enum ELogicConditionType { #define TSDB_CACHE_MODEL_LAST_ROW 1 #define TSDB_CACHE_MODEL_LAST_VALUE 2 #define TSDB_CACHE_MODEL_BOTH 3 +#define TSDB_DNODE_LIST_LEN 256 #define TSDB_ENCRYPT_ALGO_STR_LEN 16 #define TSDB_ENCRYPT_ALGO_NONE_STR "none" #define TSDB_ENCRYPT_ALGO_SM4_STR "sm4" @@ -450,13 +493,13 @@ typedef enum ELogicConditionType { #define TSDB_MIN_S3_CHUNK_SIZE (128 * 1024) #define TSDB_MAX_S3_CHUNK_SIZE (1024 * 1024) -#define TSDB_DEFAULT_S3_CHUNK_SIZE (256 * 1024) +#define TSDB_DEFAULT_S3_CHUNK_SIZE (128 * 1024) #define TSDB_MIN_S3_KEEP_LOCAL (1 * 1440) // unit minute #define TSDB_MAX_S3_KEEP_LOCAL (365000 * 1440) -#define TSDB_DEFAULT_S3_KEEP_LOCAL (3650 * 1440) +#define TSDB_DEFAULT_S3_KEEP_LOCAL (365 * 1440) #define TSDB_MIN_S3_COMPACT 0 #define TSDB_MAX_S3_COMPACT 1 -#define 
TSDB_DEFAULT_S3_COMPACT 0 +#define TSDB_DEFAULT_S3_COMPACT 1 #define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1 #define TSDB_REP_DEF_DB_WAL_RET_PERIOD 3600 @@ -610,6 +653,8 @@ enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 }; #define MONITOR_TAG_VALUE_LEN 300 #define MONITOR_METRIC_NAME_LEN 100 +#define AUDIT_OPERATION_LEN 20 + typedef enum { ANAL_ALGO_TYPE_ANOMALY_DETECT = 0, ANAL_ALGO_TYPE_FORECAST = 1, diff --git a/include/util/tlog.h b/include/util/tlog.h index 09ebb35e8f3..a6c87593d11 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -70,7 +70,7 @@ extern int32_t tdbDebugFlag; extern int32_t sndDebugFlag; extern int32_t simDebugFlag; -extern int32_t tqClientDebug; +extern int32_t tqClientDebugFlag; int32_t taosInitLog(const char *logName, int32_t maxFiles, bool tsc); void taosCloseLog(); diff --git a/include/util/version.h b/include/util/version.h index b241dd248b9..7b62914a332 100644 --- a/include/util/version.h +++ b/include/util/version.h @@ -20,11 +20,11 @@ extern "C" { #endif -extern char version[]; -extern char compatible_version[]; -extern char gitinfo[]; -extern char gitinfoOfInternal[]; -extern char buildinfo[]; +extern char td_version[]; +extern char td_compatible_version[]; +extern char td_gitinfo[]; +extern char td_gitinfoOfInternal[]; +extern char td_buildinfo[]; #ifdef __cplusplus } diff --git a/packaging/delete_ref_lock.py b/packaging/delete_ref_lock.py new file mode 100644 index 00000000000..cf0e4cdd058 --- /dev/null +++ b/packaging/delete_ref_lock.py @@ -0,0 +1,59 @@ +import subprocess +import re + +# 执行 git fetch 命令并捕获输出 +def git_fetch(): + result = subprocess.run(['git', 'fetch'], capture_output=True, text=True) + return result + +# 解析分支名称 +def parse_branch_name_type1(error_output): + # 使用正则表达式匹配 'is at' 前的分支名称 + match = re.search(r"error: cannot lock ref '(refs/remotes/origin/[^']+)': is at", error_output) + if match: + return match.group(1) + return None + +# 解析第二种错误中的分支名称 +def 
parse_branch_name_type2(error_output): + # 使用正则表达式匹配 'exists' 前的第一个引号内的分支名称 + match = re.search(r"'(refs/remotes/origin/[^']+)' exists;", error_output) + if match: + return match.group(1) + return None + +# 执行 git update-ref -d 命令 +def git_update_ref(branch_name): + if branch_name: + subprocess.run(['git', 'update-ref', '-d', f'{branch_name}'], check=True) + +# 解析错误类型并执行相应的修复操作 +def handle_error(error_output): + # 错误类型1:本地引用的提交ID与远程不一致 + if "is at" in error_output and "but expected" in error_output: + branch_name = parse_branch_name_type1(error_output) + if branch_name: + print(f"Detected error type 1, attempting to delete ref for branch: {branch_name}") + git_update_ref(branch_name) + else: + print("Error parsing branch name for type 1.") + # 错误类型2:尝试创建新的远程引用时,本地已经存在同名的引用 + elif "exists; cannot create" in error_output: + branch_name = parse_branch_name_type2(error_output) + if branch_name: + print(f"Detected error type 2, attempting to delete ref for branch: {branch_name}") + git_update_ref(branch_name) + else: + print("Error parsing branch name for type 2.") + +# 主函数 +def main(): + fetch_result = git_fetch() + if fetch_result.returncode != 0: # 如果 git fetch 命令失败 + error_output = fetch_result.stderr + handle_error(error_output) + else: + print("Git fetch successful.") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/packaging/smokeTest/assets/style.css b/packaging/smokeTest/assets/style.css new file mode 100644 index 00000000000..c89d42818c6 --- /dev/null +++ b/packaging/smokeTest/assets/style.css @@ -0,0 +1,319 @@ +body { + font-family: Helvetica, Arial, sans-serif; + font-size: 12px; + /* do not increase min-width as some may use split screens */ + min-width: 800px; + color: #999; + } + + h1 { + font-size: 24px; + color: black; + } + + h2 { + font-size: 16px; + color: black; + } + + p { + color: black; + } + + a { + color: #999; + } + + table { + border-collapse: collapse; + } + + /****************************** + * SUMMARY 
INFORMATION + ******************************/ + #environment td { + padding: 5px; + border: 1px solid #e6e6e6; + vertical-align: top; + } + #environment tr:nth-child(odd) { + background-color: #f6f6f6; + } + #environment ul { + margin: 0; + padding: 0 20px; + } + + /****************************** + * TEST RESULT COLORS + ******************************/ + span.passed, + .passed .col-result { + color: green; + } + + span.skipped, + span.xfailed, + span.rerun, + .skipped .col-result, + .xfailed .col-result, + .rerun .col-result { + color: orange; + } + + span.error, + span.failed, + span.xpassed, + .error .col-result, + .failed .col-result, + .xpassed .col-result { + color: red; + } + + .col-links__extra { + margin-right: 3px; + } + + /****************************** + * RESULTS TABLE + * + * 1. Table Layout + * 2. Extra + * 3. Sorting items + * + ******************************/ + /*------------------ + * 1. Table Layout + *------------------*/ + #results-table { + border: 1px solid #e6e6e6; + color: #999; + font-size: 12px; + width: 100%; + } + #results-table th, + #results-table td { + padding: 5px; + border: 1px solid #e6e6e6; + text-align: left; + } + #results-table th { + font-weight: bold; + } + + /*------------------ + * 2. 
Extra + *------------------*/ + .logwrapper { + max-height: 230px; + overflow-y: scroll; + background-color: #e6e6e6; + } + .logwrapper.expanded { + max-height: none; + } + .logwrapper.expanded .logexpander:after { + content: "collapse [-]"; + } + .logwrapper .logexpander { + z-index: 1; + position: sticky; + top: 10px; + width: max-content; + border: 1px solid; + border-radius: 3px; + padding: 5px 7px; + margin: 10px 0 10px calc(100% - 80px); + cursor: pointer; + background-color: #e6e6e6; + } + .logwrapper .logexpander:after { + content: "expand [+]"; + } + .logwrapper .logexpander:hover { + color: #000; + border-color: #000; + } + .logwrapper .log { + min-height: 40px; + position: relative; + top: -50px; + height: calc(100% + 50px); + border: 1px solid #e6e6e6; + color: black; + display: block; + font-family: "Courier New", Courier, monospace; + padding: 5px; + padding-right: 80px; + white-space: pre-wrap; + } + + div.media { + border: 1px solid #e6e6e6; + float: right; + height: 240px; + margin: 0 5px; + overflow: hidden; + width: 320px; + } + + .media-container { + display: grid; + grid-template-columns: 25px auto 25px; + align-items: center; + flex: 1 1; + overflow: hidden; + height: 200px; + } + + .media-container--fullscreen { + grid-template-columns: 0px auto 0px; + } + + .media-container__nav--right, + .media-container__nav--left { + text-align: center; + cursor: pointer; + } + + .media-container__viewport { + cursor: pointer; + text-align: center; + height: inherit; + } + .media-container__viewport img, + .media-container__viewport video { + object-fit: cover; + width: 100%; + max-height: 100%; + } + + .media__name, + .media__counter { + display: flex; + flex-direction: row; + justify-content: space-around; + flex: 0 0 25px; + align-items: center; + } + + .collapsible td:not(.col-links) { + cursor: pointer; + } + .collapsible td:not(.col-links):hover::after { + color: #bbb; + font-style: italic; + cursor: pointer; + } + + .col-result { + width: 130px; + 
} + .col-result:hover::after { + content: " (hide details)"; + } + + .col-result.collapsed:hover::after { + content: " (show details)"; + } + + #environment-header h2:hover::after { + content: " (hide details)"; + color: #bbb; + font-style: italic; + cursor: pointer; + font-size: 12px; + } + + #environment-header.collapsed h2:hover::after { + content: " (show details)"; + color: #bbb; + font-style: italic; + cursor: pointer; + font-size: 12px; + } + + /*------------------ + * 3. Sorting items + *------------------*/ + .sortable { + cursor: pointer; + } + .sortable.desc:after { + content: " "; + position: relative; + left: 5px; + bottom: -12.5px; + border: 10px solid #4caf50; + border-bottom: 0; + border-left-color: transparent; + border-right-color: transparent; + } + .sortable.asc:after { + content: " "; + position: relative; + left: 5px; + bottom: 12.5px; + border: 10px solid #4caf50; + border-top: 0; + border-left-color: transparent; + border-right-color: transparent; + } + + .hidden, .summary__reload__button.hidden { + display: none; + } + + .summary__data { + flex: 0 0 550px; + } + .summary__reload { + flex: 1 1; + display: flex; + justify-content: center; + } + .summary__reload__button { + flex: 0 0 300px; + display: flex; + color: white; + font-weight: bold; + background-color: #4caf50; + text-align: center; + justify-content: center; + align-items: center; + border-radius: 3px; + cursor: pointer; + } + .summary__reload__button:hover { + background-color: #46a049; + } + .summary__spacer { + flex: 0 0 550px; + } + + .controls { + display: flex; + justify-content: space-between; + } + + .filters, + .collapse { + display: flex; + align-items: center; + } + .filters button, + .collapse button { + color: #999; + border: none; + background: none; + cursor: pointer; + text-decoration: underline; + } + .filters button:hover, + .collapse button:hover { + color: #ccc; + } + + .filter__label { + margin-right: 10px; + } \ No newline at end of file diff --git 
a/packaging/smokeTest/conftest.py b/packaging/smokeTest/conftest.py new file mode 100644 index 00000000000..a5f6ebbbe96 --- /dev/null +++ b/packaging/smokeTest/conftest.py @@ -0,0 +1,115 @@ +# conftest.py +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--verMode", default="enterprise", help="community or enterprise" + ) + parser.addoption( + "--tVersion", default="3.3.2.6", help="the version of taos" + ) + parser.addoption( + "--baseVersion", default="smoking", help="the path of nas" + ) + parser.addoption( + "--sourcePath", default="nas", help="only support nas currently" + ) + + + + +# Collect the setup and teardown of each test case and their std information +setup_stdout_info = {} +teardown_stdout_info = {} + + +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item, call): + outcome = yield + rep = outcome.get_result() + + # Record the std of setup and teardown + if call.when == 'setup': + for i in rep.sections: + if i[0] == "Captured stdout setup": + if not setup_stdout_info: + setup_stdout_info[item.nodeid] = i[1] + elif call.when == 'teardown': + for i in rep.sections: + if i[0] == "Captured stdout teardown": + teardown_stdout_info[item.nodeid] = i[1] + + +# Insert setup and teardown's std in the summary section +def pytest_html_results_summary(prefix, summary, postfix): + if setup_stdout_info or teardown_stdout_info: + rows = [] + + # Insert setup stdout + if setup_stdout_info: + for nodeid, stdout in setup_stdout_info.items(): + html_content = ''' + + Setup: + + Show Setup + + + + '''.format(stdout.strip()) + + # 如果需要在 Python 脚本中生成 HTML,并使用 JavaScript 控制折叠内容的显示,可以这样做: + + html_script = ''' + + ''' + + # 输出完整的 HTML 代码 + final_html = html_content + html_script + rows.append(final_html) + rows.append("
") + # Insert teardown stdout + if teardown_stdout_info: + for nodeid, stdout in teardown_stdout_info.items(): + html_content = ''' + + Teardown: + + Show Teardown + + + + '''.format(stdout.strip()) + + # 如果需要在 Python 脚本中生成 HTML,并使用 JavaScript 控制折叠内容的显示,可以这样做: + + html_script = ''' + + ''' + + # 输出完整的 HTML 代码 + final_html = html_content + html_script + rows.append(final_html) + + prefix.extend(rows) diff --git a/packaging/smokeTest/debRpmAutoInstall.sh b/packaging/smokeTest/debRpmAutoInstall.sh new file mode 100755 index 00000000000..8fadffe4c68 --- /dev/null +++ b/packaging/smokeTest/debRpmAutoInstall.sh @@ -0,0 +1,15 @@ +#!/usr/bin/expect +set packageName [lindex $argv 0] +set packageSuffix [lindex $argv 1] +set timeout 30 +if { ${packageSuffix} == "deb" } { + spawn dpkg -i ${packageName} +} elseif { ${packageSuffix} == "rpm"} { + spawn rpm -ivh ${packageName} +} +expect "*one:" +send "\r" +expect "*skip:" +send "\r" + +expect eof diff --git a/packaging/smokeTest/getAndRunInstaller.bat b/packaging/smokeTest/getAndRunInstaller.bat new file mode 100644 index 00000000000..08b04a02719 --- /dev/null +++ b/packaging/smokeTest/getAndRunInstaller.bat @@ -0,0 +1,57 @@ +set baseVersion=%1% +set version=%2% +set verMode=%3% +set sType=%4% +echo %fileType% +rem stop services +if EXIST C:\TDengine ( + if EXIST C:\TDengine\stop-all.bat ( + call C:\TDengine\stop-all.bat /silent + echo "***************Stop taos services***************" + ) + if exist C:\TDengine\unins000.exe ( + call C:\TDengine\unins000.exe /silent + echo "***************uninstall TDengine***************" + ) + rd /S /q C:\TDengine +) +if EXIST C:\ProDB ( + if EXIST C:\ProDB\stop-all.bat ( + call C:\ProDB\stop-all.bat /silent + echo "***************Stop taos services***************" + ) + if exist C:\ProDB\unins000.exe ( + call C:\ProDB\unins000.exe /silent + echo "***************uninstall TDengine***************" + ) + rd /S /q C:\ProDB +) +if "%verMode%"=="enterprise" ( + if "%sType%"=="client" ( + set 
fileType=enterprise-client + ) else ( + set fileType=enterprise + ) +) else ( + set fileType=%sType% +) + +if "%baseVersion%"=="ProDB" ( + echo %fileType% + set installer=ProDB-%fileType%-%version%-Windows-x64.exe +) else ( + echo %fileType% + set installer=TDengine-%fileType%-%version%-Windows-x64.exe +) + +if "%baseVersion%"=="ProDB" ( + echo %installer% + scp root@192.168.1.213:/nas/OEM/ProDB/v%version%/%installer% C:\workspace +) else ( + echo %installer% + scp root@192.168.1.213:/nas/TDengine/%baseVersion%/v%version%/%verMode%/%installer% C:\workspace +) + +echo "***************Finish installer transfer!***************" +C:\workspace\%installer% /silent +echo "***************Finish install!***************" \ No newline at end of file diff --git a/packaging/smokeTest/getAndRunInstaller.sh b/packaging/smokeTest/getAndRunInstaller.sh new file mode 100755 index 00000000000..7defe6394c6 --- /dev/null +++ b/packaging/smokeTest/getAndRunInstaller.sh @@ -0,0 +1,325 @@ +#!/bin/sh + + +function usage() { + echo "$0" + echo -e "\t -f test file type,server/client/tools/" + echo -e "\t -m pacakage version Type,community/enterprise" + echo -e "\t -l package type,lite or not" + echo -e "\t -c operation type,x64/arm64" + echo -e "\t -v pacakage version,3.0.1.7" + echo -e "\t -o pacakage version,3.0.1.7" + echo -e "\t -s source Path,web/nas" + echo -e "\t -t package Type,tar/rpm/deb" + echo -e "\t -h help" +} + + +#parameter +scriptDir=$(dirname $(readlink -f $0)) +version="3.0.1.7" +originversion="smoking" +testFile="server" +verMode="communtity" +sourcePath="nas" +cpuType="x64" +lite="true" +packageType="tar" +subFile="package.tar.gz" +while getopts "m:c:f:l:s:o:t:v:h" opt; do + case $opt in + m) + verMode=$OPTARG + ;; + v) + version=$OPTARG + ;; + f) + testFile=$OPTARG + ;; + l) + lite=$OPTARG + ;; + s) + sourcePath=$OPTARG + ;; + o) + originversion=$OPTARG + ;; + c) + cpuType=$OPTARG + ;; + t) + packageType=$OPTARG + ;; + h) + usage + exit 0 + ;; + ?) 
+ echo "Invalid option: -$OPTARG" + usage + exit 0 + ;; + esac +done + +systemType=`uname` +if [ ${systemType} == "Darwin" ]; then + platform="macOS" +else + platform="Linux" +fi + +echo "testFile:${testFile},verMode:${verMode},lite:${lite},cpuType:${cpuType},packageType:${packageType},version-${version},originversion:${originversion},sourcePath:${sourcePath}" +# Color setting +RED='\033[41;30m' +GREEN='\033[1;32m' +YELLOW='\033[1;33m' +BLUE='\033[1;34m' +GREEN_DARK='\033[0;32m' +YELLOW_DARK='\033[0;33m' +BLUE_DARK='\033[0;34m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' +if [ "${originversion}" = "ProDB" ]; then + TDengine="ProDB" +else + TDengine="TDengine" +fi +if [[ ${verMode} = "enterprise" ]];then + prePackage="${TDengine}-enterprise" + if [[ ${testFile} = "client" ]];then + prePackage="${TDengine}-enterprise-${testFile}" + fi +elif [ ${verMode} = "community" ];then + prePackage="${TDengine}-${testFile}" +fi +if [ ${lite} = "true" ];then + packageLite="-Lite" +elif [ ${lite} = "false" ];then + packageLite="" +fi +if [[ "$packageType" = "tar" ]] ;then + packageType="tar.gz" +fi + +tdPath="${prePackage}-${version}" + +packageName="${tdPath}-${platform}-${cpuType}${packageLite}.${packageType}" + +if [ "$testFile" == "server" ] ;then + installCmd="install.sh" +elif [ ${testFile} = "client" ];then + installCmd="install_client.sh" +fi + +echo "tdPath:${tdPath},packageName:${packageName}}" +cmdInstall() { +command=$1 +if command -v ${command} ;then + echoColor YD "${command} is already installed" +else + if command -v apt ;then + apt-get install ${command} -y + elif command -v yum ;then + yum -y install ${command} + echoColor YD "you should install ${command} manually" + fi +fi +} + +echoColor() { + color=$1 + command=$2 + if [ ${color} = 'Y' ];then + echo -e "${YELLOW}${command}${NC}" + elif [ ${color} = 'YD' ];then + echo -e "${YELLOW_DARK}${command}${NC}" + elif [ ${color} = 'R' ];then + echo -e "${RED}${command}${NC}" + elif [ ${color} = 'G' ];then + echo -e 
"${GREEN}${command}${NC}\r\n" + elif [ ${color} = 'B' ];then + echo -e "${BLUE}${command}${NC}" + elif [ ${color} = 'BD' ];then + echo -e "${BLUE_DARK}${command}${NC}" + fi +} + +wgetFile() { + + file=$1 + versionPath=$2 + sourceP=$3 + nasServerIP="192.168.1.213" + if [ "${originversion}" = "ProDB" ]; then + packagePath="/nas/OEM/ProDB/v${versionPath}" + else + packagePath="/nas/TDengine/${originversion}/v${versionPath}/${verMode}" + fi + if [ -f ${file} ];then + echoColor YD "${file} already exists ,it will delete it and download it again " + # rm -rf ${file} + fi + + if [[ ${sourceP} = 'web' ]];then + echoColor BD "====download====:wget https://www.taosdata.com/assets-download/3.0/${file}" + wget https://www.taosdata.com/assets-download/3.0/${file} + elif [[ ${sourceP} = 'nas' ]];then + echoColor BD "====download====:scp root@${nasServerIP}:${packagePath}/${file} ." + scp root@${nasServerIP}:${packagePath}/${file} . + fi +} + +function newPath { + +buildPath=$1 + +if [ ! -d ${buildPath} ] ;then + echoColor BD "mkdir -p ${buildPath}" + mkdir -p ${buildPath} +else + echoColor YD "${buildPath} already exists" +fi + +} + +echoColor G "===== install basesoft =====" +cmdInstall tree +cmdInstall wget +cmdInstall expect + +echoColor G "===== Uninstall all components of TDeingne =====" + +if command -v rmtaos ;then + echoColor YD "uninstall all components of TDeingne:rmtaos" + rmtaos +else + echoColor YD "os doesn't include TDengine" +fi + +if [[ ${packageName} =~ "server" ]] ;then + echoColor BD " pkill -9 taosd " + pkill -9 taosd +fi + +if command -v rmprodb ;then + echoColor YD "uninstall all components of TDeingne:rmprodb" + rmprodb +else + echoColor YD "os doesn't include TDengine" +fi + +if [[ ${packageName} =~ "server" ]] ;then + echoColor BD " pkill -9 prodbd " + pkill -9 prodbd +fi + +echoColor G "===== new workroom path =====" +installPath="/usr/local/src/packageTest" + +if [ ${systemType} == "Darwin" ]; then + installPath="${WORK_DIR}/packageTest" +fi + 
+newPath ${installPath} + +#if [ -d ${installPath}/${tdPath} ] ;then +# echoColor BD "rm -rf ${installPath}/${tdPath}/*" +# rm -rf ${installPath}/${tdPath}/* +#fi + +echoColor G "===== download installPackage =====" +cd ${installPath} && wgetFile ${packageName} ${version} ${sourcePath} +#cd ${oriInstallPath} && wgetFile ${originPackageName} ${originversion} ${sourcePath} + + +cd ${installPath} +cp -r ${scriptDir}/debRpmAutoInstall.sh . + +packageSuffix=$(echo ${packageName} | awk -F '.' '{print $NF}') + + +if [ ! -f debRpmAutoInstall.sh ];then + echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh + echo 'set packageName [lindex $argv 0]' >> debRpmAutoInstall.sh + echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh + echo 'set timeout 30 ' >> debRpmAutoInstall.sh + echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh + echo ' spawn dpkg -i ${packageName} ' >> debRpmAutoInstall.sh + echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh + echo ' spawn rpm -ivh ${packageName}' >> debRpmAutoInstall.sh + echo '}' >> debRpmAutoInstall.sh + echo 'expect "*one:"' >> debRpmAutoInstall.sh + echo 'send "\r"' >> debRpmAutoInstall.sh + echo 'expect "*skip:"' >> debRpmAutoInstall.sh + echo 'send "\r" ' >> debRpmAutoInstall.sh +fi + + +echoColor G "===== install Package =====" + +if [[ ${packageName} =~ "deb" ]];then + cd ${installPath} + dpkg -r taostools + dpkg -r tdengine + if [[ ${packageName} =~ "TDengine" ]];then + echoColor BD "./debRpmAutoInstall.sh ${packageName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packageName} ${packageSuffix} + else + echoColor BD "dpkg -i ${packageName}" && dpkg -i ${packageName} + fi +elif [[ ${packageName} =~ "rpm" ]];then + cd ${installPath} + sudo rpm -e tdengine + sudo rpm -e taostools + if [[ ${packageName} =~ "TDengine" ]];then + echoColor BD "./debRpmAutoInstall.sh ${packageName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh 
${packageName} ${packageSuffix} + else + echoColor BD "rpm -ivh ${packageName}" && rpm -ivh ${packageName} + fi +elif [[ ${packageName} =~ "tar" ]];then + echoColor G "===== check installPackage File of tar =====" + + cd ${installPath} + echoColor YD "unzip the new installation package" + echoColor BD "tar -xf ${packageName}" && tar -xf ${packageName} + + cd ${installPath}/${tdPath} && tree -I "driver" > ${installPath}/now_${version}_checkfile + + cd ${installPath} + diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log + diffNumbers=`cat ${installPath}/diffFile.log |wc -l ` + + if [ ${diffNumbers} != 0 ];then + echoColor R "The number and names of files is different from the previous installation package" + diffLog=`cat ${installPath}/diffFile.log` + echoColor Y "${diffLog}" + exit -1 + else + echoColor G "The number and names of files are the same as previous installation packages" + rm -rf ${installPath}/diffFile.log + fi + echoColor YD "===== install Package of tar =====" + cd ${installPath}/${tdPath} + if [ ${testFile} = "server" ];then + echoColor BD "bash ${installCmd} -e no " + bash ${installCmd} -e no + else + echoColor BD "bash ${installCmd} " + bash ${installCmd} + fi +elif [[ ${packageName} =~ "pkg" ]];then + cd ${installPath} + sudo installer -pkg ${packageName} -target / + echoColor YD "===== install Package successfully! 
=====" +fi + +#cd ${installPath} +# +#rm -rf ${installPath}/${packageName} +#if [ ${platform} == "Linux" ]; then +# rm -rf ${installPath}/${tdPath}/ +#fi +echoColor YD "===== end of shell file =====" + diff --git a/packaging/smokeTest/lib.py b/packaging/smokeTest/lib.py new file mode 100644 index 00000000000..86c30bf8b12 --- /dev/null +++ b/packaging/smokeTest/lib.py @@ -0,0 +1,12 @@ + +import subprocess + + +def run_cmd(command): + print("CMD:", command) + result = subprocess.run(command, capture_output=True, text=True, shell=True) + print("STDOUT:", result.stdout) + print("STDERR:", result.stderr) + print("Return Code:", result.returncode) + #assert result.returncode == 0 + return result diff --git a/packaging/smokeTest/main.py b/packaging/smokeTest/main.py new file mode 100644 index 00000000000..cb7356f80e8 --- /dev/null +++ b/packaging/smokeTest/main.py @@ -0,0 +1,21 @@ +import pytest + +# python3 -m pytest test_server.py -v --html=/var/www/html/report.html --json-report --json-report-file="/var/www/html/report.json" --timeout=60 + +# pytest.main(["-s", "-v"]) +import pytest + +import subprocess + + +# define cmd function + + + + +def main(): + pytest.main(['--html=report.html']) + + +if __name__ == '__main__': + main() diff --git a/packaging/smokeTest/pytest_require.txt b/packaging/smokeTest/pytest_require.txt new file mode 100644 index 00000000000..34019c6e8a7 --- /dev/null +++ b/packaging/smokeTest/pytest_require.txt @@ -0,0 +1,17 @@ +pytest-html +pytest-json-report +pytest-timeout +taospy +numpy +fabric2 +psutil +pandas +toml +distro +requests +pexpect +faker +pyopenssl +taos-ws-py +taospy +tzlocal \ No newline at end of file diff --git a/packaging/smokeTest/runCases.bat b/packaging/smokeTest/runCases.bat new file mode 100644 index 00000000000..922766785ca --- /dev/null +++ b/packaging/smokeTest/runCases.bat @@ -0,0 +1,11 @@ +rm -rf %WIN_TDENGINE_ROOT_DIR%\debug +mkdir %WIN_TDENGINE_ROOT_DIR%\debug +mkdir %WIN_TDENGINE_ROOT_DIR%\debug\build +mkdir 
%WIN_TDENGINE_ROOT_DIR%\debug\build\bin +xcopy C:\TDengine\taos*.exe %WIN_TDENGINE_ROOT_DIR%\debug\build\bin + +set case_out_file=%cd%\case.out + +cd %WIN_TDENGINE_ROOT_DIR%\tests\system-test +python3 .\test.py -f 0-others\taosShell.py +python3 .\test.py -f 6-cluster\5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py -N 6 -M 3 \ No newline at end of file diff --git a/packaging/smokeTest/runCases.sh b/packaging/smokeTest/runCases.sh new file mode 100644 index 00000000000..4de7a7658bc --- /dev/null +++ b/packaging/smokeTest/runCases.sh @@ -0,0 +1,29 @@ +#!/bin/bash +ulimit -c unlimited + +rm -rf ${TDENGINE_ROOT_DIR}/debug +mkdir ${TDENGINE_ROOT_DIR}/debug +mkdir ${TDENGINE_ROOT_DIR}/debug/build +mkdir ${TDENGINE_ROOT_DIR}/debug/build/bin + +systemType=`uname` +if [ ${systemType} == "Darwin" ]; then + cp /usr/local/bin/taos* ${TDENGINE_ROOT_DIR}/debug/build/bin/ +else + cp /usr/bin/taos* ${TDENGINE_ROOT_DIR}/debug/build/bin/ +fi + +case_out_file=`pwd`/case.out +python3 -m pip install -r ${TDENGINE_ROOT_DIR}/tests/requirements.txt >> $case_out_file +python3 -m pip install taos-ws-py taospy >> $case_out_file + +cd ${TDENGINE_ROOT_DIR}/tests/army +python3 ./test.py -f query/query_basic.py -N 3 >> $case_out_file + +cd ${TDENGINE_ROOT_DIR}/tests/system-test +python3 ./test.py -f 1-insert/insert_column_value.py >> $case_out_file +python3 ./test.py -f 2-query/primary_ts_base_5.py >> $case_out_file +python3 ./test.py -f 2-query/case_when.py >> $case_out_file +python3 ./test.py -f 2-query/partition_limit_interval.py >> $case_out_file +python3 ./test.py -f 2-query/join.py >> $case_out_file +python3 ./test.py -f 2-query/fill.py >> $case_out_file diff --git a/packaging/smokeTest/smokeTestClient.py b/packaging/smokeTest/smokeTestClient.py new file mode 100644 index 00000000000..eee9667300f --- /dev/null +++ b/packaging/smokeTest/smokeTestClient.py @@ -0,0 +1,251 @@ +#!/usr/bin/python +################################################################### +# Copyright (c) 2016 by TAOS 
Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### +# install pip +# pip install src/connector/python/ + +# -*- coding: utf-8 -*- +import sys, os +import re +import platform +import getopt +import subprocess +# from this import d +import time + +# input for server + +opts, args = getopt.gnu_getopt(sys.argv[1:], 'h:P:v:u', [ + 'host=', 'Port=', 'version=']) +serverHost = "" +serverPort = 0 +version = "" +uninstall = False +for key, value in opts: + if key in ['--help']: + print('A collection of test cases written using Python') + print('-h serverHost') + print('-P serverPort') + print('-v test client version') + print('-u test uninstall process, will uninstall TDengine') + sys.exit(0) + + if key in ['-h']: + serverHost = value + if key in ['-P']: + serverPort = int(value) + if key in ['-v']: + version = value + if key in ['-u']: + uninstall = True +if not serverHost: + print("Please input use -h to specify your server host.") + sys.exit(0) +if not version: + print("No version specified, will not run version check.") +if serverPort == 0: + serverPort = 6030 + print("No server port specified, use default 6030.") + + +system = platform.system() + +arch = platform.machine() + +databaseName = re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower() +# install taospy +taospy_version = "" +if system == 'Windows': + taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version") +else: + taospy_version = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ") + +print("taospy version %s " % taospy_version) +if taospy_version == "": + subprocess.getoutput("pip3 install 
git+https://github.com/taosdata/taos-connector-python.git") + print("install taos python connector") +else: + subprocess.getoutput("pip3 install taospy") + +# prepare data by taosBenchmark +cmd = "taosBenchmark -y -a 3 -n 100 -t 100 -d %s -h %s -P %d &" % (databaseName, serverHost, serverPort) +process_out = subprocess.getoutput(cmd) +print(cmd) +#os.system("taosBenchmark -y -a 3 -n 100 -t 100 -d %s -h %s -P %d" % (databaseName, serverHost, serverPort)) +taosBenchmark_test_result = True +time.sleep(10) +import taos + +conn = taos.connect(host=serverHost, + user="root", + password="taosdata", + database=databaseName, + port=serverPort, + timezone="Asia/Shanghai") # default your host's timezone + +server_version = conn.server_info +print("server_version", server_version) +client_version = conn.client_info +print("client_version", client_version) # 3.0.0.0 + +# Execute a sql and get its result set. It's useful for SELECT statement +result: taos.TaosResult = conn.query("SELECT count(*) from meters") + +data = result.fetch_all() +print(data) +if data[0][0] !=10000: + print(" taosBenchmark work not as expected ") + print("!!!!!!!!!!!Test Result: taosBenchmark test failed! 
!!!!!!!!!!") + sys.exit(1) +#else: +# print("**********Test Result: taosBenchmark test passed **********") + + +# drop database of test +taos_test_result = False +print("drop database test") +print("run taos -s 'drop database %s;' -h %s -P %d" % (databaseName, serverHost, serverPort)) +taos_cmd_outpur = subprocess.getoutput('taos -s "drop database %s;" -h %s -P %d' % (databaseName, serverHost, serverPort)) +print(taos_cmd_outpur) +if ("Drop OK" in taos_cmd_outpur): + taos_test_result = True + #print("*******Test Result: taos test passed ************") + +version_test_result = False +if version: + print("Client info is: %s"%conn.client_info) + taos_V_output = "" + if system == "Windows": + taos_V_output = subprocess.getoutput("taos -V | findstr version") + else: + taos_V_output = subprocess.getoutput("taos -V | grep version") + + print("taos -V output is: %s" % taos_V_output) + if version in taos_V_output and version in conn.client_info: + version_test_result = True + #print("*******Test Result: Version check passed ************") + +conn.close() +if uninstall: + print("Start to run rmtaos") + leftFile = False + print("Platform: ", system) + + if system == "Linux": + # 创建一个subprocess.Popen对象,并使用stdin和stdout进行交互 + process = subprocess.Popen(['rmtaos'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True) + # 向子进程发送输入 + process.stdin.write("y\n") + process.stdin.flush() # 确保输入被发送到子进程 + process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n") + process.stdin.flush() # 确保输入被发送到子进程 + # 关闭子进程的stdin,防止它无限期等待更多输入 + process.stdin.close() + # 等待子进程结束 + process.wait() + # 检查目录清除情况 + out = subprocess.getoutput("ls /etc/systemd/system/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/bin/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = 
subprocess.getoutput("ls /usr/local/bin/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/lib/libtaos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/lib64/libtaos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/include/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/local/taos") + #print(out) + if "No such file or directory" not in out: + print("Uninstall left some files in /usr/local/taos:%s" % out) + leftFile = True + if not leftFile: + print("*******Test Result: uninstall test passed ************") + + elif system == "Darwin": + # 创建一个subprocess.Popen对象,并使用stdin和stdout进行交互 + process = subprocess.Popen(['sudo', 'rmtaos'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True) + # 向子进程发送输入 + process.stdin.write("y\n") + process.stdin.flush() # 确保输入被发送到子进程 + process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n") + process.stdin.flush() # 确保输入被发送到子进程 + # 关闭子进程的stdin,防止它无限期等待更多输入 + process.stdin.close() + # 等待子进程结束 + process.wait() + # 检查目录清除情况 + out = subprocess.getoutput("ls /usr/local/bin/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/local/lib/libtaos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/local/include/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + #out = subprocess.getoutput("ls /usr/local/Cellar/tdengine/") + 
#print(out) + #if out: + # print("Uninstall left some files: /usr/local/Cellar/tdengine/%s" % out) + # leftFile = True + #if not leftFile: + # print("*******Test Result: uninstall test passed ************") + + elif system == "Windows": + process = subprocess.Popen(['unins000','/silent'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True) + process.wait() + time.sleep(10) + out = subprocess.getoutput("ls C:\TDengine") + print(out) + if len(out.split("\n")) > 3: + leftFile = True + print("Uninstall left some files: %s" % out) + +if taosBenchmark_test_result: + print("**********Test Result: taosBenchmark test passed! **********") +if taos_test_result: + print("**********Test Result: taos test passed! **********") +else: + print("!!!!!!!!!!!Test Result: taos test failed! !!!!!!!!!!") +if version_test_result: + print("**********Test Result: version test passed! **********") +else: + print("!!!!!!!!!!!Test Result: version test failed! !!!!!!!!!!") +if not leftFile: + print("**********Test Result: uninstall test passed! **********") +else: + print("!!!!!!!!!!!Test Result: uninstall test failed! 
!!!!!!!!!!") +if taosBenchmark_test_result and taos_test_result and version_test_result and not leftFile: + sys.exit(0) +else: + sys.exit(1) + diff --git a/packaging/smokeTest/smokeTestJenkinsFile b/packaging/smokeTest/smokeTestJenkinsFile new file mode 100644 index 00000000000..464393d85d7 --- /dev/null +++ b/packaging/smokeTest/smokeTestJenkinsFile @@ -0,0 +1,380 @@ +def sync_source(branch_name) { + sh ''' + hostname + ip addr|grep 192|awk '{print $2}'|sed "s/\\/.*//" + echo ''' + branch_name + ''' + ''' + sh ''' + cd ${TDENGINE_ROOT_DIR} + set +e + git reset --hard + git fetch || git fetch + git checkout -f '''+branch_name+''' + git reset --hard origin/'''+branch_name+''' + git log | head -n 20 + git clean -fxd + set -e + ''' + return 1 +} +def sync_source_win() { + bat ''' + hostname + taskkill /f /t /im taosd.exe + ipconfig + set + date /t + time /t + ''' + bat ''' + echo %branch_name% + cd %WIN_TDENGINE_ROOT_DIR% + git reset --hard + git fetch || git fetch + git checkout -f ''' + env.BRANCH_NAME + ''' + git reset --hard origin/''' + env.BRANCH_NAME + ''' + git branch + git restore . + git remote prune origin + git pull || git pull + git log | head -n 20 + git clean -fxd + ''' + return 1 +} +pipeline { + agent none + parameters { + choice( + name: 'sourcePath', + choices: ['nas','web'], + description: 'Choice which way to download the installation pacakge;web is Office Web and nas means taos nas server ' + ) + choice( + name: 'verMode', + choices: ['enterprise','community'], + description: 'Choice which types of package you want do check ' + ) + string ( + name:'version', + defaultValue:'3.3.2.0', + description: 'Release version number,eg: 3.0.0.1' + ) + string ( + name:'baseVersion', + defaultValue:'smoking', + description: 'Tnas root path. 
eg:smoking, 3.3' + ) + choice ( + name:'mode', + choices: ['server','client'], + description: 'Choose which mode of package you want do run ' + ) + choice ( + name:'smoke_branch', + choices: ['test/3.0/smokeTest','test/main/smokeTest','test/3.1/smokeTest'], + description: 'Choose which mode of package you want do run ' + ) + string ( + name:'runPlatforms', + defaultValue:'server_Linux_x64, server_Linux_arm64, server_Windows_x64, server_Mac_x64', + description: 'run package list hotfix usually run: server: server_Linux_x64, server_Linux_arm64 client: client_Linux_x64, client_Linux_arm64 release usually run: enterprise server: server_Linux_x64, server_Linux_arm64, server_Windows_x64 enterprise client: client_Linux_x64, client_Linux_arm64, client_Windows_x64 community server: server_Linux_x64, server_Linux_arm64, server_Mac_x64, server_Mac_arm64(not supported), server_Linux_x64_lite(not supported) community client: client_Linux_x64, client_Linux_arm64, client_Windows_x64, client_Mac_x64, client_Mac_arm64(not supported), client_Linux_x64_lite(not supported)' + ) + } + environment{ + WORK_DIR = "/var/lib/jenkins/workspace" + TDINTERNAL_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal' + TDENGINE_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal/community' + BRANCH_NAME = "${smoke_branch}" + } + stages { + stage ('Start Server for Client Test') { + when { + beforeAgent true + expression { mode == 'client' } + } + agent{label " ubuntu18 "} + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + withEnv(['JENKINS_NODE_COOKIE=dontkillme']) { + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest + bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + bash start3NodesServer.sh + ''' + } + } + } + } + stage ('Run SmokeTest') { + parallel { + stage('server_Linux_x64') { + when { + beforeAgent true + allOf { + expression { mode == 'server' } + expression { 
runPlatforms.contains('server_Linux_x64') } + } + } + agent{label " ubuntu16 "} + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + mkdir -p /var/www/html/${baseVersion}/${version}/${verMode}/json + cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest + bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 -m pytest test_server.py -v --html=/var/www/html/${baseVersion}/${version}/${verMode}/${mode}_linux_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true + cp report.json /var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_x64_report.json + curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=linux_x64" + ''' + } + } + } + stage('server_Linux_arm64') { + when { + beforeAgent true + allOf { + expression { mode == 'server' } + expression { runPlatforms.contains('server_Linux_arm64') } + } + } + agent{label "worker06_arm64"} + steps { + timeout(time: 60, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest + bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 -m pytest test_server.py -v --html=${mode}_linux_arm64_report.html --json-report --json-report-file=report.json --timeout=600 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true + scp ${mode}_linux_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/ + scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_arm64_report.json + curl 
"http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=linux_arm64" + ''' + } + } + } + stage ('server_Mac_x64') { + when { + beforeAgent true + allOf { + expression { mode == 'server' } + expression { runPlatforms.contains('server_Mac_x64') } + } + } + agent{label " release_Darwin_x64 "} + environment{ + WORK_DIR = "/Users/zwen/jenkins/workspace" + TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal' + TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community' + } + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest + bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg + python3 -m pytest -v -k linux --html=${mode}_Mac_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true + scp ${mode}_Mac_x64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/ + scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_x64_report.json + curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=Mac_x64" + ''' + } + } + } + stage ('server_Mac_arm64') { + when { + beforeAgent true + allOf { + expression { mode == 'server' } + expression { runPlatforms.contains('server_Mac_arm64') } + } + } + agent{label " release_Darwin_arm64 "} + environment{ + WORK_DIR = "/Users/zwen/jenkins/workspace" + TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal' + TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community' + } + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest + bash getAndRunInstaller.sh -m ${verMode} 
-f server -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg + python3 -m pytest -v -k linux --html=${mode}_Mac_arm64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true + scp ${mode}_Mac_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/ + scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_arm64_report.json + curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=server&build=Mac_arm64" + ''' + } + } + } + stage('server_Windows_x64') { + when { + beforeAgent true + allOf { + expression { mode == 'server' } + expression { runPlatforms.contains('server_Windows_x64') } + } + } + agent{label " windows11 "} + environment{ + WIN_WORK_DIR="C:\\workspace" + WIN_TDINTERNAL_ROOT_DIR="C:\\workspace\\TDinternal" + WIN_TDENGINE_ROOT_DIR="C:\\workspace\\TDinternal\\community" + } + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source_win() + bat ''' + cd %WIN_TDENGINE_ROOT_DIR%\\packaging\\smokeTest + call getAndRunInstaller.bat %baseVersion% %version% %verMode% server + cd %WIN_TDENGINE_ROOT_DIR%\\packaging\\smokeTest + pip3 install -r pytest_require.txt + python3 -m pytest test_server.py -v --html=%mode%_Windows_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=%verMode% --tVersion=%version% --baseVersion=%baseVersion% --sourcePath=%sourcePath% + scp %mode%_Windows_x64_report.html root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/ + scp report.json root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/json/%mode%_Windows_x64_report.json + curl "http://192.168.0.176/api/addSmoke?version=%version%&tag=%baseVersion%&type=%verMode%&role=server&build=Windows_x64" + ''' + } + } + } + stage('client_Linux_x64') { + when { + beforeAgent 
true + allOf { + expression { mode == 'client' } + expression { runPlatforms.contains('client_Linux_x64') } + } + } + agent{label " ubuntu16 "} + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + mkdir -p /var/www/html/${baseVersion}/${version}/${verMode}/json + cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest + bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 -m pytest test_client.py -v --html=/var/www/html/${baseVersion}/${version}/${verMode}/${mode}_linux_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true + cp report.json /var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_x64_report.json + curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=linux_x64" + ''' + } + } + } + stage('client_Linux_arm64') { + when { + beforeAgent true + allOf { + expression { mode == 'client' } + expression { runPlatforms.contains('client_Linux_arm64') } + } + } + agent{label " worker06_arm64 "} + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest + bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + python3 -m pytest test_client.py -v --html=${mode}_linux_arm64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true + scp ${mode}_linux_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/ + scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_linux_arm64_report.json + curl 
"http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=linux_arm64" + ''' + } + } + } + stage ('client_Mac_x64') { + when { + beforeAgent true + allOf { + expression { mode == 'client' } + expression { runPlatforms.contains('client_Mac_x64') } + } + } + agent{label " release_Darwin_x64 "} + environment{ + WORK_DIR = "/Users/zwen/jenkins/workspace" + TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal' + TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community' + } + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd ${TDENGINE_ROOT_DIR}/packaging/smokeTest + bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg + rm -rf /opt/taos/main/TDinternal/debug/* || true + python3 -m pytest test_client.py -v --html=${mode}_Mac_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true + scp ${mode}_Mac_x64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/ + scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_x64_report.json + curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=Mac_x64" + ''' + } + } + } + stage ('client_Mac_arm64') { + when { + beforeAgent true + allOf { + expression { mode == 'client' } + expression { runPlatforms.contains('client_Mac_arm64') } + } + } + agent{label " release_Darwin_arm64 "} + environment{ + WORK_DIR = "/Users/zwen/jenkins/workspace" + TDINTERNAL_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal' + TDENGINE_ROOT_DIR = '/Users/zwen/jenkins/workspace/TDinternal/community' + } + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source("${BRANCH_NAME}") + sh ''' + cd 
${TDENGINE_ROOT_DIR}/packaging/smokeTest + bash getAndRunInstaller.sh -m ${verMode} -f client -l false -c arm64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t pkg + rm -rf /opt/taos/main/TDinternal/debug/* || true + python3 -m pytest test_client.py -v --html=${mode}_Mac_arm64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=${verMode} --tVersion=${version} --baseVersion=${baseVersion} --sourcePath=${sourcePath} || true + scp ${mode}_Mac_arm64_report.html root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/ + scp report.json root@192.168.0.21:/var/www/html/${baseVersion}/${version}/${verMode}/json/${mode}_Mac_arm64_report.json + curl "http://192.168.0.176/api/addSmoke?version=${version}&tag=${baseVersion}&type=${verMode}&role=client&build=Mac_arm64" + ''' + } + } + } + stage('client_Windows_x64') { + when { + beforeAgent true + allOf { + expression { mode == 'client' } + expression { runPlatforms.contains('client_Windows_x64') } + } + } + agent{label " windows71 "} + environment{ + WIN_WORK_DIR="C:\\workspace" + WIN_TDINTERNAL_ROOT_DIR="C:\\workspace\\TDinternal" + WIN_TDENGINE_ROOT_DIR="C:\\workspace\\TDinternal\\community" + } + steps { + timeout(time: 30, unit: 'MINUTES'){ + sync_source_win() + bat ''' + cd %WIN_TDENGINE_ROOT_DIR%\\packaging\\smokeTest + call getAndRunInstaller.bat %baseVersion% %version% %verMode% client + pip3 install -r pytest_require.txt + python3 -m pytest test_client.py -v --html=%mode%_Windows_x64_report.html --json-report --json-report-file=report.json --timeout=300 --verMode=%verMode% --tVersion=%version% --baseVersion=%baseVersion% --sourcePath=%sourcePath% + scp %mode%_Windows_x64_report.html root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/ + scp report.json root@192.168.0.21:/var/www/html/%baseVersion%/%version%/%verMode%/json/%mode%_Windows_x64_report.json + curl 
"http://192.168.0.176/api/addSmoke?version=%version%&tag=%baseVersion%&type=%verMode%&role=client&build=Windows_x64" + ''' + } + } + } + } + } + } +} \ No newline at end of file diff --git a/packaging/smokeTest/start3NodesServer.sh b/packaging/smokeTest/start3NodesServer.sh new file mode 100644 index 00000000000..b446a467efc --- /dev/null +++ b/packaging/smokeTest/start3NodesServer.sh @@ -0,0 +1,67 @@ +#!/bin/bash +BUILD_ID=dontKillMe + +#******This script setup 3 nodes env for remote client installer test. Only for Linux ********* + +pwd=`pwd` +hostname=`hostname` +if [ -z $JENKINS_HOME ]; then + workdir="${pwd}/cluster" + echo $workdir +else + workdir="${JENKINS_HOME}/workspace/cluster" + echo $workdir +fi + +name="taos" +if command -v prodb ;then + name="prodb" +fi + +# Stop all taosd processes +for(( i=0; i<3; i++)) +do + pid=$(ps -ef | grep ${name}d | grep -v grep | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo}kill -9 $pid || : + fi +done + +# Init 3 dnodes workdir and config file +rm -rf ${workdir} +mkdir ${workdir} +mkdir ${workdir}/output +mkdir ${workdir}/dnode1 +mkdir ${workdir}/dnode1/data +mkdir ${workdir}/dnode1/log +mkdir ${workdir}/dnode1/cfg +touch ${workdir}/dnode1/cfg/${name}.cfg +echo -e "firstEp ${hostname}:6031\nsecondEp ${hostname}:6032\nfqdn ${hostname}\nserverPort 6031\nlogDir ${workdir}/dnode1/log\ndataDir ${workdir}/dnode1/data\n" >> ${workdir}/dnode1/cfg/${name}.cfg + +# Start first node +nohup ${name}d -c ${workdir}/dnode1/cfg/${name}.cfg & > /dev/null +sleep 5 + +${name} -P 6031 -s "CREATE DNODE \`${hostname}:6032\`;CREATE DNODE \`${hostname}:6033\`" + +mkdir ${workdir}/dnode2 +mkdir ${workdir}/dnode2/data +mkdir ${workdir}/dnode2/log +mkdir ${workdir}/dnode2/cfg +touch ${workdir}/dnode2/cfg/${name}.cfg +echo -e "firstEp ${hostname}:6031\nsecondEp ${hostname}:6032\nfqdn ${hostname}\nserverPort 6032\nlogDir ${workdir}/dnode2/log\ndataDir ${workdir}/dnode2/data\n" >> ${workdir}/dnode2/cfg/${name}.cfg + +nohup ${name}d -c 
${workdir}/dnode2/cfg/${name}.cfg & > /dev/null +sleep 5 + +mkdir ${workdir}/dnode3 +mkdir ${workdir}/dnode3/data +mkdir ${workdir}/dnode3/log +mkdir ${workdir}/dnode3/cfg +touch ${workdir}/dnode3/cfg/${name}.cfg +echo -e "firstEp ${hostname}:6031\nsecondEp ${hostname}:6032\nfqdn ${hostname}\nserverPort 6033\nlogDir ${workdir}/dnode3/log\ndataDir ${workdir}/dnode3/data\n" >> ${workdir}/dnode3/cfg/${name}.cfg + +nohup ${name}d -c ${workdir}/dnode3/cfg/${name}.cfg & > /dev/null +sleep 5 + +${name} -P 6031 -s "CREATE MNODE ON DNODE 2;CREATE MNODE ON DNODE 3;" \ No newline at end of file diff --git a/packaging/smokeTest/test_client.py b/packaging/smokeTest/test_client.py new file mode 100644 index 00000000000..0b1003e3702 --- /dev/null +++ b/packaging/smokeTest/test_client.py @@ -0,0 +1,137 @@ +import pytest +import subprocess +import os +import sys +import platform +import getopt +import re +import time +import taos +from versionCheckAndUninstallforPytest import UninstallTaos + +# python3 smokeTestClient.py -h 192.168.0.22 -P 6031 -v ${version} -u + +OEM = ["ProDB"] + + +@pytest.fixture(scope="module") +def get_config(request): + verMode = request.config.getoption("--verMode") + taosVersion = request.config.getoption("--tVersion") + baseVersion = request.config.getoption("--baseVersion") + sourcePath = request.config.getoption("--sourcePath") + config = { + "verMode": verMode, + "taosVersion": taosVersion, + "baseVersion": baseVersion, + "sourcePath": sourcePath, + "system": platform.system(), + "arch": platform.machine(), + "serverHost": "192.168.0.22", + "serverPort": 6031, + "databaseName": re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower() + } + return config + + +@pytest.fixture(scope="module") +def setup_module(get_config): + config = get_config + # install taospy + if config["system"] == 'Windows': + taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version") + else: + taospy_version = subprocess.getoutput("pip3 show taospy|grep 
Version| awk -F ':' '{print $2}' ") + + print("taospy version %s " % taospy_version) + if taospy_version == "": + subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git") + print("install taos python connector") + else: + subprocess.getoutput("pip3 install taospy") + + +def get_connect(host, port, database=None): + conn = taos.connect(host=host, + user="root", + password="taosdata", + database=database, + port=port, + timezone="Asia/Shanghai") # default your host's timezone + return conn + + +def run_cmd(command): + print("CMD: %s" % command) + result = subprocess.run(command, capture_output=True, text=True, shell=True) + print("STDOUT:", result.stdout) + print("STDERR:", result.stderr) + print("Return Code:", result.returncode) + assert result.returncode == 0 + return result + + +class TestClient: + @pytest.mark.all + def test_basic(self, get_config, setup_module): + config = get_config + name = "taos" + + if config["baseVersion"] in OEM: + name = config["baseVersion"].lower() + if config["baseVersion"] in OEM and config["system"] == 'Windows': + cmd = f'{name} -s "create database {config["databaseName"]};" -h {config["serverHost"]} -P {config["serverPort"]}' + run_cmd(cmd) + cmd = f'{name} -s "CREATE STABLE {config["databaseName"]}.meters (`ts` TIMESTAMP,`current` FLOAT, `phase` FLOAT) TAGS (`groupid` INT, `location` VARCHAR(24));" -h {config["serverHost"]} -P {config["serverPort"]}' + run_cmd(cmd) + else: + cmd = f'{name}Benchmark -y -a 3 -n 100 -t 100 -d {config["databaseName"]} -h {config["serverHost"]} -P {config["serverPort"]} &' + run_cmd(cmd) + # os.system("taosBenchmark -y -a 3 -n 100 -t 100 -d %s -h %s -P %d" % (databaseName, serverHost, serverPort)) + time.sleep(5) + conn = get_connect(config["serverHost"], config["serverPort"], config["databaseName"]) + sql = "SELECT count(*) from meters" + result: taos.TaosResult = conn.query(sql) + data = result.fetch_all() + print("SQL: %s" % sql) + print("Result: %s" % data) + 
if config["system"] == 'Windows' and config["baseVersion"] in OEM: + pass + elif data[0][0] != 10000: + raise f"{name}Benchmark work not as expected " + # drop database of test + cmd = f'{name} -s "drop database {config["databaseName"]};" -h {config["serverHost"]} -P {config["serverPort"]}' + result = run_cmd(cmd) + assert "Drop OK" in result.stdout + conn.close() + + @pytest.mark.all + def test_version(self, get_config, setup_module): + config = get_config + conn = get_connect(config["serverHost"], config["serverPort"]) + server_version = conn.server_info + print("server_version: ", server_version) + client_version = conn.client_info + print("client_version: ", client_version) + name = "taos" + if config["baseVersion"] in OEM: + name = config["baseVersion"].lower() + if config["system"] == "Windows": + taos_V_output = subprocess.getoutput(f"{name} -V | findstr version") + else: + taos_V_output = subprocess.getoutput(f"{name} -V | grep version") + assert config["taosVersion"] in taos_V_output + assert config["taosVersion"] in client_version + if config["taosVersion"] not in server_version: + print("warning: client version is not same as server version") + conn.close() + + @pytest.mark.all + def test_uninstall(self, get_config, setup_module): + config = get_config + name = "taos" + if config["baseVersion"] in OEM: + name = config["baseVersion"].lower() + subprocess.getoutput("rm /usr/local/bin/taos") + subprocess.getoutput("pkill taosd") + UninstallTaos(config["taosVersion"], config["verMode"], True, name) diff --git a/packaging/smokeTest/test_server.py b/packaging/smokeTest/test_server.py new file mode 100644 index 00000000000..36d86357a38 --- /dev/null +++ b/packaging/smokeTest/test_server.py @@ -0,0 +1,238 @@ +import pytest +import subprocess +import os +from versionCheckAndUninstallforPytest import UninstallTaos +import platform +import re +import time +import signal + +system = platform.system() +current_path = os.path.abspath(os.path.dirname(__file__)) +if 
system == 'Windows': + with open(r"%s\test_server_windows_case" % current_path) as f: + cases = f.read().splitlines() +else: + with open("%s/test_server_unix_case" % current_path) as f: + cases = f.read().splitlines() + +OEM = ["ProDB"] + + +@pytest.fixture(scope="module") +def get_config(request): + verMode = request.config.getoption("--verMode") + taosVersion = request.config.getoption("--tVersion") + baseVersion = request.config.getoption("--baseVersion") + sourcePath = request.config.getoption("--sourcePath") + config = { + "verMode": verMode, + "taosVersion": taosVersion, + "baseVersion": baseVersion, + "sourcePath": sourcePath, + "system": platform.system(), + "arch": platform.machine() + } + return config + + +@pytest.fixture(scope="module") +def setup_module(get_config): + def run_cmd(command): + print("CMD:", command) + result = subprocess.run(command, capture_output=True, text=True, shell=True) + print("STDOUT:", result.stdout) + print("STDERR:", result.stderr) + print("Return Code:", result.returncode) + assert result.returncode == 0 + return result + + # setup before module tests + config = get_config + # bash getAndRunInstaller.sh -m ${verMode} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar + # t = "tar" + # if config["system"] == "Darwin": + # t = "pkg" + # cmd = "bash getAndRunInstaller.sh -m %s -f server -l false -c x64 -v %s -o %s -s %s -t %s" % ( + # config["verMode"], config["taosVersion"], config["baseVersion"], config["sourcePath"], t) + # run_cmd(cmd) + if config["system"] == "Windows": + cmd = r"mkdir ..\..\debug\build\bin" + else: + cmd = "mkdir -p ../../debug/build/bin/" + subprocess.getoutput(cmd) + if config["system"] == "Linux": # add tmq_sim + cmd = "cp -rf ../../../debug/build/bin/tmq_sim ../../debug/build/bin/." 
+ subprocess.getoutput(cmd) + if config["system"] == "Darwin": + cmd = "sudo cp -rf /usr/local/bin/taos* ../../debug/build/bin/" + elif config["system"] == "Windows": + cmd = r"xcopy C:\TDengine\taos*.exe ..\..\debug\build\bin /Y" + else: + if config["baseVersion"] in OEM: + cmd = '''sudo find /usr/bin -name 'prodb*' -exec sh -c 'for file; do cp "$file" "../../debug/build/bin/taos${file##/usr/bin/%s}"; done' sh {} +''' % ( + config["baseVersion"].lower()) + else: + cmd = "sudo cp /usr/bin/taos* ../../debug/build/bin/" + run_cmd(cmd) + if config["baseVersion"] in OEM: # mock OEM + cmd = "sed -i 's/taos.cfg/%s.cfg/g' ../../tests/pytest/util/dnodes.py" % config["baseVersion"].lower() + run_cmd(cmd) + cmd = "sed -i 's/taosdlog.0/%sdlog.0/g' ../../tests/pytest/util/dnodes.py" % config["baseVersion"].lower() + run_cmd(cmd) + cmd = "sed -i 's/taos.cfg/%s.cfg/g' ../../tests/army/frame/server/dnode.py" % config["baseVersion"].lower() + run_cmd(cmd) + cmd = "sed -i 's/taosdlog.0/%sdlog.0/g' ../../tests/army/frame/server/dnode.py" % config["baseVersion"].lower() + run_cmd(cmd) + cmd = "ln -s /usr/bin/prodb /usr/local/bin/taos" + subprocess.getoutput(cmd) + + # yield + # + # name = "taos" + # if config["baseVersion"] in OEM: + # name = config["baseVersion"].lower() + # subprocess.getoutput("rm /usr/local/bin/taos") + # subprocess.getoutput("pkill taosd") + # UninstallTaos(config["taosVersion"], config["verMode"], True, name) + + +# use pytest fixture to exec case +@pytest.fixture(params=cases) +def run_command(request): + commands = request.param + if commands.strip().startswith("#"): + pytest.skip("This case has been marked as skipped") + d, command = commands.strip().split(",") + if system == "Windows": + cmd = r"cd %s\..\..\tests\%s && %s" % (current_path, d, command) + else: + cmd = "cd %s/../../tests/%s&&sudo %s" % (current_path, d, command) + print(cmd) + result = subprocess.run(cmd, capture_output=True, text=True, shell=True) + return { + "command": command, + "stdout": 
result.stdout, + "stderr": result.stderr, + "returncode": result.returncode + } + + +class TestServer: + @pytest.mark.all + def test_taosd_up(self, setup_module): + # start process + if system == 'Windows': + subprocess.getoutput("taskkill /IM taosd.exe /F") + cmd = "..\\..\\debug\\build\\bin\\taosd.exe" + else: + subprocess.getoutput("pkill taosd") + cmd = "../../debug/build/bin/taosd" + process = subprocess.Popen( + [cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) + # monitor output + while True: + line = process.stdout.readline() + if line: + print(line.strip()) + if "succeed to write dnode" in line: + time.sleep(15) + # 发送终止信号 + os.kill(process.pid, signal.SIGTERM) + break + + @pytest.mark.all + def test_execute_cases(self, setup_module, run_command): + # assert the result + if run_command['returncode'] != 0: + print(f"Running command: {run_command['command']}") + print("STDOUT:", run_command['stdout']) + print("STDERR:", run_command['stderr']) + print("Return Code:", run_command['returncode']) + else: + print(f"Running command: {run_command['command']}") + if len(run_command['stdout']) > 1000: + print("STDOUT:", run_command['stdout'][:1000] + "...") + else: + print("STDOUT:", run_command['stdout']) + print("STDERR:", run_command['stderr']) + print("Return Code:", run_command['returncode']) + + assert run_command[ + 'returncode'] == 0, f"Command '{run_command['command']}' failed with return code {run_command['returncode']}" + + @pytest.mark.all + @pytest.mark.check_version + def test_check_version(self, get_config, setup_module): + config = get_config + databaseName = re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower() + # install taospy + taospy_version = "" + system = config["system"] + version = config["taosVersion"] + verMode = config["verMode"] + if system == 'Windows': + taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version") + else: + taospy_version = subprocess.getoutput("pip3 show 
taospy|grep Version| awk -F ':' '{print $2}' ") + + print("taospy version %s " % taospy_version) + if taospy_version == "": + subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git") + print("install taos python connector") + else: + subprocess.getoutput("pip3 install taospy") + + # start taosd server + if system == 'Windows': + cmd = ["C:\\TDengine\\start-all.bat"] + # elif system == 'Linux': + # cmd = "systemctl start taosd".split(' ') + else: + # cmd = "sudo launchctl start com.tdengine.taosd".split(' ') + cmd = "start-all.sh" + process_out = subprocess.Popen(cmd, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True) + print(cmd) + time.sleep(5) + + import taos + conn = taos.connect() + check_list = {} + check_list["server_version"] = conn.server_info + check_list["client_version"] = conn.client_info + # Execute sql get version info + result: taos.TaosResult = conn.query("SELECT server_version()") + check_list["select_server"] = result.fetch_all()[0][0] + result: taos.TaosResult = conn.query("SELECT client_version()") + check_list["select_client"] = result.fetch_all()[0][0] + conn.close() + + binary_files = ["taos", "taosd", "taosadapter", "taoskeeper", "taosBenchmark"] + if verMode.lower() == "enterprise": + binary_files.append("taosx") + if config["baseVersion"] in OEM: + binary_files = [i.replace("taos", config["baseVersion"].lower()) for i in binary_files] + if system == "Windows": + for i in binary_files: + check_list[i] = subprocess.getoutput("%s -V | findstr version" % i) + else: + for i in binary_files: + check_list[i] = subprocess.getoutput("%s -V | grep version | awk -F ' ' '{print $3}'" % i) + for i in check_list: + print("%s version is: %s" % (i, check_list[i])) + assert version in check_list[i] + + @pytest.mark.all + def test_uninstall(self, get_config, setup_module): + config = get_config + name = "taos" + if config["baseVersion"] in OEM: + name = config["baseVersion"].lower() + subprocess.getoutput("rm 
/usr/local/bin/taos") + subprocess.getoutput("pkill taosd") + UninstallTaos(config["taosVersion"], config["verMode"], True, name) diff --git a/packaging/smokeTest/test_server_unix_case b/packaging/smokeTest/test_server_unix_case new file mode 100644 index 00000000000..1bbde109326 --- /dev/null +++ b/packaging/smokeTest/test_server_unix_case @@ -0,0 +1,10 @@ +system-test,python3 ./test.py -f 2-query/join.py +system-test,python3 ./test.py -f 1-insert/insert_column_value.py +system-test,python3 ./test.py -f 2-query/primary_ts_base_5.py +system-test,python3 ./test.py -f 2-query/case_when.py +system-test,python3 ./test.py -f 2-query/partition_limit_interval.py +system-test,python3 ./test.py -f 2-query/fill.py +army,python3 ./test.py -f query/query_basic.py -N 3 +system-test,python3 ./test.py -f 7-tmq/basic5.py +system-test,python3 ./test.py -f 8-stream/stream_basic.py +system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3 \ No newline at end of file diff --git a/packaging/smokeTest/test_server_windows_case b/packaging/smokeTest/test_server_windows_case new file mode 100644 index 00000000000..e64213b1eef --- /dev/null +++ b/packaging/smokeTest/test_server_windows_case @@ -0,0 +1,2 @@ +system-test,python3 .\test.py -f 0-others\taosShell.py +system-test,python3 .\test.py -f 6-cluster\5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py -N 6 -M 3 \ No newline at end of file diff --git a/packaging/smokeTest/versionCheckAndUninstall.py b/packaging/smokeTest/versionCheckAndUninstall.py new file mode 100644 index 00000000000..80dea9a15fb --- /dev/null +++ b/packaging/smokeTest/versionCheckAndUninstall.py @@ -0,0 +1,260 @@ +#!/usr/bin/python +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### +# install pip +# pip install src/connector/python/ + +# -*- coding: utf-8 -*- +import sys, os +import re +import platform +import getopt +import subprocess +# from this import d +import time + +# input for server + +opts, args = getopt.gnu_getopt(sys.argv[1:], 'v:m:u', ['version=', 'verMode=']) +serverHost = "" +serverPort = 0 +version = "" +uninstall = False +verMode = "" +for key, value in opts: + if key in ['--help']: + print('A collection of test cases written using Python') + print('-v test client version') + print('-u test uninstall process, will uninstall TDengine') + sys.exit(0) + + if key in ['-v']: + version = value + if key in ['-u']: + uninstall = True + if key in ['-m']: + verMode = value +if not version: + print("No version specified, will not run version check.") + + +system = platform.system() +arch = platform.machine() + +databaseName = re.sub(r'[^a-zA-Z0-9]', '', subprocess.getoutput("hostname")).lower() +# install taospy +taospy_version = "" +if system == 'Windows': + taospy_version = subprocess.getoutput("pip3 show taospy|findstr Version") +else: + taospy_version = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ") + +print("taospy version %s " % taospy_version) +if taospy_version == "": + subprocess.getoutput("pip3 install git+https://github.com/taosdata/taos-connector-python.git") + print("install taos python connector") +else: + subprocess.getoutput("pip3 install taospy") + +# start taosd server +if system == 'Windows': + cmd = ["C:\\TDengine\\start-all.bat"] +elif system == 'Linux': + cmd = "systemctl start taosd".split(' ') +else: + cmd = "sudo launchctl start com.tdengine.taosd".split(' ') +process_out = subprocess.Popen(cmd, + stdin=subprocess.PIPE, 
stdout=subprocess.PIPE, text=True) +print(cmd) +time.sleep(5) + +#get taosc version info +version_test_result = False +if version: + import taos + conn = taos.connect() + server_version = conn.server_info + print("server_version", server_version) + client_version = conn.client_info + print("client_version", client_version) + # Execute sql get version info + result: taos.TaosResult = conn.query("SELECT server_version()") + select_server = result.fetch_all()[0][0] + print("SELECT server_version():" + select_server) + result: taos.TaosResult = conn.query("SELECT client_version()") + select_client = result.fetch_all()[0][0] + print("SELECT client_version():" + select_client) + conn.close() + + taos_V_output = "" + taosd_V_output = "" + taosadapter_V_output = "" + taoskeeper_V_output = "" + taosx_V_output = "" + taosB_V_output = "" + taosxVersion = False + if system == "Windows": + taos_V_output = subprocess.getoutput("taos -V | findstr version") + taosd_V_output = subprocess.getoutput("taosd -V | findstr version") + taosadapter_V_output = subprocess.getoutput("taosadapter -V | findstr version") + taoskeeper_V_output = subprocess.getoutput("taoskeeper -V | findstr version") + taosB_V_output = subprocess.getoutput("taosBenchmark -V | findstr version") + if verMode == "Enterprise": + taosx_V_output = subprocess.getoutput("taosx -V | findstr version") + else: + taos_V_output = subprocess.getoutput("taos -V | grep version | awk -F ' ' '{print $3}'") + taosd_V_output = subprocess.getoutput("taosd -V | grep version | awk -F ' ' '{print $3}'") + taosadapter_V_output = subprocess.getoutput("taosadapter -V | grep version | awk -F ' ' '{print $3}'") + taoskeeper_V_output = subprocess.getoutput("taoskeeper -V | grep version | awk -F ' ' '{print $3}'") + taosB_V_output = subprocess.getoutput("taosBenchmark -V | grep version | awk -F ' ' '{print $3}'") + if verMode == "Enterprise": + taosx_V_output = subprocess.getoutput("taosx -V | grep version | awk -F ' ' '{print $3}'") + + 
print("taos -V output is: %s" % taos_V_output) + print("taosd -V output is: %s" % taosd_V_output) + print("taosadapter -V output is: %s" % taosadapter_V_output) + print("taoskeeper -V output is: %s" % taoskeeper_V_output) + print("taosBenchmark -V output is: %s" % taosB_V_output) + if verMode == "Enterprise": + print("taosx -V output is: %s" % taosx_V_output) + taosxVersion = version in taosx_V_output + else: + taosxVersion = True + if (version in client_version + and version in server_version + and version in select_server + and version in select_client + and version in taos_V_output + and version in taosd_V_output + and version in taosadapter_V_output + and version in taoskeeper_V_output + and version in taosB_V_output + and taosxVersion + ): + version_test_result = True +leftFile = False +if uninstall: + print("Start to run rmtaos") + print("Platform: ", system) + # stop taosd server + if system == 'Windows': + cmd = "C:\\TDengine\\stop_all.bat" + elif system == 'Linux': + cmd = "systemctl stop taosd" + else: + cmd = "sudo launchctl stop com.tdengine.taosd" + process_out = subprocess.getoutput(cmd) + print(cmd) + time.sleep(10) + if system == "Linux": + # 创建一个subprocess.Popen对象,并使用stdin和stdout进行交互 + process = subprocess.Popen(['rmtaos'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True) + # 向子进程发送输入 + process.stdin.write("y\n") + process.stdin.flush() # 确保输入被发送到子进程 + process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n") + process.stdin.flush() # 确保输入被发送到子进程 + # 关闭子进程的stdin,防止它无限期等待更多输入 + process.stdin.close() + # 等待子进程结束 + process.wait() + # 检查目录清除情况 + out = subprocess.getoutput("ls /etc/systemd/system/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/bin/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = 
subprocess.getoutput("ls /usr/local/bin/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/lib/libtaos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/lib64/libtaos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/include/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/local/taos") + #print(out) + if "No such file or directory" not in out: + print("Uninstall left some files in /usr/local/taos:%s" % out) + leftFile = True + if not leftFile: + print("*******Test Result: uninstall test passed ************") + + elif system == "Darwin": + # 创建一个subprocess.Popen对象,并使用stdin和stdout进行交互 + process = subprocess.Popen(['sudo', 'rmtaos'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True) + # 向子进程发送输入 + process.stdin.write("y\n") + process.stdin.flush() # 确保输入被发送到子进程 + process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n") + process.stdin.flush() # 确保输入被发送到子进程 + # 关闭子进程的stdin,防止它无限期等待更多输入 + process.stdin.close() + # 等待子进程结束 + process.wait() + # 检查目录清除情况 + out = subprocess.getoutput("ls /usr/local/bin/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/local/lib/libtaos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/local/include/taos*") + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + #out = subprocess.getoutput("ls /usr/local/Cellar/tdengine/") + 
#print(out) + #if out: + # print("Uninstall left some files: /usr/local/Cellar/tdengine/%s" % out) + # leftFile = True + #if not leftFile: + # print("*******Test Result: uninstall test passed ************") + + elif system == "Windows": + process = subprocess.Popen(['unins000','/silent'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True) + process.wait() + time.sleep(10) + out = subprocess.getoutput("ls C:\TDengine") + print(out) + if len(out.split("\n")) > 3: + leftFile = True + print("Uninstall left some files: %s" % out) + +if version_test_result: + print("**********Test Result: version test passed! **********") +else: + print("!!!!!!!!!!!Test Result: version test failed! !!!!!!!!!!") +if not leftFile: + print("**********Test Result: uninstall test passed! **********") +else: + print("!!!!!!!!!!!Test Result: uninstall test failed! !!!!!!!!!!") +if version_test_result and not leftFile: + sys.exit(0) +else: + sys.exit(1) + diff --git a/packaging/smokeTest/versionCheckAndUninstallforPytest.py b/packaging/smokeTest/versionCheckAndUninstallforPytest.py new file mode 100644 index 00000000000..5b752195545 --- /dev/null +++ b/packaging/smokeTest/versionCheckAndUninstallforPytest.py @@ -0,0 +1,137 @@ +#!/usr/bin/python +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### +# install pip +# pip install src/connector/python/ + +# -*- coding: utf-8 -*- +import sys, os +import re +import platform +import getopt +import subprocess +# from this import d +import time +from lib import run_cmd + + +# input for server +def UninstallTaos(version, verMode, uninstall, name): + if not version: + raise "No version specified, will not run version check." + + system = platform.system() + arch = platform.machine() + leftFile = False + if uninstall: + print("Start to run rm%s" % name) + print("Platform: ", system) + # stop taosd server + if system == 'Windows': + cmd = "C:\\TDengine\\stop_all.bat" + else: + cmd = "stop_all.sh" + process_out = subprocess.getoutput(cmd) + print(cmd) + time.sleep(5) + print("start to rm%s" % name) + if system == "Linux": + # 启动命令 + process = subprocess.Popen(['rm%s' % name], stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, text=True) + + # 发送交互输入 + stdout, stderr = process.communicate( + input="y\nI confirm that I would like to delete all data, log and configuration files\n") + + # 打印输出(可选) + print(stdout) + print(stderr) + # 检查目录清除情况 + out = subprocess.getoutput("ls /etc/systemd/system/%s*" % name) + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/bin/%s*" % name) + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/local/bin/%s*" % name) + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/lib/lib%s*" % name) + if "No such file or directory" not in out: 
+ print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/lib64/lib%s*" % name) + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/include/%s*" % name) + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/local/%s" % name) + # print(out) + if "No such file or directory" not in out: + print("Uninstall left some files in /usr/local/%s:%s" % (name, out)) + leftFile = True + if not leftFile: + print("*******Test Result: uninstall test passed ************") + + elif system == "Darwin": + # 创建一个subprocess.Popen对象,并使用stdin和stdout进行交互 + process = subprocess.Popen(['sudo', 'rm%s' % name], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True) + # 向子进程发送输入 + process.stdin.write("y\n") + process.stdin.flush() # 确保输入被发送到子进程 + process.stdin.write("I confirm that I would like to delete all data, log and configuration files\n") + process.stdin.flush() # 确保输入被发送到子进程 + # 关闭子进程的stdin,防止它无限期等待更多输入 + process.stdin.close() + # 等待子进程结束 + process.wait() + # 检查目录清除情况 + out = subprocess.getoutput("ls /usr/local/bin/%s*" % name) + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/local/lib/lib%s*" % name) + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + out = subprocess.getoutput("ls /usr/local/include/%s*" % name) + if "No such file or directory" not in out: + print("Uninstall left some files: %s" % out) + leftFile = True + # out = subprocess.getoutput("ls /usr/local/Cellar/tdengine/") + # print(out) + # if out: + # print("Uninstall left some files: /usr/local/Cellar/tdengine/%s" % out) + # leftFile = True + # if not leftFile: + # print("*******Test Result: uninstall test passed ************") + 
+ elif system == "Windows": + process = subprocess.Popen(['unins000', '/silent'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True) + process.wait() + time.sleep(10) + for file in ["C:\TDengine\\taos.exe", "C:\TDengine\\unins000.exe", "C:\ProDB\prodb.exe", + "C:\ProDB\\unins000.exe"]: + if os.path.exists(file): + leftFile = True + if leftFile: + raise "uninstall %s fail, please check" % name + else: + print("**********Test Result: uninstall test passed! **********") diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 8a6b159a22c..a6fd69d16fb 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -185,7 +185,14 @@ function kill_process() { function install_main_path() { #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : + ${csudo}rm -rf ${install_main_dir}/cfg || : + ${csudo}rm -rf ${install_main_dir}/bin || : + ${csudo}rm -rf ${install_main_dir}/driver || : + ${csudo}rm -rf ${install_main_dir}/examples || : + ${csudo}rm -rf ${install_main_dir}/include || : + ${csudo}rm -rf ${install_main_dir}/share || : + ${csudo}rm -rf ${install_main_dir}/log || : + ${csudo}mkdir -p ${install_main_dir} ${csudo}mkdir -p ${install_main_dir}/cfg ${csudo}mkdir -p ${install_main_dir}/bin diff --git a/packaging/tools/mac_install_summary_client.txt b/packaging/tools/mac_install_summary_client.txt new file mode 100644 index 00000000000..f49703c5550 --- /dev/null +++ b/packaging/tools/mac_install_summary_client.txt @@ -0,0 +1,4 @@ +TDengine client is installed successfully. 
Please open a terminal and execute the commands below: + +To configure TDengine client, sudo vi /etc/taos/taos.cfg +To access TDengine command line interface, taos -h YouServerName \ No newline at end of file diff --git a/packaging/tools/make_install.bat b/packaging/tools/make_install.bat index 0b2a55b89c2..04d342ea06f 100644 --- a/packaging/tools/make_install.bat +++ b/packaging/tools/make_install.bat @@ -12,9 +12,18 @@ if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json ( rem // stop and delete service mshta vbscript:createobject("shell.application").shellexecute("%~s0",":stop_delete","","runas",1)(window.close) -echo This might take a few moment to accomplish deleting service taosd/taosadapter ... + +if exist %binary_dir%\\build\\bin\\taosadapter.exe ( + echo This might take a few moment to accomplish deleting service taosd/taosadapter ... +) + +if exist %binary_dir%\\build\\bin\\taoskeeper.exe ( + echo This might take a few moment to accomplish deleting service taosd/taoskeeper ... +) + call :check_svc taosd call :check_svc taosadapter +call :check_svc taoskeeper set source_dir=%2 set source_dir=%source_dir:/=\\% @@ -46,6 +55,11 @@ if exist %binary_dir%\\test\\cfg\\taosadapter.toml ( copy %binary_dir%\\test\\cfg\\taosadapter.toml %target_dir%\\cfg\\taosadapter.toml > nul ) ) +if exist %binary_dir%\\test\\cfg\\taoskeeper.toml ( + if not exist %target_dir%\\cfg\\taoskeeper.toml ( + copy %binary_dir%\\test\\cfg\\taoskeeper.toml %target_dir%\\cfg\\taoskeeper.toml > nul + ) +) copy %source_dir%\\include\\client\\taos.h %target_dir%\\include > nul copy %source_dir%\\include\\util\\taoserror.h %target_dir%\\include > nul copy %source_dir%\\include\\libs\\function\\taosudf.h %target_dir%\\include > nul @@ -98,12 +112,15 @@ if %Enterprise% == TRUE ( copy %binary_dir%\\build\\bin\\*explorer.exe %target_dir% > nul ) ) - + copy %binary_dir%\\build\\bin\\taosd.exe %target_dir% > nul copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul if exist 
%binary_dir%\\build\\bin\\taosadapter.exe ( copy %binary_dir%\\build\\bin\\taosadapter.exe %target_dir% > nul ) +if exist %binary_dir%\\build\\bin\\taoskeeper.exe ( + copy %binary_dir%\\build\\bin\\taoskeeper.exe %target_dir% > nul +) mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close) @@ -116,6 +133,10 @@ if exist %binary_dir%\\build\\bin\\taosadapter.exe ( echo To start/stop taosAdapter with administrator privileges: %ESC%[92msc start/stop taosadapter %ESC%[0m ) +if exist %binary_dir%\\build\\bin\\taoskeeper.exe ( + echo To start/stop taosKeeper with administrator privileges: %ESC%[92msc start/stop taoskeeper %ESC%[0m +) + goto :eof :hasAdmin @@ -123,6 +144,7 @@ goto :eof call :stop_delete call :check_svc taosd call :check_svc taosadapter +call :check_svc taoskeeper if exist c:\\windows\\sysnative ( echo x86 @@ -141,6 +163,7 @@ if exist c:\\windows\\sysnative ( rem // create services sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND +sc create "taoskeeper" binPath= "C:\\TDengine\\taoskeeper.exe" start= DEMAND set "env=HKLM\System\CurrentControlSet\Control\Session Manager\Environment" for /f "tokens=2*" %%I in ('reg query "%env%" /v Path ^| findstr /i "\"') do ( @@ -181,6 +204,8 @@ sc stop taosd sc delete taosd sc stop taosadapter sc delete taosadapter +sc stop taoskeeper +sc delete taoskeeper exit /B 0 :check_svc diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 13447bd5e43..0874433e94f 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -129,6 +129,13 @@ function kill_taosadapter() { fi } +function kill_taoskeeper() { + pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo}kill -9 $pid || : + fi +} + function kill_taosd() { pid=$(ps -ef | grep -w ${serverName} | grep -v "grep" | 
awk '{print $2}') if [ -n "$pid" ]; then @@ -138,7 +145,14 @@ function kill_taosd() { function install_main_path() { #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : + ${csudo}rm -rf ${install_main_dir}/cfg || : + ${csudo}rm -rf ${install_main_dir}/bin || : + ${csudo}rm -rf ${install_main_dir}/driver || : + ${csudo}rm -rf ${install_main_dir}/examples || : + ${csudo}rm -rf ${install_main_dir}/include || : + ${csudo}rm -rf ${install_main_dir}/share || : + ${csudo}rm -rf ${install_main_dir}/log || : + ${csudo}mkdir -p ${install_main_dir} ${csudo}mkdir -p ${install_main_dir}/cfg ${csudo}mkdir -p ${install_main_dir}/bin @@ -155,6 +169,7 @@ function install_bin() { ${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || : ${csudo}rm -f ${bin_link_dir}/taosadapter || : + ${csudo}rm -f ${bin_link_dir}/taoskeeper || : ${csudo}rm -f ${bin_link_dir}/udfd || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : ${csudo}rm -f ${bin_link_dir}/taosdump || : @@ -169,6 +184,7 @@ function install_bin() { [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : + [ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || : ${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || : @@ -183,6 +199,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/${clientName} ] && 
${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || : + [ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo > /dev/null 2>&1 || : @@ -197,6 +214,7 @@ function install_bin() { [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : + [ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/*explorer ] && ${csudo}cp -r ${binary_dir}/build/bin/*explorer ${install_main_dir}/bin || : @@ -208,6 +226,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s 
${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || : + [ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || : [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : @@ -407,6 +426,29 @@ function install_taosadapter_config() { fi } +function install_taoskeeper_config() { + if [ ! 
-f "${cfg_install_dir}/taoskeeper.toml" ]; then + ${csudo}mkdir -p ${cfg_install_dir} || : + [ -f ${binary_dir}/test/cfg/taoskeeper.toml ] && + ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_install_dir} && + ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || : + [ -f ${cfg_install_dir}/taoskeeper.toml ] && + ${csudo}chmod 644 ${cfg_install_dir}/taoskeeper.toml || : + [ -f ${binary_dir}/test/cfg/taoskeeper.toml ] && + ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \ + ${cfg_install_dir}/taoskeeper.toml.${verNumber} || : + [ -f ${cfg_install_dir}/taoskeeper.toml ] && + ${csudo}ln -s ${cfg_install_dir}/taoskeeper.toml \ + ${install_main_dir}/cfg/taoskeeper.toml > /dev/null 2>&1 || : + else + if [ -f "${binary_dir}/test/cfg/taoskeeper.toml" ]; then + ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \ + ${cfg_install_dir}/taoskeeper.toml.${verNumber} || : + ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || : + fi + fi +} + function install_log() { ${csudo}rm -rf ${log_dir} || : ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} @@ -526,6 +568,15 @@ function install_taosadapter_service() { fi } +function install_taoskeeper_service() { + if ((${service_mod} == 0)); then + [ -f ${binary_dir}/test/cfg/taoskeeper.service ] && + ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.service \ + ${service_config_dir}/ || : + ${csudo}systemctl daemon-reload + fi +} + function install_service_on_launchctl() { ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : ${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist @@ -534,6 +585,10 @@ function install_service_on_launchctl() { ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : ${csudo}cp ${script_dir}/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist ${csudo}launchctl load -w 
/Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : + + ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || : + ${csudo}cp ${script_dir}/com.taosdata.taoskeeper.plist /Library/LaunchDaemons/com.taosdata.taoskeeper.plist + ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || : } function install_service() { @@ -549,6 +604,7 @@ function install_service() { install_service_on_launchctl fi } + function install_app() { if [ "$osType" = "Darwin" ]; then ${csudo}rm -rf /Applications/TDengine.app && @@ -573,6 +629,7 @@ function update_TDengine() { elif ((${service_mod} == 1)); then ${csudo}service ${serverName} stop || : else + kill_taoskeeper kill_taosadapter kill_taosd fi @@ -591,9 +648,11 @@ function update_TDengine() { install_service install_taosadapter_service + install_taoskeeper_service install_config install_taosadapter_config + install_taoskeeper_config echo echo -e "\033[44;32;1m${productName} is updated successfully!${NC}" @@ -602,22 +661,33 @@ function update_TDengine() { echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml" + [ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: 
${csudo}systemctl start taoskeeper ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}" else if [ "$osType" != "Darwin" ]; then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}" else echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" - echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}" fi fi @@ -643,9 +713,11 @@ function install_TDengine() { install_service install_taosadapter_service + install_taoskeeper_service install_config install_taosadapter_config + install_taoskeeper_config # Ask if to start the service echo @@ -654,22 +726,33 @@ function install_TDengine() { echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml" + [ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To 
configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}" else if [ "$osType" != "Darwin" ]; then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}" else echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" - echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}" fi fi diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt index bbd18892abd..6d5f0065172 100644 --- 
a/source/client/CMakeLists.txt +++ b/source/client/CMakeLists.txt @@ -10,6 +10,10 @@ else() add_library(taos SHARED ${CLIENT_SRC}) endif() +if(${TD_DARWIN}) + target_compile_options(taos PRIVATE -Wno-error=deprecated-non-prototype) +endif() + INCLUDE_DIRECTORIES(jni) target_include_directories( @@ -46,6 +50,11 @@ set_target_properties( ) add_library(taos_static STATIC ${CLIENT_SRC}) + +if(${TD_DARWIN}) + target_compile_options(taos_static PRIVATE -Wno-error=deprecated-non-prototype) +endif() + target_include_directories( taos_static PUBLIC "${TD_SOURCE_DIR}/include/client" diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 8d45e8b4a81..90505ed25a4 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -58,6 +58,8 @@ enum { #define TD_RES_TMQ_METADATA(res) (*(int8_t*)(res) == RES_TYPE__TMQ_METADATA) #define TD_RES_TMQ_BATCH_META(res) (*(int8_t*)(res) == RES_TYPE__TMQ_BATCH_META) +#define TSC_MAX_SUBPLAN_CAPACITY_NUM 1000 + typedef struct SAppInstInfo SAppInstInfo; typedef struct { @@ -106,6 +108,10 @@ typedef struct SQueryExecMetric { int64_t execCostUs; } SQueryExecMetric; +typedef struct { + SMonitorParas monitorParas; + int8_t enableAuditDelete; +} SAppInstServerCFG; struct SAppInstInfo { int64_t numOfConns; SCorEpSet mgmtEp; @@ -119,7 +125,7 @@ struct SAppInstInfo { void* pTransporter; SAppHbMgr* pAppHbMgr; char* instKey; - SMonitorParas monitorParas; + SAppInstServerCFG serverCfg; }; typedef struct SAppInfo { @@ -295,8 +301,7 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4); void doSetOneRowPtr(SReqResultInfo* pResultInfo); void setResPrecision(SReqResultInfo* pResInfo, int32_t precision); int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4); -int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows, - bool convertUcs4); +int32_t setResultDataPtr(SReqResultInfo* 
pResultInfo, bool convertUcs4); int32_t setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols); void doFreeReqResultInfo(SReqResultInfo* pResInfo); int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq); diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index a6aca2fddf6..d94a671c459 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -92,6 +92,26 @@ extern "C" { } \ } +#define SML_CHECK_CODE(CMD) \ + code = (CMD); \ + if (TSDB_CODE_SUCCESS != code) { \ + lino = __LINE__; \ + goto END; \ + } + +#define SML_CHECK_NULL(CMD) \ + if (NULL == (CMD)) { \ + code = terrno; \ + lino = __LINE__; \ + goto END; \ + } + +#define RETURN \ + if (code != 0){ \ + uError("%s failed code:%d line:%d", __FUNCTION__ , code, lino); \ + } \ + return code; + typedef enum { SCHEMA_ACTION_NULL, SCHEMA_ACTION_CREATE_STABLE, @@ -191,7 +211,6 @@ typedef struct { cJSON *root; // for parse json int8_t offset[OTD_JSON_FIELDS_NUM]; SSmlLineInfo *lines; // element is SSmlLineInfo - bool parseJsonByLib; SArray *tagJsonArray; SArray *valueJsonArray; @@ -211,13 +230,8 @@ typedef struct { extern int64_t smlFactorNS[]; extern int64_t smlFactorS[]; -typedef int32_t (*_equal_fn_sml)(const void *, const void *); - int32_t smlBuildSmlInfo(TAOS *taos, SSmlHandle **handle); void smlDestroyInfo(SSmlHandle *info); -int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset); -int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset); -bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg); void smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2); int32_t smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg); int64_t smlGetTimeValue(const char *value, int32_t len, uint8_t fromPrecision, uint8_t toPrecision); @@ -237,7 +251,7 @@ void smlDestroyTableInfo(void *para); void freeSSmlKv(void* data); int32_t smlParseInfluxString(SSmlHandle *info, 
char *sql, char *sqlEnd, SSmlLineInfo *elements); int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements); -int32_t smlParseJSON(SSmlHandle *info, char *payload); +int32_t smlParseJSONExt(SSmlHandle *info, char *payload); int32_t smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement, SSmlSTableMeta** sMeta); bool isSmlTagAligned(SSmlHandle *info, int cnt, SSmlKv *kv); @@ -246,7 +260,8 @@ int32_t smlProcessChildTable(SSmlHandle *info, SSmlLineInfo *elements); int32_t smlProcessSuperTable(SSmlHandle *info, SSmlLineInfo *elements); int32_t smlJoinMeasureTag(SSmlLineInfo *elements); void smlBuildTsKv(SSmlKv *kv, int64_t ts); -int32_t smlParseEndTelnetJson(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv); +int32_t smlParseEndTelnetJsonFormat(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv); +int32_t smlParseEndTelnetJsonUnFormat(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv); int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs); static inline bool smlDoubleToInt64OverFlow(double num) { diff --git a/source/client/inc/clientStmt2.h b/source/client/inc/clientStmt2.h index 4e9a09c0820..64abf31bc1b 100644 --- a/source/client/inc/clientStmt2.h +++ b/source/client/inc/clientStmt2.h @@ -222,6 +222,7 @@ int stmtSetTbTags2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *tags); int stmtBindBatch2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *bind, int32_t colIdx); int stmtGetTagFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields); int stmtGetColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields); +int stmtGetStbColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_STB **fields); int stmtGetParamNum2(TAOS_STMT2 *stmt, int *nums); int stmtGetParamTbName(TAOS_STMT2 *stmt, int *nums); int stmtIsInsert2(TAOS_STMT2 *stmt, int *insert); diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index f892575f0a2..c56a627ec74 100644 --- 
a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -166,11 +166,11 @@ static int32_t generateWriteSlowLog(STscObj *pTscObj, SRequestObj *pRequest, int ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "type", cJSON_CreateNumber(reqType))); ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject( json, "rows_num", cJSON_CreateNumber(pRequest->body.resInfo.numOfRows + pRequest->body.resInfo.totalRows))); - if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen) { - char tmp = pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen]; - pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = '\0'; + if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen) { + char tmp = pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen]; + pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = '\0'; ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr))); - pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = tmp; + pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = tmp; } else { ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr))); } @@ -284,7 +284,7 @@ static void deregisterRequest(SRequestObj *pRequest) { } } - if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) { + if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) { if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType || QUERY_NODE_INSERT_STMT == pRequest->stmtType) { sqlReqLog(pTscObj->id, pRequest->killed, pRequest->code, MONITORSQLTYPEINSERT); } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { @@ -294,15 +294,15 @@ static void deregisterRequest(SRequestObj *pRequest) { } } - if ((duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThreshold * 1000000UL || - duration >= 
pTscObj->pAppInfo->monitorParas.tsSlowLogThresholdTest * 1000000UL) && - checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->monitorParas.tsSlowLogExceptDb)) { + if ((duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThreshold * 1000000UL || + duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThresholdTest * 1000000UL) && + checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogExceptDb)) { (void)atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1); - if (pTscObj->pAppInfo->monitorParas.tsSlowLogScope & reqType) { + if (pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & reqType) { taosPrintSlowLog("PID:%d, Conn:%u,QID:0x%" PRIx64 ", Start:%" PRId64 " us, Duration:%" PRId64 "us, SQL:%s", taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration, pRequest->sqlstr); - if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) { + if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) { slowQueryLog(pTscObj->id, pRequest->killed, pRequest->code, duration); if (TSDB_CODE_SUCCESS != generateWriteSlowLog(pTscObj, pRequest, reqType, duration)) { tscError("failed to generate write slow log"); @@ -375,7 +375,7 @@ int32_t openTransporter(const char *user, const char *auth, int32_t numOfThread, rpcInit.startReadTimer = 1; rpcInit.readTimeout = tsReadTimeout; - int32_t code = taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + int32_t code = taosVersionStrToInt(td_version, &rpcInit.compatibilityVer); if (TSDB_CODE_SUCCESS != code) { tscError("invalid version string."); return code; @@ -689,7 +689,7 @@ void doDestroyRequest(void *p) { int32_t code = taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self)); if (TSDB_CODE_SUCCESS != code) { - tscError("failed to remove request from hash, code:%s", tstrerror(code)); + tscWarn("failed to remove request from hash, code:%s", tstrerror(code)); } schedulerFreeJob(&pRequest->body.queryJob, 0); diff 
--git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index ecebcaa2107..9b9d0a6dab8 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -606,7 +606,8 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) { return code; } - pInst->monitorParas = pRsp.monitorParas; + pInst->serverCfg.monitorParas = pRsp.monitorParas; + pInst->serverCfg.enableAuditDelete = pRsp.enableAuditDelete; tscDebug("[monitor] paras from hb, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", pInst->clusterId, pRsp.monitorParas.tsSlowLogThreshold, pRsp.monitorParas.tsSlowLogScope); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 9131d29f30a..8a0b1ddaab0 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1250,6 +1250,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { void launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void** res) { int32_t code = 0; + int32_t subplanNum = 0; if (pQuery->pRoot) { pRequest->stmtType = pQuery->pRoot->type; @@ -1405,6 +1406,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat if (TSDB_CODE_SUCCESS == code) { code = schedulerExecJob(&req, &pRequest->body.queryJob); } + taosArrayDestroy(pNodeList); } else { qDestroyQueryPlan(pDag); @@ -1698,7 +1700,7 @@ static int32_t buildConnectMsg(SRequestObj* pRequest, SMsgSendInfo** pMsgSendInf tstrncpy(connectReq.app, appInfo.appName, sizeof(connectReq.app)); tstrncpy(connectReq.user, pObj->user, sizeof(connectReq.user)); tstrncpy(connectReq.passwd, pObj->pass, sizeof(connectReq.passwd)); - tstrncpy(connectReq.sVer, version, sizeof(connectReq.sVer)); + tstrncpy(connectReq.sVer, td_version, sizeof(connectReq.sVer)); int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq); void* pReq = taosMemoryMalloc(contLen); @@ -1768,19 +1770,15 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, 
SRpcMsg* pMsg, } } -int32_t doProcessMsgFromServer(void* param) { - AsyncArg* arg = (AsyncArg*)param; - SRpcMsg* pMsg = &arg->msg; - SEpSet* pEpSet = arg->pEpset; - +int32_t doProcessMsgFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) { SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; if (pMsg->info.ahandle == NULL) { tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL"); - taosMemoryFree(arg->pEpset); rpcFreeCont(pMsg->pCont); - taosMemoryFree(arg); + taosMemoryFree(pEpSet); return TSDB_CODE_TSC_INTERNAL_ERROR; } + STscObj* pTscObj = NULL; STraceId* trace = &pMsg->info.traceId; @@ -1800,10 +1798,9 @@ int32_t doProcessMsgFromServer(void* param) { if (TSDB_CODE_SUCCESS != taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId)) { tscError("doProcessMsgFromServer taosReleaseRef failed"); } - taosMemoryFree(arg->pEpset); rpcFreeCont(pMsg->pCont); + taosMemoryFree(pEpSet); destroySendMsgInfo(pSendInfo); - taosMemoryFree(arg); return TSDB_CODE_TSC_INTERNAL_ERROR; } pTscObj = pRequest->pTscObj; @@ -1842,20 +1839,24 @@ int32_t doProcessMsgFromServer(void* param) { rpcFreeCont(pMsg->pCont); destroySendMsgInfo(pSendInfo); - - taosMemoryFree(arg); return TSDB_CODE_SUCCESS; } +int32_t doProcessMsgFromServer(void* param) { + AsyncArg* arg = (AsyncArg*)param; + int32_t code = doProcessMsgFromServerImpl(&arg->msg, arg->pEpset); + taosMemoryFree(arg); + return code; +} void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { + int32_t code = 0; SEpSet* tEpSet = NULL; if (pEpSet != NULL) { tEpSet = taosMemoryCalloc(1, sizeof(SEpSet)); if (NULL == tEpSet) { - pMsg->code = TSDB_CODE_OUT_OF_MEMORY; - rpcFreeCont(pMsg->pCont); - destroySendMsgInfo(pMsg->info.ahandle); - return; + code = terrno; + pMsg->code = terrno; + goto _exit; } (void)memcpy((void*)tEpSet, (void*)pEpSet, sizeof(SEpSet)); } @@ -1877,21 +1878,25 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { AsyncArg* arg = taosMemoryCalloc(1, sizeof(AsyncArg)); if 
(NULL == arg) { - pMsg->code = TSDB_CODE_OUT_OF_MEMORY; - taosMemoryFree(tEpSet); - rpcFreeCont(pMsg->pCont); - destroySendMsgInfo(pMsg->info.ahandle); - return; + code = terrno; + pMsg->code = code; + goto _exit; } + arg->msg = *pMsg; arg->pEpset = tEpSet; - if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) { - tscError("failed to sched msg to tsc, tsc ready to quit"); - rpcFreeCont(pMsg->pCont); - taosMemoryFree(arg->pEpset); - destroySendMsgInfo(pMsg->info.ahandle); + if ((code = taosAsyncExec(doProcessMsgFromServer, arg, NULL)) != 0) { + pMsg->code = code; taosMemoryFree(arg); + goto _exit; + } + return; +_exit: + tscError("failed to sched msg to tsc since %s", tstrerror(code)); + code = doProcessMsgFromServerImpl(pMsg, tEpSet); + if (code != 0) { + tscError("failed to sched msg to tsc, tsc ready quit"); } } @@ -2079,12 +2084,12 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) { return TSDB_CODE_SUCCESS; } -static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int32_t numOfCols, int32_t* colLength) { +static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength) { int32_t idx = -1; iconv_t conv = taosAcquireConv(&idx, C2M); if (conv == (iconv_t)-1) return TSDB_CODE_TSC_INTERNAL_ERROR; - for (int32_t i = 0; i < numOfCols; ++i) { + for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) { int32_t type = pResultInfo->fields[i].type; int32_t bytes = pResultInfo->fields[i].bytes; @@ -2098,7 +2103,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int pResultInfo->convertBuf[i] = p; SResultColumn* pCol = &pResultInfo->pCol[i]; - for (int32_t j = 0; j < numOfRows; ++j) { + for (int32_t j = 0; j < pResultInfo->numOfRows; ++j) { if (pCol->offset[j] != -1) { char* pStart = pCol->offset[j] + pCol->pData; @@ -2131,10 +2136,13 @@ int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) { numOfCols * (sizeof(int8_t) + sizeof(int32_t)); } -static int32_t 
estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) { +static int32_t estimateJsonLen(SReqResultInfo* pResultInfo) { char* p = (char*)pResultInfo->pData; int32_t blockVersion = *(int32_t*)p; + int32_t numOfRows = pResultInfo->numOfRows; + int32_t numOfCols = pResultInfo->numOfCols; + // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column // length | int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); @@ -2193,10 +2201,16 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i } pStart += colLen; } + + // Ensure the complete structure of the block, including the blankfill field, + // even though it is not used on the client side. + len += sizeof(bool); return len; } -static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) { +static int32_t doConvertJson(SReqResultInfo* pResultInfo) { + int32_t numOfRows = pResultInfo->numOfRows; + int32_t numOfCols = pResultInfo->numOfCols; bool needConvert = false; for (int32_t i = 0; i < numOfCols; ++i) { if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) { @@ -2213,7 +2227,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int char* p = (char*)pResultInfo->pData; int32_t blockVersion = *(int32_t*)p; - int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows); + int32_t dataLen = estimateJsonLen(pResultInfo); if (dataLen <= 0) { return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -2336,27 +2350,36 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int pStart1 += colLen1; } + // Ensure the complete structure of the block, including the blankfill field, + // even though it is not used on the client side. 
+ // (void)memcpy(pStart1, pStart, sizeof(bool)); + totalLen += sizeof(bool); + *(int32_t*)(pResultInfo->convertJson + 4) = totalLen; pResultInfo->pData = pResultInfo->convertJson; return TSDB_CODE_SUCCESS; } -int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows, - bool convertUcs4) { - if (numOfCols <= 0 || pFields == NULL || pResultInfo == NULL) { +int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { + if (pResultInfo == NULL || pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL) { tscError("setResultDataPtr paras error"); return TSDB_CODE_TSC_INTERNAL_ERROR; } - if (numOfRows == 0) { + if (pResultInfo->numOfRows == 0) { return TSDB_CODE_SUCCESS; } + if (pResultInfo->pData == NULL) { + tscError("setResultDataPtr error: pData is NULL"); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + int32_t code = doPrepareResPtr(pResultInfo); if (code != TSDB_CODE_SUCCESS) { return code; } - code = doConvertJson(pResultInfo, numOfCols, numOfRows); + code = doConvertJson(pResultInfo); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -2376,9 +2399,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 int32_t cols = *(int32_t*)p; p += sizeof(int32_t); - if (rows != numOfRows || cols != numOfCols) { - tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, - numOfCols); + if (rows != pResultInfo->numOfRows || cols != pResultInfo->numOfCols) { + tscError("setResultDataPtr paras error:rows;%d numOfRows:%" PRId64 " cols:%d numOfCols:%d", rows, pResultInfo->numOfRows, cols, + pResultInfo->numOfCols); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -2389,7 +2412,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 p += sizeof(uint64_t); // check fields - for (int32_t i = 0; i < numOfCols; ++i) { + for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) { int8_t type = *(int8_t*)p; p += 
sizeof(int8_t); @@ -2398,10 +2421,14 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 } int32_t* colLength = (int32_t*)p; - p += sizeof(int32_t) * numOfCols; + p += sizeof(int32_t) * pResultInfo->numOfCols; char* pStart = p; - for (int32_t i = 0; i < numOfCols; ++i) { + for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) { + if ((pStart - pResultInfo->pData) >= dataLen) { + tscError("setResultDataPtr invalid offset over dataLen %d", dataLen); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } if (blockVersion == BLOCK_VERSION_1) { colLength[i] = htonl(colLength[i]); } @@ -2409,10 +2436,13 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen); return TSDB_CODE_TSC_INTERNAL_ERROR; } - + if (IS_INVALID_TYPE(pResultInfo->fields[i].type)) { + tscError("invalid type %d", pResultInfo->fields[i].type); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) { pResultInfo->pCol[i].offset = (int32_t*)pStart; - pStart += numOfRows * sizeof(int32_t); + pStart += pResultInfo->numOfRows * sizeof(int32_t); } else { pResultInfo->pCol[i].nullbitmap = pStart; pStart += BitmapLen(pResultInfo->numOfRows); @@ -2425,11 +2455,17 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 pStart += colLength[i]; } + p = pStart; // bool blankFill = *(bool*)p; p += sizeof(bool); + int32_t offset = p - pResultInfo->pData; + if (offset > dataLen) { + tscError("invalid offset %d, dataLen %d", offset, dataLen); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } if (convertUcs4) { - code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength); + code = doConvertUCS4(pResultInfo, colLength); } return code; @@ -2542,7 +2578,7 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR pResultInfo->totalRows += pResultInfo->numOfRows; int32_t code = - setResultDataPtr(pResultInfo, 
pResultInfo->fields, pResultInfo->numOfCols, pResultInfo->numOfRows, convertUcs4); + setResultDataPtr(pResultInfo, convertUcs4); return code; } @@ -2571,7 +2607,7 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de rpcInit.connLimitNum = connLimitNum; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; rpcInit.readTimeout = tsReadTimeout; - if (TSDB_CODE_SUCCESS != taosVersionStrToInt(version, &(rpcInit.compatibilityVer))) { + if (TSDB_CODE_SUCCESS != taosVersionStrToInt(td_version, &rpcInit.compatibilityVer)) { tscError("faild to convert taos version from str to int, errcode:%s", terrstr()); goto _OVER; } @@ -2837,6 +2873,7 @@ void syncQueryFn(void* param, void* res, int32_t code) { if (pParam->pRequest) { pParam->pRequest->code = code; + clientOperateReport(pParam->pRequest); } if (TSDB_CODE_SUCCESS != tsem_post(&pParam->sem)) { diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 64631fd7545..9f6be8e45c8 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -84,7 +84,7 @@ void taos_cleanup(void) { taosCloseRef(id); nodesDestroyAllocatorSet(); -// cleanupAppInfo(); + // cleanupAppInfo(); rpcCleanup(); tscDebug("rpc cleanup"); @@ -388,7 +388,6 @@ void taos_free_result(TAOS_RES *res) { tDeleteMqBatchMetaRsp(&pRsp->batchMetaRsp); } taosMemoryFree(pRsp); - } void taos_kill_query(TAOS *taos) { @@ -484,7 +483,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { return taos_print_row_with_size(str, INT32_MAX, row, fields, num_fields); } -int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields){ +int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { int32_t len = 0; for (int i = 0; i < num_fields; ++i) { if (i > 0 && len < size - 1) { @@ -589,7 +588,7 @@ int taos_print_row_with_size(char *str, uint32_t size, 
TAOS_ROW row, TAOS_FIELD break; } } - if (len < size){ + if (len < size) { str[len] = 0; } @@ -670,7 +669,7 @@ const char *taos_data_type(int type) { } } -const char *taos_get_client_info() { return version; } +const char *taos_get_client_info() { return td_version; } // return int32_t int taos_affected_rows(TAOS_RES *res) { @@ -1804,7 +1803,7 @@ int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) { if (bind->num > 1) { tscError("invalid bind number %d for %s", bind->num, __FUNCTION__); - terrno = TSDB_CODE_INVALID_PARA; + terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR; return terrno; } @@ -1820,7 +1819,7 @@ int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) { if (bind->num <= 0 || bind->num > INT16_MAX) { tscError("invalid bind num %d", bind->num); - terrno = TSDB_CODE_INVALID_PARA; + terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR; return terrno; } @@ -1832,7 +1831,7 @@ int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) { } if (0 == insert && bind->num > 1) { tscError("only one row data allowed for query"); - terrno = TSDB_CODE_INVALID_PARA; + terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR; return terrno; } @@ -1860,7 +1859,7 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, in } if (0 == insert && bind->num > 1) { tscError("only one row data allowed for query"); - terrno = TSDB_CODE_INVALID_PARA; + terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR; return terrno; } @@ -2020,7 +2019,7 @@ int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col if (bind->num <= 0 || bind->num > INT16_MAX) { tscError("invalid bind num %d", bind->num); - terrno = TSDB_CODE_INVALID_PARA; + terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR; return terrno; } @@ -2028,7 +2027,7 @@ int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col (void)stmtIsInsert2(stmt, &insert); if (0 == insert && bind->num > 1) { tscError("only one row data allowed for query"); - terrno = 
TSDB_CODE_INVALID_PARA; + terrno = TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR; return terrno; } @@ -2082,7 +2081,7 @@ int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert) { } int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields) { - if (stmt == NULL || NULL == count) { + if (stmt == NULL || count == NULL) { tscError("NULL parameter for %s", __FUNCTION__); terrno = TSDB_CODE_INVALID_PARA; return terrno; @@ -2103,12 +2102,28 @@ int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, } } +int taos_stmt2_get_stb_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_STB **fields) { + if (stmt == NULL || count == NULL) { + tscError("NULL parameter for %s", __FUNCTION__); + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + + return stmtGetStbColFields2(stmt, count, fields); +} + void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields) { (void)stmt; if (!fields) return; taosMemoryFree(fields); } +DLL_EXPORT void taos_stmt2_free_stb_fields(TAOS_STMT2 *stmt, TAOS_FIELD_STB *fields) { + (void)stmt; + if (!fields) return; + taosMemoryFree(fields); +} + TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt) { if (stmt == NULL) { tscError("NULL parameter for %s", __FUNCTION__); @@ -2144,4 +2159,4 @@ int taos_set_conn_mode(TAOS *taos, int mode, int value) { return 0; } -char *getBuildInfo() { return buildinfo; } +char *getBuildInfo() { return td_buildinfo; } diff --git a/source/client/src/clientMonitor.c b/source/client/src/clientMonitor.c index 4266534d29f..5f7e11b6a3c 100644 --- a/source/client/src/clientMonitor.c +++ b/source/client/src/clientMonitor.c @@ -2,8 +2,6 @@ #include "cJSON.h" #include "clientInt.h" #include "clientLog.h" -#include "os.h" -#include "tglobal.h" #include "tmisce.h" #include "tqueue.h" #include "ttime.h" @@ -19,6 +17,7 @@ STaosQueue* monitorQueue; SHashObj* monitorSlowLogHash; char tmpSlowLogPath[PATH_MAX] = {0}; TdThread monitorThread; +extern bool tsEnableAuditDelete; static 
int32_t getSlowLogTmpDir(char* tmpPath, int32_t size) { int ret = tsnprintf(tmpPath, size, "%s/tdengine_slow_log/", tsTempDir); @@ -216,7 +215,7 @@ static void reportSendProcess(void* param, void* tmrId) { SEpSet ep = getEpSet_s(&pInst->mgmtEp); generateClusterReport(pMonitor->registry, pInst->pTransporter, &ep); bool reset = - taosTmrReset(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId); + taosTmrReset(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId); tscDebug("reset timer, pMonitor:%p, %d", pMonitor, reset); taosRUnLockLatch(&monitorLock); } @@ -289,7 +288,7 @@ void monitorCreateClient(int64_t clusterId) { goto fail; } pMonitor->timer = - taosTmrStart(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer); + taosTmrStart(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer); if (pMonitor->timer == NULL) { tscError("failed to start timer"); goto fail; @@ -660,7 +659,7 @@ static void monitorSendAllSlowLog() { taosHashCancelIterate(monitorSlowLogHash, pIter); return; } - if (t - pClient->lastCheckTime > pInst->monitorParas.tsMonitorInterval * 1000) { + if (t - pClient->lastCheckTime > pInst->serverCfg.monitorParas.tsMonitorInterval * 1000) { pClient->lastCheckTime = t; } else { continue; @@ -686,7 +685,7 @@ static void monitorSendAllSlowLog() { static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) { SAppInstInfo* pInst = getAppInstByClusterId((int64_t)clusterId); - if (pInst == NULL || !pInst->monitorParas.tsEnableMonitor) { + if (pInst == NULL || !pInst->serverCfg.monitorParas.tsEnableMonitor) { tscInfo("[monitor] monitor is disabled, skip send slow log"); return; } @@ -933,3 +932,100 @@ int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data) { } return 0; } + +int32_t reportCB(void* param, SDataBuf* pMsg, int32_t code) { + taosMemoryFree(pMsg->pData); + 
taosMemoryFree(pMsg->pEpSet); + tscDebug("[del report]delete reportCB code:%d", code); + return 0; +} + +int32_t senAuditInfo(STscObj* pTscObj, void* pReq, int32_t len, uint64_t requestId) { + SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); + if (sendInfo == NULL) { + tscError("[del report]failed to allocate memory for sendInfo"); + return terrno; + } + + sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = len, .handle = NULL}; + + sendInfo->requestId = requestId; + sendInfo->requestObjRefId = 0; + sendInfo->param = NULL; + sendInfo->fp = reportCB; + sendInfo->msgType = TDMT_MND_AUDIT; + + SEpSet epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); + + int32_t code = asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo); + if (code != 0) { + tscError("[del report]failed to send msg to server, code:%d", code); + taosMemoryFree(sendInfo); + return code; + } + return TSDB_CODE_SUCCESS; +} + +static void reportDeleteSql(SRequestObj* pRequest) { + SDeleteStmt* pStmt = (SDeleteStmt*)pRequest->pQuery->pRoot; + STscObj* pTscObj = pRequest->pTscObj; + + if (pTscObj == NULL || pTscObj->pAppInfo == NULL) { + tscError("[del report]invalid tsc obj"); + return; + } + + if(pTscObj->pAppInfo->serverCfg.enableAuditDelete == 0) { + tscDebug("[del report]audit delete is disabled"); + return; + } + + if (pRequest->code != TSDB_CODE_SUCCESS) { + tscDebug("[del report]delete request result code:%d", pRequest->code); + return; + } + + if (nodeType(pStmt->pFromTable) != QUERY_NODE_REAL_TABLE) { + tscError("[del report]invalid from table node type:%d", nodeType(pStmt->pFromTable)); + return; + } + + SRealTableNode* pTable = (SRealTableNode*)pStmt->pFromTable; + SAuditReq req; + req.pSql = pRequest->sqlstr; + req.sqlLen = pRequest->sqlLen; + TAOS_UNUSED(tsnprintf(req.table, TSDB_TABLE_NAME_LEN, "%s", pTable->table.tableName)); + TAOS_UNUSED(tsnprintf(req.db, TSDB_DB_FNAME_LEN, "%s", pTable->table.dbName)); + TAOS_UNUSED(tsnprintf(req.operation, 
AUDIT_OPERATION_LEN, "delete")); + int32_t tlen = tSerializeSAuditReq(NULL, 0, &req); + void* pReq = taosMemoryCalloc(1, tlen); + if (pReq == NULL) { + tscError("[del report]failed to allocate memory for req"); + return; + } + + if (tSerializeSAuditReq(pReq, tlen, &req) < 0) { + tscError("[del report]failed to serialize req"); + taosMemoryFree(pReq); + return; + } + + int32_t code = senAuditInfo(pRequest->pTscObj, pReq, tlen, pRequest->requestId); + if (code != 0) { + tscError("[del report]failed to send audit info, code:%d", code); + taosMemoryFree(pReq); + return; + } + tscDebug("[del report]delete data, sql:%s", req.pSql); +} + +void clientOperateReport(SRequestObj* pRequest) { + if (pRequest == NULL || pRequest->pQuery == NULL || pRequest->pQuery->pRoot == NULL) { + tscError("[del report]invalid request"); + return; + } + + if (QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) { + reportDeleteSql(pRequest); + } +} diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index aef3cef1c5d..9a723218ff5 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -80,8 +80,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) { goto End; } - if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) { - tscError("version not compatible. client version: %s, server version: %s", version, connectRsp.sVer); + if ((code = taosCheckVersionCompatibleFromStr(td_version, connectRsp.sVer, 3)) != 0) { + tscError("version not compatible. 
client version: %s, server version: %s", td_version, connectRsp.sVer); goto End; } @@ -135,7 +135,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) { // update the appInstInfo pTscObj->pAppInfo->clusterId = connectRsp.clusterId; - pTscObj->pAppInfo->monitorParas = connectRsp.monitorParas; + pTscObj->pAppInfo->serverCfg.monitorParas = connectRsp.monitorParas; + pTscObj->pAppInfo->serverCfg.enableAuditDelete = connectRsp.enableAuditDelete; tscDebug("[monitor] paras from connect rsp, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", connectRsp.clusterId, connectRsp.monitorParas.tsSlowLogThreshold, connectRsp.monitorParas.tsSlowLogScope); lastClusterId = connectRsp.clusterId; @@ -588,7 +589,8 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) { return code; } - size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN; *pRsp = taosMemoryCalloc(1, rspSize); if (NULL == *pRsp) { code = terrno; @@ -603,7 +605,7 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) { (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS); - int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, SHOW_VARIABLES_RESULT_COLS); + int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, SHOW_VARIABLES_RESULT_COLS); if(len < 0) { uError("buildShowVariablesRsp error, len:%d", len); code = terrno; @@ -741,7 +743,8 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr return code; } - size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + 
PAYLOAD_PREFIX_LEN; *pRsp = taosMemoryCalloc(1, rspSize); if (NULL == *pRsp) { code = terrno; @@ -757,7 +760,7 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfCols = htonl(COMPACT_DB_RESULT_COLS); - int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, COMPACT_DB_RESULT_COLS); + int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, COMPACT_DB_RESULT_COLS); if(len < 0) { uError("buildRetriveTableRspForCompactDb error, len:%d", len); code = terrno; diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 0f3148b1606..7e294e4dad7 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -52,10 +52,8 @@ #define TMQ_META_VERSION "1.0" -static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen); - +static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen); static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); } - static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t, SColCmprWrapper* pColCmprRow, cJSON** pJson) { int32_t code = TSDB_CODE_SUCCESS; @@ -457,7 +455,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { cJSON* tvalue = NULL; if (IS_VAR_DATA_TYPE(pTagVal->type)) { - char* buf = NULL; + char* buf = NULL; int64_t bufSize = 0; if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) { bufSize = pTagVal->nData * 2 + 2 + 3; @@ -890,9 +888,6 @@ static void processDropTable(SMqMetaRsp* metaRsp, cJSON** pJson) { } static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVCreateStbReq req = {0}; SDecoder coder; SMCreateStbReq pReq = {0}; @@ -1003,9 +998,6 @@ static 
int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { } static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVDropStbReq req = {0}; SDecoder coder = {0}; SMDropStbReq pReq = {0}; @@ -1115,9 +1107,6 @@ static void destroyCreateTbReqBatch(void* data) { } static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVCreateTbBatchReq req = {0}; SDecoder coder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1304,9 +1293,6 @@ static void destroyDropTbReqBatch(void* data) { } static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVDropTbBatchReq req = {0}; SDecoder coder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1419,9 +1405,6 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { } static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SDeleteRes req = {0}; SDecoder coder = {0}; char sql[256] = {0}; @@ -1457,9 +1440,6 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { } static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVAlterTbReq req = {0}; SDecoder dcoder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1590,7 +1570,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat SHashObj* pVgHash = NULL; SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest)); + RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid)); uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d", LOG_ID_VALUE, rows, pData, 
tbname, fields, numFields); @@ -1622,7 +1602,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat RAW_NULL_CHECK(pVgHash); RAW_RETURN_CHECK( taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData))); - RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0)); + RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0, false)); RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); launchQueryImpl(pRequest, pQuery, true, NULL); @@ -1651,7 +1631,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha SHashObj* pVgHash = NULL; SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest)); + RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid)); uDebug(LOG_ID_TAG " write raw block, rows:%d, pData:%p, tbname:%s", LOG_ID_VALUE, rows, pData, tbname); @@ -1682,7 +1662,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha RAW_NULL_CHECK(pVgHash); RAW_RETURN_CHECK( taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData))); - RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0)); + RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0, false)); RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); launchQueryImpl(pRequest, pQuery, true, NULL); @@ -1708,116 +1688,6 @@ static void* getRawDataFromRes(void* pRetrieve) { return rawData; } -static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { - if (taos == NULL || data == NULL) { - SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); - return TSDB_CODE_INVALID_PARA; - } - int32_t code = TSDB_CODE_SUCCESS; - SHashObj* pVgHash = NULL; - SQuery* pQuery = NULL; - SMqRspObj rspObj = {0}; 
- SDecoder decoder = {0}; - STableMeta* pTableMeta = NULL; - - SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); - - uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); - pRequest->syncQuery = true; - rspObj.resIter = -1; - rspObj.resType = RES_TYPE__TMQ; - - int8_t dataVersion = *(int8_t*)data; - if (dataVersion >= MQ_DATA_RSP_VERSION) { - data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); - dataLen -= sizeof(int8_t) + sizeof(int32_t); - } - tDecoderInit(&decoder, data, dataLen); - code = tDecodeMqDataRsp(&decoder, &rspObj.dataRsp); - if (code != 0) { - SET_ERROR_MSG("decode mq data rsp failed"); - code = TSDB_CODE_INVALID_MSG; - goto end; - } - - if (!pRequest->pDb) { - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; - } - - struct SCatalog* pCatalog = NULL; - RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); - - SRequestConnInfo conn = {0}; - conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; - conn.requestId = pRequest->requestId; - conn.requestObjRefId = pRequest->self; - conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); - - RAW_RETURN_CHECK(smlInitHandle(&pQuery)); - pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pVgHash); - while (++rspObj.resIter < rspObj.dataRsp.blockNum) { - void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); - RAW_NULL_CHECK(pRetrieve); - if (!rspObj.dataRsp.withSchema) { - goto end; - } - - const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); - RAW_NULL_CHECK(tbName); - - SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; - tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); - tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); - - RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); - - 
SVgroupInfo vg = {0}; - RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg)); - - void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId)); - if (hData == NULL) { - RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); - } - - SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); - RAW_NULL_CHECK(pSW); - TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD)); - RAW_NULL_CHECK(fields); - for (int i = 0; i < pSW->nCols; i++) { - fields[i].type = pSW->pSchema[i].type; - fields[i].bytes = pSW->pSchema[i].bytes; - tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name)); - } - void* rawData = getRawDataFromRes(pRetrieve); - char err[ERR_MSG_LEN] = {0}; - code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, fields, pSW->nCols, true, err, ERR_MSG_LEN); - taosMemoryFree(fields); - taosMemoryFreeClear(pTableMeta); - if (code != TSDB_CODE_SUCCESS) { - SET_ERROR_MSG("table:%s, err:%s", tbName, err); - goto end; - } - } - - RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); - - launchQueryImpl(pRequest, pQuery, true, NULL); - code = pRequest->code; - -end: - uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code)); - tDeleteMqDataRsp(&rspObj.dataRsp); - tDecoderClear(&decoder); - qDestroyQuery(pQuery); - destroyRequest(pRequest); - taosHashCleanup(pVgHash); - taosMemoryFreeClear(pTableMeta); - return code; -} - static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { // find schema data info int32_t code = 0; @@ -1855,152 +1725,368 @@ static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { return code; } -static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) { - if (taos == NULL || data == NULL) { - SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); - return TSDB_CODE_INVALID_PARA; +typedef enum { + WRITE_RAW_INIT_START = 0, + 
WRITE_RAW_INIT_OK, + WRITE_RAW_INIT_FAIL, +} WRITE_RAW_INIT_STATUS; + +static SHashObj* writeRawCache = NULL; +static int8_t initFlag = 0; +static int8_t initedFlag = WRITE_RAW_INIT_START; + +typedef struct { + SHashObj* pVgHash; + SHashObj* pNameHash; + SHashObj* pMetaHash; +} rawCacheInfo; + +typedef struct { + SVgroupInfo vgInfo; + int64_t uid; + int64_t suid; +} tbInfo; + +static void tmqFreeMeta(void* data) { + STableMeta* pTableMeta = *(STableMeta**)data; + taosMemoryFree(pTableMeta); +} + +static void freeRawCache(void* data) { + rawCacheInfo* pRawCache = (rawCacheInfo*)data; + taosHashCleanup(pRawCache->pMetaHash); + taosHashCleanup(pRawCache->pNameHash); + taosHashCleanup(pRawCache->pVgHash); +} + +static int32_t initRawCacheHash() { + if (writeRawCache == NULL) { + writeRawCache = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK); + if (writeRawCache == NULL) { + return terrno; + } + taosHashSetFreeFp(writeRawCache, freeRawCache); } - int32_t code = TSDB_CODE_SUCCESS; - SHashObj* pVgHash = NULL; - SQuery* pQuery = NULL; - SMqRspObj rspObj = {0}; - SDecoder decoder = {0}; - STableMeta* pTableMeta = NULL; - SHashObj* pCreateTbHash = NULL; + return 0; +} - SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); +static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW) { + char* p = (char*)rawData; + // | version | total length | total rows | blankFill | total columns | flag seg| block group id | column schema | each + // column length | + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(uint64_t); + int8_t* fields = p; + + if (pSW->nCols != pTableMeta->tableInfo.numOfColumns) { + return true; + } + for (int i = 0; i < pSW->nCols; i++) { + int j = 0; + for (; j < pTableMeta->tableInfo.numOfColumns; j++) { + SSchema* pColSchema = 
&pTableMeta->schema[j]; + char* fieldName = pSW->pSchema[i].name; + + if (strcmp(pColSchema->name, fieldName) == 0) { + if (*fields != pColSchema->type || *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) { + return true; + } + break; + } + } + fields += sizeof(int8_t) + sizeof(int32_t); - uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); - pRequest->syncQuery = true; - rspObj.resIter = -1; - rspObj.resType = RES_TYPE__TMQ_METADATA; + if (j == pTableMeta->tableInfo.numOfColumns) return true; + } + return false; +} + +static int32_t getRawCache(SHashObj** pVgHash, SHashObj** pNameHash, SHashObj** pMetaHash, void* key) { + int32_t code = 0; + void* cacheInfo = taosHashGet(writeRawCache, &key, POINTER_BYTES); + if (cacheInfo == NULL) { + *pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + RAW_NULL_CHECK(*pVgHash); + *pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + RAW_NULL_CHECK(*pNameHash); + *pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + RAW_NULL_CHECK(*pMetaHash); + taosHashSetFreeFp(*pMetaHash, tmqFreeMeta); + rawCacheInfo info = {*pVgHash, *pNameHash, *pMetaHash}; + RAW_RETURN_CHECK(taosHashPut(writeRawCache, &key, POINTER_BYTES, &info, sizeof(rawCacheInfo))); + } else { + rawCacheInfo* info = (rawCacheInfo*)cacheInfo; + *pVgHash = info->pVgHash; + *pNameHash = info->pNameHash; + *pMetaHash = info->pMetaHash; + } + + return 0; +end: + taosHashCleanup(*pMetaHash); + taosHashCleanup(*pNameHash); + taosHashCleanup(*pVgHash); + return code; +} + +static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo* conn) { + int32_t code = 0; + RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, pRequest, 0)); + (*pRequest)->syncQuery = true; + if (!(*pRequest)->pDb) { + code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; + 
goto end; + } + + RAW_RETURN_CHECK(catalogGetHandle((*pRequest)->pTscObj->pAppInfo->clusterId, pCatalog)); + conn->pTrans = (*pRequest)->pTscObj->pAppInfo->pTransporter; + conn->requestId = (*pRequest)->requestId; + conn->requestObjRefId = (*pRequest)->self; + conn->mgmtEps = getEpSet_s(&(*pRequest)->pTscObj->pAppInfo->mgmtEp); +end: + return code; +} + +typedef int32_t _raw_decode_func_(SDecoder* pDecoder, SMqDataRsp* pRsp); +static int32_t decodeRawData(SDecoder* decoder, void* data, int32_t dataLen, _raw_decode_func_ func, + SMqRspObj* rspObj) { int8_t dataVersion = *(int8_t*)data; if (dataVersion >= MQ_DATA_RSP_VERSION) { data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); dataLen -= sizeof(int8_t) + sizeof(int32_t); } - tDecoderInit(&decoder, data, dataLen); - code = tDecodeSTaosxRsp(&decoder, &rspObj.dataRsp); + rspObj->resIter = -1; + tDecoderInit(decoder, data, dataLen); + int32_t code = func(decoder, &rspObj->dataRsp); if (code != 0) { SET_ERROR_MSG("decode mq taosx data rsp failed"); - code = TSDB_CODE_INVALID_MSG; - goto end; - } - - if (!pRequest->pDb) { - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; } + return code; +} - struct SCatalog* pCatalog = NULL; - RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); - - SRequestConnInfo conn = {0}; - conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; - conn.requestId = pRequest->requestId; - conn.requestObjRefId = pRequest->self; - conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); - - RAW_RETURN_CHECK(smlInitHandle(&pQuery)); - pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pVgHash); - pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pCreateTbHash); - RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); +static int32_t processCacheMeta(SHashObj* pVgHash, SHashObj* pNameHash, SHashObj* 
pMetaHash, + SVCreateTbReq* pCreateReqDst, SCatalog* pCatalog, SRequestConnInfo* conn, SName* pName, + STableMeta** pMeta, SSchemaWrapper* pSW, void* rawData, int32_t retry) { + int32_t code = 0; + STableMeta* pTableMeta = NULL; + tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname)); + if (tmpInfo == NULL || retry > 0) { + tbInfo info = {0}; - uDebug(LOG_ID_TAG " write raw metadata block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum); - while (++rspObj.resIter < rspObj.dataRsp.blockNum) { - void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); - RAW_NULL_CHECK(pRetrieve); - if (!rspObj.dataRsp.withSchema) { - goto end; + RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, conn, pName, &info.vgInfo)); + if (pCreateReqDst && tmpInfo == NULL) { // change stable name to get meta + tstrncpy(pName->tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN); } - - const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); - if (!tbName) { - SET_ERROR_MSG("block tbname is null"); - code = terrno; - goto end; + RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta)); + info.uid = pTableMeta->uid; + if (pTableMeta->tableType == TSDB_CHILD_TABLE) { + info.suid = pTableMeta->suid; + } else { + info.suid = pTableMeta->uid; } - - uDebug(LOG_ID_TAG " write raw metadata block tbname:%s", LOG_ID_VALUE, tbName); - SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; - tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); - tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); - - // find schema data info - SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); - SVgroupInfo vg = {0}; - RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg)); - if (pCreateReqDst) { // change stable name to get meta - tstrncpy(pName.tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN); + code = 
taosHashPut(pMetaHash, &info.suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); + if (code != 0) { + taosMemoryFree(pTableMeta); + goto end; } - RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); - if (pCreateReqDst) { - pTableMeta->vgId = vg.vgId; + pTableMeta->vgId = info.vgInfo.vgId; pTableMeta->uid = pCreateReqDst->uid; pCreateReqDst->ctb.suid = pTableMeta->suid; } - void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId)); - if (hData == NULL) { - RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); - } - SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); - RAW_NULL_CHECK(pSW); - TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD)); - if (fields == NULL) { - SET_ERROR_MSG("calloc fields failed"); - code = terrno; - goto end; - } - for (int i = 0; i < pSW->nCols; i++) { - fields[i].type = pSW->pSchema[i].type; - fields[i].bytes = pSW->pSchema[i].bytes; - tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name)); - } - void* rawData = getRawDataFromRes(pRetrieve); - char err[ERR_MSG_LEN] = {0}; - SVCreateTbReq* pCreateReqTmp = NULL; - if (pCreateReqDst) { - RAW_RETURN_CHECK(cloneSVreateTbReq(pCreateReqDst, &pCreateReqTmp)); + RAW_RETURN_CHECK(taosHashPut(pNameHash, pName->tname, strlen(pName->tname), &info, sizeof(tbInfo))); + tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname)); + RAW_RETURN_CHECK( + taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo))); + } + + if (pTableMeta == NULL || retry > 0) { + STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, &tmpInfo->suid, LONG_BYTES); + if (pTableMetaTmp == NULL || retry > 0 || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) { + RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta)); + code = taosHashPut(pMetaHash, &tmpInfo->suid, LONG_BYTES, 
&pTableMeta, POINTER_BYTES); + if (code != 0) { + taosMemoryFree(pTableMeta); + goto end; + } + + } else { + pTableMeta = *pTableMetaTmp; + pTableMeta->uid = tmpInfo->uid; + pTableMeta->vgId = tmpInfo->vgInfo.vgId; } - code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqTmp, fields, pSW->nCols, true, err, ERR_MSG_LEN); - if (pCreateReqTmp != NULL) { - tdDestroySVCreateTbReq(pCreateReqTmp); - taosMemoryFree(pCreateReqTmp); + } + *pMeta = pTableMeta; + +end: + return code; +} + +static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { + int32_t code = TSDB_CODE_SUCCESS; + SQuery* pQuery = NULL; + SMqRspObj rspObj = {0}; + SDecoder decoder = {0}; + + SRequestObj* pRequest = NULL; + SCatalog* pCatalog = NULL; + SRequestConnInfo conn = {0}; + RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn)); + uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); + RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeMqDataRsp, &rspObj)); + + SHashObj* pVgHash = NULL; + SHashObj* pNameHash = NULL; + SHashObj* pMetaHash = NULL; + RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos)); + int retry = 0; + while (1) { + RAW_RETURN_CHECK(smlInitHandle(&pQuery)); + uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum); + while (++rspObj.resIter < rspObj.dataRsp.blockNum) { + if (!rspObj.dataRsp.withSchema) { + goto end; + } + + const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); + RAW_NULL_CHECK(tbName); + SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); + RAW_NULL_CHECK(pSW); + void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); + RAW_NULL_CHECK(pRetrieve); + void* rawData = getRawDataFromRes(pRetrieve); + RAW_NULL_CHECK(rawData); + + uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName); + SName pName = 
{TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; + tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); + tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); + + STableMeta* pTableMeta = NULL; + RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, NULL, pCatalog, &conn, &pName, &pTableMeta, pSW, + rawData, retry)); + char err[ERR_MSG_LEN] = {0}; + code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); + if (code != TSDB_CODE_SUCCESS) { + SET_ERROR_MSG("table:%s, err:%s", pName.tname, err); + goto end; + } } - taosMemoryFree(fields); - taosMemoryFreeClear(pTableMeta); - if (code != TSDB_CODE_SUCCESS) { - SET_ERROR_MSG("table:%s, err:%s", tbName, err); - goto end; + RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); + launchQueryImpl(pRequest, pQuery, true, NULL); + code = pRequest->code; + + if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) { + uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code)); + qDestroyQuery(pQuery); + pQuery = NULL; + rspObj.resIter = -1; + continue; } + break; } - RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); +end: + uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code)); + tDeleteMqDataRsp(&rspObj.dataRsp); + tDecoderClear(&decoder); + qDestroyQuery(pQuery); + destroyRequest(pRequest); + return code; +} - launchQueryImpl(pRequest, pQuery, true, NULL); - code = pRequest->code; +static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) { + int32_t code = TSDB_CODE_SUCCESS; + SQuery* pQuery = NULL; + SMqRspObj rspObj = {0}; + SDecoder decoder = {0}; + SHashObj* pCreateTbHash = NULL; + + SRequestObj* pRequest = NULL; + SCatalog* pCatalog = NULL; + SRequestConnInfo conn = {0}; + + RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn)); + uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); + RAW_RETURN_CHECK(decodeRawData(&decoder, data, 
dataLen, tDecodeSTaosxRsp, &rspObj)); + + pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + RAW_NULL_CHECK(pCreateTbHash); + RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); + + SHashObj* pVgHash = NULL; + SHashObj* pNameHash = NULL; + SHashObj* pMetaHash = NULL; + RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos)); + int retry = 0; + while (1) { + RAW_RETURN_CHECK(smlInitHandle(&pQuery)); + uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum); + while (++rspObj.resIter < rspObj.dataRsp.blockNum) { + if (!rspObj.dataRsp.withSchema) { + goto end; + } + + const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); + RAW_NULL_CHECK(tbName); + SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); + RAW_NULL_CHECK(pSW); + void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); + RAW_NULL_CHECK(pRetrieve); + void* rawData = getRawDataFromRes(pRetrieve); + RAW_NULL_CHECK(rawData); + + uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName); + SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; + tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); + tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); + + // find schema data info + SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, pName.tname, strlen(pName.tname)); + STableMeta* pTableMeta = NULL; + RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, pCreateReqDst, pCatalog, &conn, &pName, + &pTableMeta, pSW, rawData, retry)); + char err[ERR_MSG_LEN] = {0}; + code = + rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); + if (code != TSDB_CODE_SUCCESS) { + SET_ERROR_MSG("table:%s, err:%s", pName.tname, err); + goto end; + } + } + 
RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); + launchQueryImpl(pRequest, pQuery, true, NULL); + code = pRequest->code; + + if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) { + uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code)); + qDestroyQuery(pQuery); + pQuery = NULL; + rspObj.resIter = -1; + continue; + } + break; + } end: uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code)); + tDeleteSTaosxRsp(&rspObj.dataRsp); void* pIter = taosHashIterate(pCreateTbHash, NULL); while (pIter) { tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE); pIter = taosHashIterate(pCreateTbHash, pIter); } taosHashCleanup(pCreateTbHash); - tDeleteSTaosxRsp(&rspObj.dataRsp); tDecoderClear(&decoder); qDestroyQuery(pQuery); destroyRequest(pRequest); - taosHashCleanup(pVgHash); - taosMemoryFreeClear(pTableMeta); return code; } @@ -2076,18 +2162,18 @@ char* tmq_get_json_meta(TAOS_RES* res) { return NULL; } - char* string = NULL; + char* string = NULL; SMqRspObj* rspObj = (SMqRspObj*)res; if (TD_RES_TMQ_METADATA(res)) { processAutoCreateTable(&rspObj->dataRsp, &string); } else if (TD_RES_TMQ_BATCH_META(res)) { processBatchMetaToJson(&rspObj->batchMetaRsp, &string); } else if (TD_RES_TMQ_META(res)) { - cJSON* pJson = NULL; + cJSON* pJson = NULL; processSimpleMeta(&rspObj->metaRsp, &pJson); string = cJSON_PrintUnformatted(pJson); cJSON_Delete(pJson); - } else{ + } else { uError("tmq_get_json_meta res:%d, invalid type", *(int8_t*)res); } @@ -2098,7 +2184,7 @@ char* tmq_get_json_meta(TAOS_RES* res) { void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); } static int32_t getOffSetLen(const SMqDataRsp* pRsp) { - SEncoder coder = {0}; + SEncoder coder = {0}; tEncoderInit(&coder, NULL, 0); if (tEncodeSTqOffsetVal(&coder, &pRsp->reqOffset) < 0) return -1; if (tEncodeSTqOffsetVal(&coder, &pRsp->rspOffset) < 0) return -1; @@ -2108,7 +2194,7 @@ static int32_t getOffSetLen(const SMqDataRsp* pRsp) { } typedef int32_t 
__encode_func__(SEncoder* pEncoder, const SMqDataRsp* pRsp); -static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) { +static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) { int32_t len = 0; int32_t code = 0; SEncoder encoder = {0}; @@ -2164,7 +2250,7 @@ int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) { raw->raw_type = rspObj->metaRsp.resMsgType; uDebug("tmq get raw type meta:%p", raw); } else if (TD_RES_TMQ(res)) { - int32_t code = encodeMqDataRsp(tEncodeMqDataRsp, &rspObj->dataRsp, raw); + int32_t code = encodeMqDataRsp(tEncodeMqDataRsp, &rspObj->dataRsp, raw); if (code != 0) { uError("tmq get raw type error:%d", terrno); return code; @@ -2199,7 +2285,31 @@ void tmq_free_raw(tmq_raw_data raw) { (void)memset(terrMsg, 0, ERR_MSG_LEN); } +static int32_t writeRawInit() { + while (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_START) { + int8_t old = atomic_val_compare_exchange_8(&initFlag, 0, 1); + if (old == 0) { + int32_t code = initRawCacheHash(); + if (code != 0) { + uError("tmq writeRawImpl init error:%d", code); + atomic_store_8(&initedFlag, WRITE_RAW_INIT_FAIL); + return code; + } + atomic_store_8(&initedFlag, WRITE_RAW_INIT_OK); + } + } + + if (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_FAIL) { + return TSDB_CODE_INTERNAL_ERROR; + } + return 0; +} + static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) { + if (writeRawInit() != 0) { + return TSDB_CODE_INTERNAL_ERROR; + } + if (type == TDMT_VND_CREATE_STB) { return taosCreateStb(taos, buf, len); } else if (type == TDMT_VND_ALTER_STB) { @@ -2214,10 +2324,10 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) return taosDropTable(taos, buf, len); } else if (type == TDMT_VND_DELETE) { return taosDeleteData(taos, buf, len); - } else if (type == RES_TYPE__TMQ) { - return tmqWriteRawDataImpl(taos, buf, len); } else if (type == RES_TYPE__TMQ_METADATA) { return 
tmqWriteRawMetaDataImpl(taos, buf, len); + } else if (type == RES_TYPE__TMQ) { + return tmqWriteRawDataImpl(taos, buf, len); } else if (type == RES_TYPE__TMQ_BATCH_META) { return tmqWriteBatchMetaDataImpl(taos, buf, len); } @@ -2225,7 +2335,8 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) } int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) { - if (!taos) { + if (taos == NULL || raw.raw == NULL || raw.raw_len <= 0) { + SET_ERROR_MSG("taos:%p or data:%p is NULL or raw_len <= 0", taos, raw.raw); return TSDB_CODE_INVALID_PARA; } @@ -2269,4 +2380,4 @@ static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen end: tDeleteMqBatchMetaRsp(&rsp); return code; -} +} \ No newline at end of file diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index d64f390e412..e4c2d6302cc 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -104,6 +104,9 @@ kvVal->type = TSDB_DATA_TYPE_UTINYINT; \ kvVal->u = result; +#define IS_COMMENT(protocol,data) \ + (protocol == TSDB_SML_LINE_PROTOCOL && data == '#') + int64_t smlToMilli[] = {3600000LL, 60000LL, 1000LL}; int64_t smlFactorNS[] = {NANOSECOND_PER_MSEC, NANOSECOND_PER_USEC, 1}; int64_t smlFactorS[] = {1000LL, 1000000LL, 1000000000LL}; @@ -135,7 +138,7 @@ void smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2 if (pBuf->buf == NULL) { return; } - (void)memset(pBuf->buf, 0, pBuf->len); + pBuf->buf[0] = 0; if (msg1) { (void)strncat(pBuf->buf, msg1, pBuf->len - 1); } @@ -166,23 +169,23 @@ int64_t smlGetTimeValue(const char *value, int32_t len, uint8_t fromPrecision, u } int32_t smlBuildTableInfo(int numRows, const char *measure, int32_t measureLen, SSmlTableInfo **tInfo) { + int32_t code = 0; + int32_t lino = 0; SSmlTableInfo *tag = (SSmlTableInfo *)taosMemoryCalloc(sizeof(SSmlTableInfo), 1); - if (!tag) { - return terrno; - } + SML_CHECK_NULL(tag) tag->sTableName = measure; tag->sTableNameLen = measureLen; 
tag->cols = taosArrayInit(numRows, POINTER_BYTES); - if (tag->cols == NULL) { - uError("SML:smlBuildTableInfo failed to allocate memory"); - taosMemoryFree(tag); - return terrno; - } - + SML_CHECK_NULL(tag->cols) *tInfo = tag; - return TSDB_CODE_SUCCESS; + return code; + +END: + taosMemoryFree(tag); + uError("%s failed code:%d line:%d", __FUNCTION__ , code, lino); + return code; } void smlBuildTsKv(SSmlKv *kv, int64_t ts) { @@ -194,7 +197,13 @@ void smlBuildTsKv(SSmlKv *kv, int64_t ts) { } static void smlDestroySTableMeta(void *para) { + if (para == NULL) { + return; + } SSmlSTableMeta *meta = *(SSmlSTableMeta **)para; + if (meta == NULL) { + return; + } taosHashCleanup(meta->tagHash); taosHashCleanup(meta->colHash); taosArrayDestroy(meta->tags); @@ -204,39 +213,26 @@ static void smlDestroySTableMeta(void *para) { } int32_t smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement, SSmlSTableMeta **sMeta) { - int32_t code = TSDB_CODE_SUCCESS; - char *measure = currElement->measure; - int measureLen = currElement->measureLen; + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + STableMeta *pTableMeta = NULL; + + int measureLen = currElement->measureLen; + char *measure = (char *)taosMemoryMalloc(measureLen); + SML_CHECK_NULL(measure); + (void)memcpy(measure, currElement->measure, measureLen); if (currElement->measureEscaped) { - measure = (char *)taosMemoryMalloc(measureLen); - if (measure == NULL) { - return terrno; - } - (void)memcpy(measure, currElement->measure, measureLen); PROCESS_SLASH_IN_MEASUREMENT(measure, measureLen); - smlStrReplace(measure, measureLen); } - STableMeta *pTableMeta = NULL; + smlStrReplace(measure, measureLen); code = smlGetMeta(info, measure, measureLen, &pTableMeta); - if (currElement->measureEscaped) { - taosMemoryFree(measure); - } + taosMemoryFree(measure); if (code != TSDB_CODE_SUCCESS) { info->dataFormat = false; info->reRun = true; - return code; - } - code = smlBuildSTableMeta(info->dataFormat, sMeta); - if (code != 
TSDB_CODE_SUCCESS) { - taosMemoryFreeClear(pTableMeta); - return code; - } - (*sMeta)->tableMeta = pTableMeta; - code = taosHashPut(info->superTables, currElement->measure, currElement->measureLen, sMeta, POINTER_BYTES); - if (code != TSDB_CODE_SUCCESS) { - smlDestroySTableMeta(*sMeta); - return code; + goto END; } + SML_CHECK_CODE(smlBuildSTableMeta(info->dataFormat, sMeta)); for (int i = 1; i < pTableMeta->tableInfo.numOfTags + pTableMeta->tableInfo.numOfColumns; i++) { SSchema *col = pTableMeta->schema + i; SSmlKv kv = {.key = col->name, .keyLen = strlen(col->name), .type = col->type}; @@ -250,16 +246,19 @@ int32_t smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement, SSml } if (i < pTableMeta->tableInfo.numOfColumns) { - if (taosArrayPush((*sMeta)->cols, &kv) == NULL) { - return terrno; - } + SML_CHECK_NULL(taosArrayPush((*sMeta)->cols, &kv)); } else { - if (taosArrayPush((*sMeta)->tags, &kv) == NULL) { - return terrno; - } + SML_CHECK_NULL(taosArrayPush((*sMeta)->tags, &kv)); } } - return TSDB_CODE_SUCCESS; + SML_CHECK_CODE(taosHashPut(info->superTables, currElement->measure, currElement->measureLen, sMeta, POINTER_BYTES)); + (*sMeta)->tableMeta = pTableMeta; + return code; + +END: + smlDestroySTableMeta(sMeta); + taosMemoryFreeClear(pTableMeta); + RETURN } bool isSmlColAligned(SSmlHandle *info, int cnt, SSmlKv *kv) { @@ -305,7 +304,6 @@ bool isSmlTagAligned(SSmlHandle *info, int cnt, SSmlKv *kv) { goto END; } SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->maxTagKVs, cnt); - if (maxKV == NULL) { goto END; } @@ -376,103 +374,70 @@ int32_t smlProcessSuperTable(SSmlHandle *info, SSmlLineInfo *elements) { int32_t smlProcessChildTable(SSmlHandle *info, SSmlLineInfo *elements) { int32_t code = TSDB_CODE_SUCCESS; - SSmlTableInfo **oneTable = - (SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag, elements->measureTagsLen); + int32_t lino = 0; + SSmlTableInfo **oneTable = (SSmlTableInfo **)taosHashGet(info->childTables, 
elements->measureTag, elements->measureTagsLen); SSmlTableInfo *tinfo = NULL; if (unlikely(oneTable == NULL)) { - code = smlBuildTableInfo(1, elements->measure, elements->measureLen, &tinfo); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - code = taosHashPut(info->childTables, elements->measureTag, elements->measureTagsLen, &tinfo, POINTER_BYTES); - if (code != 0) { - smlDestroyTableInfo(&tinfo); - return code; - } + SML_CHECK_CODE(smlBuildTableInfo(1, elements->measure, elements->measureLen, &tinfo)); + SML_CHECK_CODE(taosHashPut(info->childTables, elements->measureTag, elements->measureTagsLen, &tinfo, POINTER_BYTES)); tinfo->tags = taosArrayDup(info->preLineTagKV, NULL); - if (tinfo->tags == NULL) { - smlDestroyTableInfo(&tinfo); - return terrno; - } + SML_CHECK_NULL(tinfo->tags); for (size_t i = 0; i < taosArrayGetSize(info->preLineTagKV); i++) { SSmlKv *kv = (SSmlKv *)taosArrayGet(info->preLineTagKV, i); - if (kv == NULL) { - smlDestroyTableInfo(&tinfo); - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(kv); if (kv->keyEscaped) kv->key = NULL; if (kv->valueEscaped) kv->value = NULL; } - code = smlSetCTableName(tinfo, info->tbnameKey); - if (code != TSDB_CODE_SUCCESS) { - smlDestroyTableInfo(&tinfo); - return code; - } - code = getTableUid(info, elements, tinfo); - if (code != TSDB_CODE_SUCCESS) { - smlDestroyTableInfo(&tinfo); - return code; - } + SML_CHECK_CODE(smlSetCTableName(tinfo, info->tbnameKey)); + SML_CHECK_CODE(getTableUid(info, elements, tinfo)); if (info->dataFormat) { info->currSTableMeta->uid = tinfo->uid; - code = smlInitTableDataCtx(info->pQuery, info->currSTableMeta, &tinfo->tableDataCtx); - if (code != TSDB_CODE_SUCCESS) { - smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL); - smlDestroyTableInfo(&tinfo); - return code; - } + SML_CHECK_CODE(smlInitTableDataCtx(info->pQuery, info->currSTableMeta, &tinfo->tableDataCtx)); } } else { tinfo = *oneTable; } - if (tinfo == NULL) { - uError("smlProcessChildTable 
failed to get child table info"); - return TSDB_CODE_SML_INTERNAL_ERROR; - } if (info->dataFormat) info->currTableDataCtx = tinfo->tableDataCtx; return TSDB_CODE_SUCCESS; + +END: + smlDestroyTableInfo(&tinfo); + RETURN } -int32_t smlParseEndTelnetJson(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv) { - if (info->dataFormat) { - uDebug("SML:0x%" PRIx64 " smlParseEndTelnetJson format true, ts:%" PRId64, info->id, kvTs->i); - int32_t ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kvTs, 0); - if (ret == TSDB_CODE_SUCCESS) { - ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kv, 1); - } - if (ret == TSDB_CODE_SUCCESS) { - ret = smlBuildRow(info->currTableDataCtx); - } - clearColValArraySml(info->currTableDataCtx->pValues); - if (unlikely(ret != TSDB_CODE_SUCCESS)) { - smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL); - return ret; - } - } else { - uDebug("SML:0x%" PRIx64 " smlParseEndTelnetJson format false, ts:%" PRId64, info->id, kvTs->i); - if (elements->colArray == NULL) { - elements->colArray = taosArrayInit(16, sizeof(SSmlKv)); - if (elements->colArray == NULL) { - return terrno; - } - } - if (taosArrayPush(elements->colArray, kvTs) == NULL) { - return terrno; - } - if (taosArrayPush(elements->colArray, kv) == NULL) { - return terrno; - } +int32_t smlParseEndTelnetJsonFormat(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv) { + int32_t code = 0; + int32_t lino = 0; + uDebug("SML:0x%" PRIx64 " %s format true, ts:%" PRId64, info->id, __FUNCTION__ , kvTs->i); + SML_CHECK_CODE(smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kvTs, 0)); + SML_CHECK_CODE(smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kv, 1)); + SML_CHECK_CODE(smlBuildRow(info->currTableDataCtx)); + +END: + clearColValArraySml(info->currTableDataCtx->pValues); + RETURN +} + +int32_t smlParseEndTelnetJsonUnFormat(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, 
SSmlKv *kv) { + int32_t code = 0; + int32_t lino = 0; + uDebug("SML:0x%" PRIx64 " %s format false, ts:%" PRId64, info->id, __FUNCTION__, kvTs->i); + if (elements->colArray == NULL) { + elements->colArray = taosArrayInit(16, sizeof(SSmlKv)); + SML_CHECK_NULL(elements->colArray); } - info->preLine = *elements; + SML_CHECK_NULL(taosArrayPush(elements->colArray, kvTs)); + SML_CHECK_NULL (taosArrayPush(elements->colArray, kv)); - return TSDB_CODE_SUCCESS; +END: + RETURN } int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs) { if (info->dataFormat) { - uDebug("SML:0x%" PRIx64 " smlParseEndLine format true, ts:%" PRId64, info->id, kvTs->i); + uDebug("SML:0x%" PRIx64 " %s format true, ts:%" PRId64, info->id, __FUNCTION__, kvTs->i); int32_t ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kvTs, 0); if (ret == TSDB_CODE_SUCCESS) { ret = smlBuildRow(info->currTableDataCtx); @@ -481,11 +446,11 @@ int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs) clearColValArraySml(info->currTableDataCtx->pValues); taosArrayClearP(info->escapedStringList, NULL); if (unlikely(ret != TSDB_CODE_SUCCESS)) { - smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL); + uError("SML:0x%" PRIx64 " %s smlBuildCol error:%d", info->id, __FUNCTION__, ret); return ret; } } else { - uDebug("SML:0x%" PRIx64 " smlParseEndLine format false, ts:%" PRId64, info->id, kvTs->i); + uDebug("SML:0x%" PRIx64 " %s format false, ts:%" PRId64, info->id, __FUNCTION__, kvTs->i); taosArraySet(elements->colArray, 0, kvTs); } info->preLine = *elements; @@ -494,15 +459,15 @@ int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs) } static int32_t smlParseTableName(SArray *tags, char *childTableName, char *tbnameKey) { - bool autoChildName = false; - size_t delimiter = strlen(tsSmlAutoChildTableNameDelimiter); + int32_t code = 0; + int32_t lino = 0; + bool autoChildName = false; + size_t delimiter = 
strlen(tsSmlAutoChildTableNameDelimiter); if (delimiter > 0 && tbnameKey == NULL) { size_t totalNameLen = delimiter * (taosArrayGetSize(tags) - 1); for (int i = 0; i < taosArrayGetSize(tags); i++) { SSmlKv *tag = (SSmlKv *)taosArrayGet(tags, i); - if (tag == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(tag); totalNameLen += tag->length; } if (totalNameLen < TSDB_TABLE_NAME_LEN) { @@ -510,12 +475,10 @@ static int32_t smlParseTableName(SArray *tags, char *childTableName, char *tbnam } } if (autoChildName) { - (void)memset(childTableName, 0, TSDB_TABLE_NAME_LEN); + childTableName[0] = '\0'; for (int i = 0; i < taosArrayGetSize(tags); i++) { SSmlKv *tag = (SSmlKv *)taosArrayGet(tags, i); - if (tag == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(tag); (void)strncat(childTableName, tag->value, TMIN(tag->length, TSDB_TABLE_NAME_LEN - 1 - strlen(childTableName))); if (i != taosArrayGetSize(tags) - 1) { (void)strncat(childTableName, tsSmlAutoChildTableNameDelimiter, TSDB_TABLE_NAME_LEN - 1 - strlen(childTableName)); @@ -533,12 +496,9 @@ static int32_t smlParseTableName(SArray *tags, char *childTableName, char *tbnam for (int i = 0; i < taosArrayGetSize(tags); i++) { SSmlKv *tag = (SSmlKv *)taosArrayGet(tags, i); - if (tag == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(tag); // handle child table name if (childTableNameLen == tag->keyLen && strncmp(tag->key, tbnameKey, tag->keyLen) == 0) { - (void)memset(childTableName, 0, TSDB_TABLE_NAME_LEN); tstrncpy(childTableName, tag->value, TMIN(TSDB_TABLE_NAME_LEN, tag->length + 1)); if (tsSmlDot2Underline) { smlStrReplace(childTableName, strlen(childTableName)); @@ -549,24 +509,21 @@ static int32_t smlParseTableName(SArray *tags, char *childTableName, char *tbnam } } - return TSDB_CODE_SUCCESS; +END: + RETURN } int32_t smlSetCTableName(SSmlTableInfo *oneTable, char *tbnameKey) { - int32_t code = smlParseTableName(oneTable->tags, oneTable->childTableName, tbnameKey); - 
if (code != TSDB_CODE_SUCCESS) { - return code; - } + int32_t code = 0; + int32_t lino = 0; + SArray *dst = NULL; + SML_CHECK_CODE(smlParseTableName(oneTable->tags, oneTable->childTableName, tbnameKey)); if (strlen(oneTable->childTableName) == 0) { - SArray *dst = taosArrayDup(oneTable->tags, NULL); - if (dst == NULL) { - return terrno; - } + dst = taosArrayDup(oneTable->tags, NULL); + SML_CHECK_NULL(dst); if (oneTable->sTableNameLen >= TSDB_TABLE_NAME_LEN) { - uError("SML:smlSetCTableName super table name is too long"); - taosArrayDestroy(dst); - return TSDB_CODE_SML_INTERNAL_ERROR; + SML_CHECK_CODE(TSDB_CODE_SML_INTERNAL_ERROR); } char superName[TSDB_TABLE_NAME_LEN] = {0}; RandTableName rName = {dst, NULL, (uint8_t)oneTable->sTableNameLen, oneTable->childTableName}; @@ -578,13 +535,12 @@ int32_t smlSetCTableName(SSmlTableInfo *oneTable, char *tbnameKey) { rName.stbFullName = oneTable->sTableName; } - code = buildChildTableName(&rName); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - taosArrayDestroy(dst); + SML_CHECK_CODE(buildChildTableName(&rName)); } - return TSDB_CODE_SUCCESS; + +END: + taosArrayDestroy(dst); + RETURN } int32_t getTableUid(SSmlHandle *info, SSmlLineInfo *currElement, SSmlTableInfo *tinfo) { @@ -608,42 +564,29 @@ int32_t getTableUid(SSmlHandle *info, SSmlLineInfo *currElement, SSmlTableInfo * } int32_t smlBuildSTableMeta(bool isDataFormat, SSmlSTableMeta **sMeta) { + int32_t code = 0; + int32_t lino = 0; SSmlSTableMeta *meta = (SSmlSTableMeta *)taosMemoryCalloc(sizeof(SSmlSTableMeta), 1); - if (!meta) { - return terrno; - } - + SML_CHECK_NULL(meta); if (unlikely(!isDataFormat)) { meta->tagHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - if (meta->tagHash == NULL) { - uError("SML:smlBuildSTableMeta failed to allocate memory"); - goto cleanup; - } - + SML_CHECK_NULL(meta->tagHash); meta->colHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - 
if (meta->colHash == NULL) { - uError("SML:smlBuildSTableMeta failed to allocate memory"); - goto cleanup; - } + SML_CHECK_NULL(meta->colHash); } meta->tags = taosArrayInit(32, sizeof(SSmlKv)); - if (meta->tags == NULL) { - uError("SML:smlBuildSTableMeta failed to allocate memory"); - goto cleanup; - } + SML_CHECK_NULL(meta->tags); meta->cols = taosArrayInit(32, sizeof(SSmlKv)); - if (meta->cols == NULL) { - uError("SML:smlBuildSTableMeta failed to allocate memory"); - goto cleanup; - } + SML_CHECK_NULL(meta->cols); *sMeta = meta; return TSDB_CODE_SUCCESS; -cleanup: - smlDestroySTableMeta(meta); - return TSDB_CODE_OUT_OF_MEMORY; +END: + smlDestroySTableMeta(&meta); + uError("%s failed code:%d line:%d", __FUNCTION__ , code, lino); + return code; } int32_t smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg) { @@ -720,104 +663,6 @@ int32_t smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg) { return true; } -bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg) { - const char *pVal = kvVal->value; - int32_t len = kvVal->length; - char *endptr = NULL; - double result = taosStr2Double(pVal, &endptr); - if (pVal == endptr) { - smlBuildInvalidDataMsg(msg, "invalid data", pVal); - return false; - } - - int32_t left = len - (endptr - pVal); - if (left == 0 || (left == 3 && strncasecmp(endptr, "f64", left) == 0)) { - kvVal->type = TSDB_DATA_TYPE_DOUBLE; - kvVal->d = result; - } else if ((left == 3 && strncasecmp(endptr, "f32", left) == 0)) { - if (!IS_VALID_FLOAT(result)) { - smlBuildInvalidDataMsg(msg, "float out of range[-3.402823466e+38,3.402823466e+38]", pVal); - return false; - } - kvVal->type = TSDB_DATA_TYPE_FLOAT; - kvVal->f = (float)result; - } else if ((left == 1 && *endptr == 'i') || (left == 3 && strncasecmp(endptr, "i64", left) == 0)) { - if (smlDoubleToInt64OverFlow(result)) { - errno = 0; - int64_t tmp = taosStr2Int64(pVal, &endptr, 10); - if (errno == ERANGE) { - smlBuildInvalidDataMsg(msg, "big int out of range[-9223372036854775808,9223372036854775807]", pVal); - 
return false; - } - kvVal->type = TSDB_DATA_TYPE_BIGINT; - kvVal->i = tmp; - return true; - } - kvVal->type = TSDB_DATA_TYPE_BIGINT; - kvVal->i = (int64_t)result; - } else if ((left == 1 && *endptr == 'u') || (left == 3 && strncasecmp(endptr, "u64", left) == 0)) { - if (result >= (double)UINT64_MAX || result < 0) { - errno = 0; - uint64_t tmp = taosStr2UInt64(pVal, &endptr, 10); - if (errno == ERANGE || result < 0) { - smlBuildInvalidDataMsg(msg, "unsigned big int out of range[0,18446744073709551615]", pVal); - return false; - } - kvVal->type = TSDB_DATA_TYPE_UBIGINT; - kvVal->u = tmp; - return true; - } - kvVal->type = TSDB_DATA_TYPE_UBIGINT; - kvVal->u = result; - } else if (left == 3 && strncasecmp(endptr, "i32", left) == 0) { - if (!IS_VALID_INT(result)) { - smlBuildInvalidDataMsg(msg, "int out of range[-2147483648,2147483647]", pVal); - return false; - } - kvVal->type = TSDB_DATA_TYPE_INT; - kvVal->i = result; - } else if (left == 3 && strncasecmp(endptr, "u32", left) == 0) { - if (!IS_VALID_UINT(result)) { - smlBuildInvalidDataMsg(msg, "unsigned int out of range[0,4294967295]", pVal); - return false; - } - kvVal->type = TSDB_DATA_TYPE_UINT; - kvVal->u = result; - } else if (left == 3 && strncasecmp(endptr, "i16", left) == 0) { - if (!IS_VALID_SMALLINT(result)) { - smlBuildInvalidDataMsg(msg, "small int our of range[-32768,32767]", pVal); - return false; - } - kvVal->type = TSDB_DATA_TYPE_SMALLINT; - kvVal->i = result; - } else if (left == 3 && strncasecmp(endptr, "u16", left) == 0) { - if (!IS_VALID_USMALLINT(result)) { - smlBuildInvalidDataMsg(msg, "unsigned small int out of rang[0,65535]", pVal); - return false; - } - kvVal->type = TSDB_DATA_TYPE_USMALLINT; - kvVal->u = result; - } else if (left == 2 && strncasecmp(endptr, "i8", left) == 0) { - if (!IS_VALID_TINYINT(result)) { - smlBuildInvalidDataMsg(msg, "tiny int out of range[-128,127]", pVal); - return false; - } - kvVal->type = TSDB_DATA_TYPE_TINYINT; - kvVal->i = result; - } else if (left == 2 && 
strncasecmp(endptr, "u8", left) == 0) { - if (!IS_VALID_UTINYINT(result)) { - smlBuildInvalidDataMsg(msg, "unsigned tiny int out of range[0,255]", pVal); - return false; - } - kvVal->type = TSDB_DATA_TYPE_UTINYINT; - kvVal->u = result; - } else { - smlBuildInvalidDataMsg(msg, "invalid data", pVal); - return false; - } - return true; -} - int32_t smlGetMeta(SSmlHandle *info, const void *measure, int32_t measureLen, STableMeta **pTableMeta) { *pTableMeta = NULL; @@ -829,14 +674,11 @@ int32_t smlGetMeta(SSmlHandle *info, const void *measure, int32_t measureLen, ST conn.requestId = info->pRequest->requestId; conn.requestObjRefId = info->pRequest->self; conn.mgmtEps = getEpSet_s(&info->taos->pAppInfo->mgmtEp); - (void)memset(pName.tname, 0, TSDB_TABLE_NAME_LEN); + int32_t len = TMIN(measureLen, TSDB_TABLE_NAME_LEN - 1); (void)memcpy(pName.tname, measure, measureLen); + pName.tname[len] = 0; - int32_t code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, pTableMeta); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - return TSDB_CODE_SUCCESS; + return catalogGetSTableMeta(info->pCatalog, &conn, &pName, pTableMeta); } static int64_t smlGenId() { @@ -855,8 +697,9 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm uint16_t *index = colHash ? (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen) : NULL; if (index) { if (colField[*index].type != kv->type) { - uError("SML:0x%" PRIx64 " point type and db type mismatch. db type: %d, point type: %d, key: %s", info->id, - colField[*index].type, kv->type, kv->key); + snprintf(info->msgBuf.buf, info->msgBuf.len, "SML:0x%" PRIx64 " %s point type and db type mismatch. 
db type: %s, point type: %s, key: %s", + info->id, __FUNCTION__, tDataTypes[colField[*index].type].name, tDataTypes[kv->type].name, kv->key); + uError("%s", info->msgBuf.buf); return TSDB_CODE_SML_INVALID_DATA; } @@ -910,65 +753,46 @@ static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) { static int32_t smlProcessSchemaAction(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols, SArray *checkDumplicateCols, ESchemaAction *action, bool isTag) { int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; for (int j = 0; j < taosArrayGetSize(cols); ++j) { if (j == 0 && !isTag) continue; SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, j); - if (kv == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } - code = smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, action, info); - if (code != TSDB_CODE_SUCCESS) { - return code; - } + SML_CHECK_NULL(kv); + SML_CHECK_CODE(smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, action, info)); } for (int j = 0; j < taosArrayGetSize(checkDumplicateCols); ++j) { SSmlKv *kv = (SSmlKv *)taosArrayGet(checkDumplicateCols, j); - if (kv == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(kv); if (taosHashGet(schemaHash, kv->key, kv->keyLen) != NULL) { - return TSDB_CODE_PAR_DUPLICATED_COLUMN; + SML_CHECK_CODE(TSDB_CODE_PAR_DUPLICATED_COLUMN); } } - return TSDB_CODE_SUCCESS; +END: + RETURN } static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols, bool isTag) { int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; SHashObj *hashTmp = taosHashInit(length, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - if (hashTmp == NULL) { - code = terrno; - goto END; - } + SML_CHECK_NULL(hashTmp); int32_t i = 0; for (; i < length; i++) { - code = taosHashPut(hashTmp, schema[i].name, strlen(schema[i].name), &i, SHORT_BYTES); - if (code != 0) { - goto END; - } - } - - if (isTag) { - i = 0; - } else { - i = 1; + SML_CHECK_CODE(taosHashPut(hashTmp, 
schema[i].name, strlen(schema[i].name), &i, SHORT_BYTES)); } + i = isTag ? 0 : 1; for (; i < taosArrayGetSize(cols); i++) { SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i); - if (kv == NULL) { - code = terrno; - goto END; - } + SML_CHECK_NULL(kv); if (taosHashGet(hashTmp, kv->key, kv->keyLen) == NULL) { - code = TSDB_CODE_SML_INVALID_DATA; - goto END; + SML_CHECK_CODE(TSDB_CODE_SML_INVALID_DATA); } } END: taosHashCleanup(hashTmp); - return code; + RETURN } static int32_t getBytes(uint8_t type, int32_t length) { @@ -982,36 +806,28 @@ static int32_t getBytes(uint8_t type, int32_t length) { static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols, SArray *results, int32_t numOfCols, bool isTag) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; for (int j = 0; j < taosArrayGetSize(cols); ++j) { SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, j); - if (kv == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(kv); ESchemaAction action = SCHEMA_ACTION_NULL; - int code = smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, &action, info); - if (code != 0) { - return code; - } + SML_CHECK_CODE(smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, &action, info)); if (action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_ADD_TAG) { SField field = {0}; field.type = kv->type; field.bytes = getBytes(kv->type, kv->length); - (void)memcpy(field.name, kv->key, kv->keyLen); - if (taosArrayPush(results, &field) == NULL) { - return terrno; - } + (void)memcpy(field.name, kv->key, TMIN(kv->keyLen, sizeof(field.name) - 1)); + SML_CHECK_NULL(taosArrayPush(results, &field)); } else if (action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE || action == SCHEMA_ACTION_CHANGE_TAG_SIZE) { uint16_t *index = (uint16_t *)taosHashGet(schemaHash, kv->key, kv->keyLen); if (index == NULL) { - uError("smlBuildFieldsList get error, key:%s", kv->key); - return TSDB_CODE_SML_INVALID_DATA; + 
SML_CHECK_CODE(TSDB_CODE_SML_INVALID_DATA); } uint16_t newIndex = *index; if (isTag) newIndex -= numOfCols; SField *field = (SField *)taosArrayGet(results, newIndex); - if (field == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(field); field->bytes = getBytes(kv->type, kv->length); } } @@ -1020,84 +836,67 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO int32_t len = 0; for (int j = 0; j < taosArrayGetSize(results); ++j) { SField *field = taosArrayGet(results, j); - if (field == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(field); len += field->bytes; } if (len > maxLen) { return isTag ? TSDB_CODE_PAR_INVALID_TAGS_LENGTH : TSDB_CODE_PAR_INVALID_ROW_LENGTH; } - return TSDB_CODE_SUCCESS; +END: + RETURN } -static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns, SArray *pTags, STableMeta *pTableMeta, +static FORCE_INLINE void smlBuildCreateStbReq(SMCreateStbReq *pReq, int32_t colVer, int32_t tagVer, tb_uid_t suid, int8_t source){ + pReq->colVer = colVer; + pReq->tagVer = tagVer; + pReq->suid = suid; + pReq->source = source; +} +static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns, SArray **pTags, STableMeta *pTableMeta, ESchemaAction action) { SRequestObj *pRequest = NULL; SMCreateStbReq pReq = {0}; int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; SCmdMsgInfo pCmdMsg = {0}; char *pSql = NULL; // put front for free pReq.numOfColumns = taosArrayGetSize(pColumns); - pReq.pTags = pTags; - pReq.numOfTags = taosArrayGetSize(pTags); + pReq.pTags = *pTags; + pReq.numOfTags = taosArrayGetSize(*pTags); + *pTags = NULL; pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SFieldWithOptions)); - if (pReq.pColumns == NULL) { - code = terrno; - goto end; - } + SML_CHECK_NULL(pReq.pColumns); for (int32_t i = 0; i < pReq.numOfColumns; ++i) { SField *pField = taosArrayGet(pColumns, i); - if (pField == NULL) { - code = terrno; - goto end; - } + 
SML_CHECK_NULL(pField); SFieldWithOptions fieldWithOption = {0}; setFieldWithOptions(&fieldWithOption, pField); setDefaultOptionsForField(&fieldWithOption); - if (taosArrayPush(pReq.pColumns, &fieldWithOption) == NULL) { - code = terrno; - goto end; - } + SML_CHECK_NULL(taosArrayPush(pReq.pColumns, &fieldWithOption)); } if (action == SCHEMA_ACTION_CREATE_STABLE) { - pReq.colVer = 1; - pReq.tagVer = 1; - pReq.suid = 0; - pReq.source = TD_REQ_FROM_APP; pSql = "sml_create_stable"; + smlBuildCreateStbReq(&pReq, 1, 1, 0, TD_REQ_FROM_APP); } else if (action == SCHEMA_ACTION_ADD_TAG || action == SCHEMA_ACTION_CHANGE_TAG_SIZE) { - pReq.colVer = pTableMeta->sversion; - pReq.tagVer = pTableMeta->tversion + 1; - pReq.suid = pTableMeta->uid; - pReq.source = TD_REQ_FROM_TAOX; pSql = (action == SCHEMA_ACTION_ADD_TAG) ? "sml_add_tag" : "sml_modify_tag_size"; + smlBuildCreateStbReq(&pReq, pTableMeta->sversion, pTableMeta->tversion + 1, pTableMeta->uid, TD_REQ_FROM_TAOX); } else if (action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE) { - pReq.colVer = pTableMeta->sversion + 1; - pReq.tagVer = pTableMeta->tversion; - pReq.suid = pTableMeta->uid; - pReq.source = TD_REQ_FROM_TAOX; pSql = (action == SCHEMA_ACTION_ADD_COLUMN) ? 
"sml_add_column" : "sml_modify_column_size"; + smlBuildCreateStbReq(&pReq, pTableMeta->sversion + 1, pTableMeta->tversion, pTableMeta->uid, TD_REQ_FROM_TAOX); } else { - uError("SML:0x%" PRIx64 " invalid action:%d", info->id, action); - code = TSDB_CODE_SML_INVALID_DATA; - goto end; + SML_CHECK_CODE(TSDB_CODE_SML_INVALID_DATA); } - code = buildRequest(info->taos->id, pSql, strlen(pSql), NULL, false, &pRequest, 0); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } + SML_CHECK_CODE(buildRequest(info->taos->id, pSql, strlen(pSql), NULL, false, &pRequest, 0)); pRequest->syncQuery = true; if (!pRequest->pDb) { - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; + SML_CHECK_CODE(TSDB_CODE_PAR_DB_NOT_SPECIFIED); } if (pReq.numOfTags == 0) { @@ -1106,40 +905,30 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns, field.type = TSDB_DATA_TYPE_NCHAR; field.bytes = TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; tstrncpy(field.name, tsSmlTagName, sizeof(field.name)); - if (taosArrayPush(pReq.pTags, &field) == NULL) { - code = terrno; - goto end; - } + SML_CHECK_NULL(taosArrayPush(pReq.pTags, &field)); } pReq.commentLen = -1; pReq.igExists = true; - code = tNameExtractFullName(pName, pReq.name); - if (TSDB_CODE_SUCCESS != code) { - goto end; - } + SML_CHECK_CODE(tNameExtractFullName(pName, pReq.name)); pCmdMsg.epSet = getEpSet_s(&info->taos->pAppInfo->mgmtEp); pCmdMsg.msgType = TDMT_MND_CREATE_STB; pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq); if (pCmdMsg.msgLen < 0) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; + uError("failed to serialize create stable request1, code:%d, terrno:%d", pCmdMsg.msgLen, terrno); + SML_CHECK_CODE(TSDB_CODE_SML_INVALID_DATA); } pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen); - if (NULL == pCmdMsg.pMsg) { - code = terrno; - goto end; - } - - if (tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq) < 0) { - code = TSDB_CODE_OUT_OF_MEMORY; + SML_CHECK_NULL(pCmdMsg.pMsg); + code = 
tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq); + if (code < 0) { taosMemoryFree(pCmdMsg.pMsg); - goto end; + uError("failed to serialize create stable request2, code:%d, terrno:%d", code, terrno); + SML_CHECK_CODE(TSDB_CODE_SML_INVALID_DATA); } SQuery pQuery = {0}; - (void)memset(&pQuery, 0, sizeof(pQuery)); pQuery.execMode = QUERY_EXEC_MODE_RPC; pQuery.pCmdMsg = &pCmdMsg; pQuery.msgType = pQuery.pCmdMsg->msgType; @@ -1148,26 +937,144 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns, launchQueryImpl(pRequest, &pQuery, true, NULL); // no need to check return value if (pRequest->code == TSDB_CODE_SUCCESS) { - code = catalogRemoveTableMeta(info->pCatalog, pName); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } + SML_CHECK_CODE(catalogRemoveTableMeta(info->pCatalog, pName)); } code = pRequest->code; -end: +END: destroyRequest(pRequest); tFreeSMCreateStbReq(&pReq); + RETURN +} + +static int32_t smlCreateTable(SSmlHandle *info, SRequestConnInfo *conn, SSmlSTableMeta *sTableData, + SName *pName, STableMeta **pTableMeta){ + int32_t code = 0; + int32_t lino = 0; + SArray *pColumns = NULL; + SArray *pTags = NULL; + SML_CHECK_CODE(smlCheckAuth(info, conn, NULL, AUTH_TYPE_WRITE)); + uDebug("SML:0x%" PRIx64 " %s create table:%s", info->id, __FUNCTION__, pName->tname); + pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols), sizeof(SField)); + SML_CHECK_NULL(pColumns); + pTags = taosArrayInit(taosArrayGetSize(sTableData->tags), sizeof(SField)); + SML_CHECK_NULL(pTags); + SML_CHECK_CODE(smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 0, true)); + SML_CHECK_CODE(smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false)); + SML_CHECK_CODE(smlSendMetaMsg(info, pName, pColumns, &pTags, NULL, SCHEMA_ACTION_CREATE_STABLE)); + info->cost.numOfCreateSTables++; + taosMemoryFreeClear(*pTableMeta); + + SML_CHECK_CODE(catalogGetSTableMeta(info->pCatalog, conn, pName, pTableMeta)); + +END: + 
taosArrayDestroy(pColumns); + taosArrayDestroy(pTags); + RETURN +} + +static int32_t smlBuildFields(SArray **pColumns, SArray **pTags, STableMeta *pTableMeta, SSmlSTableMeta *sTableData){ + int32_t code = 0; + int32_t lino = 0; + *pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + (pTableMeta)->tableInfo.numOfColumns, sizeof(SField)); + SML_CHECK_NULL(pColumns); + *pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + (pTableMeta)->tableInfo.numOfTags, sizeof(SField)); + SML_CHECK_NULL(pTags); + for (uint16_t i = 0; i < (pTableMeta)->tableInfo.numOfColumns + (pTableMeta)->tableInfo.numOfTags; i++) { + SField field = {0}; + field.type = (pTableMeta)->schema[i].type; + field.bytes = (pTableMeta)->schema[i].bytes; + tstrncpy(field.name, (pTableMeta)->schema[i].name, sizeof(field.name)); + if (i < (pTableMeta)->tableInfo.numOfColumns) { + SML_CHECK_NULL(taosArrayPush(*pColumns, &field)); + } else { + SML_CHECK_NULL(taosArrayPush(*pTags, &field)); + } + } +END: + RETURN +} +static int32_t smlModifyTag(SSmlHandle *info, SHashObj* hashTmp, SRequestConnInfo *conn, + SSmlSTableMeta *sTableData, SName *pName, STableMeta **pTableMeta){ + ESchemaAction action = SCHEMA_ACTION_NULL; + SArray *pColumns = NULL; + SArray *pTags = NULL; + int32_t code = 0; + int32_t lino = 0; + SML_CHECK_CODE(smlProcessSchemaAction(info, (*pTableMeta)->schema, hashTmp, sTableData->tags, sTableData->cols, &action, true)); + + if (action != SCHEMA_ACTION_NULL) { + SML_CHECK_CODE(smlCheckAuth(info, conn, pName->tname, AUTH_TYPE_WRITE)); + uDebug("SML:0x%" PRIx64 " %s change table tag, table:%s, action:%d", info->id, __FUNCTION__, pName->tname, + action); + SML_CHECK_CODE(smlBuildFields(&pColumns, &pTags, *pTableMeta, sTableData)); + SML_CHECK_CODE(smlBuildFieldsList(info, (*pTableMeta)->schema, hashTmp, sTableData->tags, pTags, + (*pTableMeta)->tableInfo.numOfColumns, true)); + + SML_CHECK_CODE(smlSendMetaMsg(info, pName, pColumns, &pTags, (*pTableMeta), action)); + + 
info->cost.numOfAlterTagSTables++; + taosMemoryFreeClear(*pTableMeta); + SML_CHECK_CODE(catalogRefreshTableMeta(info->pCatalog, conn, pName, -1)); + SML_CHECK_CODE(catalogGetSTableMeta(info->pCatalog, conn, pName, pTableMeta)); + } + +END: + taosArrayDestroy(pColumns); + taosArrayDestroy(pTags); + RETURN +} + +static int32_t smlModifyCols(SSmlHandle *info, SHashObj* hashTmp, SRequestConnInfo *conn, + SSmlSTableMeta *sTableData, SName *pName, STableMeta **pTableMeta){ + ESchemaAction action = SCHEMA_ACTION_NULL; + SArray *pColumns = NULL; + SArray *pTags = NULL; + int32_t code = 0; + int32_t lino = 0; + SML_CHECK_CODE(smlProcessSchemaAction(info, (*pTableMeta)->schema, hashTmp, sTableData->cols, sTableData->tags, &action, false)); + + if (action != SCHEMA_ACTION_NULL) { + SML_CHECK_CODE(smlCheckAuth(info, conn, pName->tname, AUTH_TYPE_WRITE)); + uDebug("SML:0x%" PRIx64 " %s change table col, table:%s, action:%d", info->id, __FUNCTION__, pName->tname, + action); + SML_CHECK_CODE(smlBuildFields(&pColumns, &pTags, *pTableMeta, sTableData)); + SML_CHECK_CODE(smlBuildFieldsList(info, (*pTableMeta)->schema, hashTmp, sTableData->cols, pColumns, + (*pTableMeta)->tableInfo.numOfColumns, false)); + + SML_CHECK_CODE(smlSendMetaMsg(info, pName, pColumns, &pTags, (*pTableMeta), action)); + + info->cost.numOfAlterColSTables++; + taosMemoryFreeClear(*pTableMeta); + SML_CHECK_CODE(catalogRefreshTableMeta(info->pCatalog, conn, pName, -1)); + SML_CHECK_CODE(catalogGetSTableMeta(info->pCatalog, conn, pName, pTableMeta)); + } + +END: + taosArrayDestroy(pColumns); + taosArrayDestroy(pTags); + RETURN +} + +static int32_t smlBuildTempHash(SHashObj *hashTmp, STableMeta *pTableMeta, uint16_t start, uint16_t end){ + int32_t code = 0; + int32_t lino = 0; + for (uint16_t i = start; i < end; i++) { + SML_CHECK_CODE(taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES)); + } + +END: return code; } static int32_t smlModifyDBSchemas(SSmlHandle *info) 
{ - uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas start, format:%d, needModifySchema:%d", info->id, info->dataFormat, + uDebug("SML:0x%" PRIx64 " %s start, format:%d, needModifySchema:%d", info->id, __FUNCTION__, info->dataFormat, info->needModifySchema); if (info->dataFormat && !info->needModifySchema) { return TSDB_CODE_SUCCESS; } int32_t code = 0; + int32_t lino = 0; SHashObj *hashTmp = NULL; STableMeta *pTableMeta = NULL; @@ -1188,345 +1095,102 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { size_t superTableLen = 0; void *superTable = taosHashGetKey(tmp, &superTableLen); char *measure = taosMemoryMalloc(superTableLen); - if (measure == NULL) { - code = terrno; - goto end; - } + SML_CHECK_NULL(measure); (void)memcpy(measure, superTable, superTableLen); - PROCESS_SLASH_IN_MEASUREMENT(measure, superTableLen); + if (info->protocol == TSDB_SML_LINE_PROTOCOL){ + PROCESS_SLASH_IN_MEASUREMENT(measure, superTableLen); + } smlStrReplace(measure, superTableLen); - (void)memset(pName.tname, 0, TSDB_TABLE_NAME_LEN); - (void)memcpy(pName.tname, measure, superTableLen); + size_t nameLen = TMIN(superTableLen, TSDB_TABLE_NAME_LEN - 1); + (void)memcpy(pName.tname, measure, nameLen); + pName.tname[nameLen] = '\0'; taosMemoryFree(measure); code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta); if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_MND_STB_NOT_EXIST) { - code = smlCheckAuth(info, &conn, NULL, AUTH_TYPE_WRITE); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas create table:%s", info->id, pName.tname); - SArray *pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols), sizeof(SField)); - if (pColumns == NULL) { - code = terrno; - goto end; - } - SArray *pTags = taosArrayInit(taosArrayGetSize(sTableData->tags), sizeof(SField)); - if (pTags == NULL) { - code = terrno; - taosArrayDestroy(pColumns); - goto end; - } - code = smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 
0, true); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlBuildFieldsList tag1 failed. %s", info->id, pName.tname); - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - goto end; - } - code = smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlBuildFieldsList col1 failed. %s", info->id, pName.tname); - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - goto end; - } - code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE); - taosArrayDestroy(pColumns); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname); - goto end; - } - info->cost.numOfCreateSTables++; - taosMemoryFreeClear(pTableMeta); - - code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " catalogGetSTableMeta failed. super table name %s", info->id, pName.tname); - goto end; - } + SML_CHECK_CODE(smlCreateTable(info, &conn, sTableData, &pName, &pTableMeta)); } else if (code == TSDB_CODE_SUCCESS) { if (smlIsPKTable(pTableMeta)) { - code = TSDB_CODE_SML_NOT_SUPPORT_PK; - goto end; - } - - hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, - HASH_NO_LOCK); - if (hashTmp == NULL) { - code = terrno; - goto end; - } - for (uint16_t i = pTableMeta->tableInfo.numOfColumns; - i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) { - code = taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES); - if (code != 0) { - goto end; - } - } - - ESchemaAction action = SCHEMA_ACTION_NULL; - code = - smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->tags, sTableData->cols, &action, true); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - if (action != 
SCHEMA_ACTION_NULL) { - code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas change table tag, table:%s, action:%d", info->id, pName.tname, - action); - SArray *pColumns = - taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField)); - if (pColumns == NULL) { - code = terrno; - goto end; - } - SArray *pTags = - taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField)); - if (pTags == NULL) { - taosArrayDestroy(pColumns); - code = terrno; - goto end; - } - for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) { - SField field = {0}; - field.type = pTableMeta->schema[i].type; - field.bytes = pTableMeta->schema[i].bytes; - tstrncpy(field.name, pTableMeta->schema[i].name, sizeof(field.name)); - if (i < pTableMeta->tableInfo.numOfColumns) { - if (taosArrayPush(pColumns, &field) == NULL) { - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - code = terrno; - goto end; - } - } else { - if (taosArrayPush(pTags, &field) == NULL) { - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - code = terrno; - goto end; - } - } - } - code = smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->tags, pTags, - pTableMeta->tableInfo.numOfColumns, true); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlBuildFieldsList tag2 failed. 
%s", info->id, pName.tname); - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - goto end; - } - - if (taosArrayGetSize(pTags) + pTableMeta->tableInfo.numOfColumns > TSDB_MAX_COLUMNS) { - uError("SML:0x%" PRIx64 " too many columns than 4096", info->id); - code = TSDB_CODE_PAR_TOO_MANY_COLUMNS; - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - goto end; - } - if (taosArrayGetSize(pTags) > TSDB_MAX_TAGS) { - uError("SML:0x%" PRIx64 " too many tags than 128", info->id); - code = TSDB_CODE_PAR_INVALID_TAGS_NUM; - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - goto end; - } - - code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action); - taosArrayDestroy(pColumns); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname); - goto end; - } - - info->cost.numOfAlterTagSTables++; - taosMemoryFreeClear(pTableMeta); - code = catalogRefreshTableMeta(info->pCatalog, &conn, &pName, -1); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } + SML_CHECK_CODE(TSDB_CODE_SML_NOT_SUPPORT_PK); } + hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SML_CHECK_NULL(hashTmp); + SML_CHECK_CODE(smlBuildTempHash(hashTmp, pTableMeta, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags)); + SML_CHECK_CODE(smlModifyTag(info, hashTmp, &conn, sTableData, &pName, &pTableMeta)); taosHashClear(hashTmp); - for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) { - code = taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - } - action = SCHEMA_ACTION_NULL; - code = - smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, 
sTableData->cols, sTableData->tags, &action, false); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - if (action != SCHEMA_ACTION_NULL) { - code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas change table col, table:%s, action:%d", info->id, pName.tname, - action); - SArray *pColumns = - taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField)); - if (pColumns == NULL) { - code = terrno; - goto end; - } - SArray *pTags = - taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField)); - if (pTags == NULL) { - taosArrayDestroy(pColumns); - code = terrno; - goto end; - } - for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) { - SField field = {0}; - field.type = pTableMeta->schema[i].type; - field.bytes = pTableMeta->schema[i].bytes; - tstrncpy(field.name, pTableMeta->schema[i].name, sizeof(field.name)); - if (i < pTableMeta->tableInfo.numOfColumns) { - if (taosArrayPush(pColumns, &field) == NULL) { - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - code = terrno; - goto end; - } - } else { - if (taosArrayPush(pTags, &field) == NULL) { - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - code = terrno; - goto end; - } - } - } - - code = smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->cols, pColumns, - pTableMeta->tableInfo.numOfColumns, false); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlBuildFieldsList col2 failed. 
%s", info->id, pName.tname); - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - goto end; - } - - if (taosArrayGetSize(pColumns) + pTableMeta->tableInfo.numOfTags > TSDB_MAX_COLUMNS) { - uError("SML:0x%" PRIx64 " too many columns than 4096", info->id); - code = TSDB_CODE_PAR_TOO_MANY_COLUMNS; - taosArrayDestroy(pColumns); - taosArrayDestroy(pTags); - goto end; - } - - code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action); - taosArrayDestroy(pColumns); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname); - goto end; - } - - info->cost.numOfAlterColSTables++; - taosMemoryFreeClear(pTableMeta); - code = catalogRefreshTableMeta(info->pCatalog, &conn, &pName, -1); - if (code != TSDB_CODE_SUCCESS) { - goto end; - } - code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " catalogGetSTableMeta failed. super table name %s", info->id, pName.tname); - goto end; - } - } + SML_CHECK_CODE(smlBuildTempHash(hashTmp, pTableMeta, 0, pTableMeta->tableInfo.numOfColumns)); + SML_CHECK_CODE(smlModifyCols(info, hashTmp, &conn, sTableData, &pName, &pTableMeta)); needCheckMeta = true; taosHashCleanup(hashTmp); hashTmp = NULL; } else { - uError("SML:0x%" PRIx64 " load table meta error: %s", info->id, tstrerror(code)); - goto end; + uError("SML:0x%" PRIx64 " %s load table meta error: %s", info->id, __FUNCTION__, tstrerror(code)); + goto END; } if (needCheckMeta) { - code = smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags, - sTableData->tags, true); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " check tag failed. 
super table name %s", info->id, pName.tname); - goto end; - } - code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " check cols failed. super table name %s", info->id, pName.tname); - goto end; - } + SML_CHECK_CODE(smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags, sTableData->tags, true)); + SML_CHECK_CODE(smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false)); } taosMemoryFreeClear(sTableData->tableMeta); sTableData->tableMeta = pTableMeta; - uDebug("SML:0x%" PRIx64 "modify schema uid:%" PRIu64 ", sversion:%d, tversion:%d", info->id, pTableMeta->uid, + uDebug("SML:0x%" PRIx64 " %s modify schema uid:%" PRIu64 ", sversion:%d, tversion:%d", info->id, __FUNCTION__, pTableMeta->uid, pTableMeta->sversion, pTableMeta->tversion); tmp = (SSmlSTableMeta **)taosHashIterate(info->superTables, tmp); } - uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas end success, format:%d, needModifySchema:%d", info->id, info->dataFormat, + uDebug("SML:0x%" PRIx64 " %s end success, format:%d, needModifySchema:%d", info->id, __FUNCTION__, info->dataFormat, info->needModifySchema); return TSDB_CODE_SUCCESS; -end: +END: taosHashCancelIterate(info->superTables, tmp); taosHashCleanup(hashTmp); taosMemoryFreeClear(pTableMeta); (void)catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1); // ignore refresh meta code if there is an error - uError("SML:0x%" PRIx64 " smlModifyDBSchemas end failed:%d:%s, format:%d, needModifySchema:%d", info->id, code, + uError("SML:0x%" PRIx64 " %s end failed:%d:%s, format:%d, needModifySchema:%d", info->id, __FUNCTION__, code, tstrerror(code), info->dataFormat, info->needModifySchema); return code; } static int32_t smlInsertMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols, SHashObj *checkDuplicate) { + int32_t code = 0; + int32_t lino = 0; 
terrno = 0; for (int16_t i = 0; i < taosArrayGetSize(cols); ++i) { SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i); - if (kv == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(kv); int ret = taosHashPut(metaHash, kv->key, kv->keyLen, &i, SHORT_BYTES); if (ret == 0) { - if (taosArrayPush(metaArray, kv) == NULL) { - return terrno; - } + SML_CHECK_NULL(taosArrayPush(metaArray, kv)); if (taosHashGet(checkDuplicate, kv->key, kv->keyLen) != NULL) { - return TSDB_CODE_PAR_DUPLICATED_COLUMN; + SML_CHECK_CODE(TSDB_CODE_PAR_DUPLICATED_COLUMN); } } else if (terrno == TSDB_CODE_DUP_KEY) { return TSDB_CODE_PAR_DUPLICATED_COLUMN; } } - return TSDB_CODE_SUCCESS; + +END: + RETURN } static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols, bool isTag, SSmlMsgBuf *msg, SHashObj *checkDuplicate) { + int32_t code = 0; + int32_t lino = 0; for (int i = 0; i < taosArrayGetSize(cols); ++i) { SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i); - if (kv == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(kv); int16_t *index = (int16_t *)taosHashGet(metaHash, kv->key, kv->keyLen); if (index) { SSmlKv *value = (SSmlKv *)taosArrayGet(metaArray, *index); - if (value == NULL) { - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(value); if (isTag) { if (kv->length > value->length) { @@ -1536,7 +1200,7 @@ static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols } if (kv->type != value->type) { smlBuildInvalidDataMsg(msg, "the type is not the same like before", kv->key); - return TSDB_CODE_SML_NOT_SAME_TYPE; + SML_CHECK_CODE(TSDB_CODE_SML_NOT_SAME_TYPE); } if (IS_VAR_DATA_TYPE(kv->type) && (kv->length > value->length)) { // update string len, if bigger @@ -1546,25 +1210,19 @@ static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols size_t tmp = taosArrayGetSize(metaArray); if (tmp > INT16_MAX) { smlBuildInvalidDataMsg(msg, "too many cols or tags", kv->key); - uError("too many cols or tags"); - 
return TSDB_CODE_SML_INVALID_DATA; + SML_CHECK_CODE(TSDB_CODE_SML_INVALID_DATA); } int16_t size = tmp; - int ret = taosHashPut(metaHash, kv->key, kv->keyLen, &size, SHORT_BYTES); - if (ret == 0) { - if (taosArrayPush(metaArray, kv) == NULL) { - return terrno; - } - if (taosHashGet(checkDuplicate, kv->key, kv->keyLen) != NULL) { - return TSDB_CODE_PAR_DUPLICATED_COLUMN; - } - } else { - return ret; + SML_CHECK_CODE(taosHashPut(metaHash, kv->key, kv->keyLen, &size, SHORT_BYTES)); + SML_CHECK_NULL(taosArrayPush(metaArray, kv)); + if (taosHashGet(checkDuplicate, kv->key, kv->keyLen) != NULL) { + SML_CHECK_CODE(TSDB_CODE_PAR_DUPLICATED_COLUMN); } } } - return TSDB_CODE_SUCCESS; +END: + RETURN } void smlDestroyTableInfo(void *para) { @@ -1588,8 +1246,7 @@ void freeSSmlKv(void *data) { } void smlDestroyInfo(SSmlHandle *info) { - if (!info) return; - // qDestroyQuery(info->pQuery); + if (info == NULL) return; taosHashCleanup(info->pVgHash); taosHashCleanup(info->childTables); @@ -1614,111 +1271,89 @@ void smlDestroyInfo(SSmlHandle *info) { if (!info->dataFormat) { for (int i = 0; i < info->lineNum; i++) { taosArrayDestroyEx(info->lines[i].colArray, freeSSmlKv); - if (info->parseJsonByLib) { - taosMemoryFree(info->lines[i].tags); - } if (info->lines[i].measureTagsLen != 0 && info->protocol != TSDB_SML_LINE_PROTOCOL) { taosMemoryFree(info->lines[i].measureTag); } } taosMemoryFree(info->lines); } - + if(info->protocol == TSDB_SML_JSON_PROTOCOL) { + taosMemoryFreeClear(info->preLine.tags); + } cJSON_Delete(info->root); taosMemoryFreeClear(info); } int32_t smlBuildSmlInfo(TAOS *taos, SSmlHandle **handle) { int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; SSmlHandle *info = (SSmlHandle *)taosMemoryCalloc(1, sizeof(SSmlHandle)); - if (NULL == info) { - return terrno; - } - if (taos != NULL) { + SML_CHECK_NULL(info); + if (taos != NULL){ info->taos = acquireTscObj(*(int64_t *)taos); - if (info->taos == NULL) { - code = TSDB_CODE_TSC_DISCONNECTED; - goto FAILED; - } - code = 
catalogGetHandle(info->taos->pAppInfo->clusterId, &info->pCatalog); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " get catalog error %d", info->id, code); - goto FAILED; - } + SML_CHECK_NULL(info->taos); + SML_CHECK_CODE(catalogGetHandle(info->taos->pAppInfo->clusterId, &info->pCatalog)); } info->pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + SML_CHECK_NULL(info->pVgHash); info->childTables = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SML_CHECK_NULL(info->childTables); info->tableUids = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + SML_CHECK_NULL(info->tableUids); info->superTables = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - if (info->pVgHash == NULL || info->childTables == NULL || info->superTables == NULL || info->tableUids == NULL) { - uError("create SSmlHandle hash obj failed"); - code = terrno; - goto FAILED; - } + SML_CHECK_NULL(info->superTables); taosHashSetFreeFp(info->superTables, smlDestroySTableMeta); taosHashSetFreeFp(info->childTables, smlDestroyTableInfo); info->id = smlGenId(); - code = smlInitHandle(&info->pQuery); - if (code != TSDB_CODE_SUCCESS) { - goto FAILED; - } + SML_CHECK_CODE(smlInitHandle(&info->pQuery)); info->dataFormat = true; - info->tagJsonArray = taosArrayInit(8, POINTER_BYTES); + SML_CHECK_NULL(info->tagJsonArray); info->valueJsonArray = taosArrayInit(8, POINTER_BYTES); + SML_CHECK_NULL(info->valueJsonArray); info->preLineTagKV = taosArrayInit(8, sizeof(SSmlKv)); + SML_CHECK_NULL(info->preLineTagKV); info->escapedStringList = taosArrayInit(8, POINTER_BYTES); - if (info->tagJsonArray == NULL || info->valueJsonArray == NULL || - info->preLineTagKV == NULL || info->escapedStringList == NULL) { - uError("SML:0x%" PRIx64 " failed to allocate memory", info->id); - code = terrno; - goto FAILED; - } + 
SML_CHECK_NULL(info->escapedStringList); *handle = info; - return code; + info = NULL; -FAILED: +END: smlDestroyInfo(info); - return code; + RETURN } static int32_t smlPushCols(SArray *colsArray, SArray *cols) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; SHashObj *kvHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - if (!kvHash) { - uError("SML:smlDealCols failed to allocate memory"); - return terrno; - } + SML_CHECK_NULL(kvHash); for (size_t i = 0; i < taosArrayGetSize(cols); i++) { SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i); - if (kv == NULL) { - taosHashCleanup(kvHash); - return TSDB_CODE_SML_INVALID_DATA; - } + SML_CHECK_NULL(kv); terrno = 0; - int32_t code = taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); + code = taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES); if (terrno == TSDB_CODE_DUP_KEY) { - taosHashCleanup(kvHash); - return TSDB_CODE_PAR_DUPLICATED_COLUMN; - } - if (code != TSDB_CODE_SUCCESS) { - taosHashCleanup(kvHash); - return code; + SML_CHECK_CODE(TSDB_CODE_PAR_DUPLICATED_COLUMN); } + SML_CHECK_CODE(code); } - if (taosArrayPush(colsArray, &kvHash) == NULL) { - taosHashCleanup(kvHash); - return terrno; - } - return TSDB_CODE_SUCCESS; + SML_CHECK_NULL(taosArrayPush(colsArray, &kvHash)); + return code; +END: + taosHashCleanup(kvHash); + RETURN } -static int32_t smlParseLineBottom(SSmlHandle *info) { - uDebug("SML:0x%" PRIx64 " smlParseLineBottom start, format:%d, linenum:%d", info->id, info->dataFormat, +static int32_t smlParseEnd(SSmlHandle *info) { + uDebug("SML:0x%" PRIx64 " %s start, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat, info->lineNum); + int32_t code = 0; + int32_t lino = 0; if (info->dataFormat) return TSDB_CODE_SUCCESS; for (int32_t i = 0; i < info->lineNum; i++) { @@ -1728,10 +1363,6 @@ static int32_t smlParseLineBottom(SSmlHandle *info) { SSmlTableInfo **tmp = (SSmlTableInfo **)taosHashGet(info->childTables, 
elements->measure, elements->measureTagsLen); if (tmp) tinfo = *tmp; - } else if (info->protocol == TSDB_SML_TELNET_PROTOCOL) { - SSmlTableInfo **tmp = (SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag, - elements->measureLen + elements->tagsLen); - if (tmp) tinfo = *tmp; } else { SSmlTableInfo **tmp = (SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen); @@ -1754,97 +1385,74 @@ static int32_t smlParseLineBottom(SSmlHandle *info) { return TSDB_CODE_PAR_TOO_MANY_COLUMNS; } - int ret = smlPushCols(tinfo->cols, elements->colArray); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } + SML_CHECK_CODE(smlPushCols(tinfo->cols, elements->colArray)); SSmlSTableMeta **tableMeta = (SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen); if (tableMeta) { // update meta - uDebug("SML:0x%" PRIx64 " smlParseLineBottom update meta, format:%d, linenum:%d", info->id, info->dataFormat, + uDebug("SML:0x%" PRIx64 " %s update meta, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat, info->lineNum); - ret = smlUpdateMeta((*tableMeta)->colHash, (*tableMeta)->cols, elements->colArray, false, &info->msgBuf, - (*tableMeta)->tagHash); - if (ret == TSDB_CODE_SUCCESS) { - ret = smlUpdateMeta((*tableMeta)->tagHash, (*tableMeta)->tags, tinfo->tags, true, &info->msgBuf, - (*tableMeta)->colHash); - } - if (ret != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlUpdateMeta failed, ret:%d", info->id, ret); - return ret; - } + SML_CHECK_CODE(smlUpdateMeta((*tableMeta)->colHash, (*tableMeta)->cols, elements->colArray, false, &info->msgBuf, + (*tableMeta)->tagHash)); + SML_CHECK_CODE(smlUpdateMeta((*tableMeta)->tagHash, (*tableMeta)->tags, tinfo->tags, true, &info->msgBuf, + (*tableMeta)->colHash)); } else { - uDebug("SML:0x%" PRIx64 " smlParseLineBottom add meta, format:%d, linenum:%d", info->id, info->dataFormat, + uDebug("SML:0x%" PRIx64 " %s add meta, format:%d, 
linenum:%d", info->id, __FUNCTION__, info->dataFormat, info->lineNum); SSmlSTableMeta *meta = NULL; - ret = smlBuildSTableMeta(info->dataFormat, &meta); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - ret = taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES); - if (ret != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " put measuer to hash failed", info->id); - return ret; - } - ret = smlInsertMeta(meta->tagHash, meta->tags, tinfo->tags, NULL); - if (ret == TSDB_CODE_SUCCESS) { - ret = smlInsertMeta(meta->colHash, meta->cols, elements->colArray, meta->tagHash); - } - if (ret != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " insert meta failed:%s", info->id, tstrerror(ret)); - return ret; + SML_CHECK_CODE(smlBuildSTableMeta(info->dataFormat, &meta)); + code = taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES); + if (code != TSDB_CODE_SUCCESS) { + smlDestroySTableMeta(&meta); + SML_CHECK_CODE(code); } + SML_CHECK_CODE(smlInsertMeta(meta->tagHash, meta->tags, tinfo->tags, NULL)); + SML_CHECK_CODE(smlInsertMeta(meta->colHash, meta->cols, elements->colArray, meta->tagHash)); } } - uDebug("SML:0x%" PRIx64 " smlParseLineBottom end, format:%d, linenum:%d", info->id, info->dataFormat, info->lineNum); + uDebug("SML:0x%" PRIx64 " %s end, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat, info->lineNum); - return TSDB_CODE_SUCCESS; +END: + RETURN } static int32_t smlInsertData(SSmlHandle *info) { - int32_t code = TSDB_CODE_SUCCESS; - uDebug("SML:0x%" PRIx64 " smlInsertData start, format:%d", info->id, info->dataFormat); + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + char *measure = NULL; + SSmlTableInfo **oneTable = NULL; + uDebug("SML:0x%" PRIx64 " %s start, format:%d", info->id, __FUNCTION__, info->dataFormat); if (info->pRequest->dbList == NULL) { info->pRequest->dbList = taosArrayInit(1, TSDB_DB_FNAME_LEN); - if (info->pRequest->dbList == NULL) { - return 
terrno; - } + SML_CHECK_NULL(info->pRequest->dbList); } char *data = (char *)taosArrayReserve(info->pRequest->dbList, 1); - if (data == NULL) { - return terrno; - } + SML_CHECK_NULL(data); SName pName = {TSDB_TABLE_NAME_T, info->taos->acctId, {0}, {0}}; tstrncpy(pName.dbname, info->pRequest->pDb, sizeof(pName.dbname)); (void)tNameGetFullDbName(&pName, data); // ignore - SSmlTableInfo **oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, NULL); + oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, NULL); while (oneTable) { SSmlTableInfo *tableData = *oneTable; int measureLen = tableData->sTableNameLen; - char *measure = (char *)taosMemoryMalloc(tableData->sTableNameLen); - if (measure == NULL) { - return terrno; - } + measure = (char *)taosMemoryMalloc(tableData->sTableNameLen); + SML_CHECK_NULL(measure); (void)memcpy(measure, tableData->sTableName, tableData->sTableNameLen); - PROCESS_SLASH_IN_MEASUREMENT(measure, measureLen); + if (info->protocol == TSDB_SML_LINE_PROTOCOL){ + PROCESS_SLASH_IN_MEASUREMENT(measure, measureLen); + } smlStrReplace(measure, measureLen); - (void)memset(pName.tname, 0, TSDB_TABLE_NAME_LEN); (void)memcpy(pName.tname, measure, measureLen); + pName.tname[measureLen] = '\0'; if (info->pRequest->tableList == NULL) { info->pRequest->tableList = taosArrayInit(1, sizeof(SName)); - if (info->pRequest->tableList == NULL) { - return terrno; - } - } - if (taosArrayPush(info->pRequest->tableList, &pName) == NULL) { - return terrno; + SML_CHECK_NULL(info->pRequest->tableList); } - + SML_CHECK_NULL(taosArrayPush(info->pRequest->tableList, &pName)); tstrncpy(pName.tname, tableData->childTableName, sizeof(pName.tname)); SRequestConnInfo conn = {0}; @@ -1853,61 +1461,33 @@ static int32_t smlInsertData(SSmlHandle *info) { conn.requestObjRefId = info->pRequest->self; conn.mgmtEps = getEpSet_s(&info->taos->pAppInfo->mgmtEp); - code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE); - if (code != TSDB_CODE_SUCCESS) { - 
taosMemoryFree(measure); - taosHashCancelIterate(info->childTables, oneTable); - return code; - } + SML_CHECK_CODE(smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE)); - SVgroupInfo vg; - code = catalogGetTableHashVgroup(info->pCatalog, &conn, &pName, &vg); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " catalogGetTableHashVgroup failed. table name: %s", info->id, tableData->childTableName); - taosMemoryFree(measure); - taosHashCancelIterate(info->childTables, oneTable); - return code; - } - code = taosHashPut(info->pVgHash, (const char *)&vg.vgId, sizeof(vg.vgId), (char *)&vg, sizeof(vg)); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " taosHashPut failed. table name: %s", info->id, tableData->childTableName); - taosMemoryFree(measure); - taosHashCancelIterate(info->childTables, oneTable); - return code; - } + SVgroupInfo vg = {0}; + SML_CHECK_CODE(catalogGetTableHashVgroup(info->pCatalog, &conn, &pName, &vg)); + SML_CHECK_CODE(taosHashPut(info->pVgHash, (const char *)&vg.vgId, sizeof(vg.vgId), (char *)&vg, sizeof(vg))); SSmlSTableMeta **pMeta = (SSmlSTableMeta **)taosHashGet(info->superTables, tableData->sTableName, tableData->sTableNameLen); - if (unlikely(NULL == pMeta || NULL == (*pMeta)->tableMeta)) { - uError("SML:0x%" PRIx64 " NULL == pMeta. table name: %s", info->id, tableData->childTableName); - taosMemoryFree(measure); - taosHashCancelIterate(info->childTables, oneTable); - return TSDB_CODE_SML_INTERNAL_ERROR; + if (unlikely(NULL == pMeta || NULL == *pMeta || NULL == (*pMeta)->tableMeta)) { + uError("SML:0x%" PRIx64 " %s NULL == pMeta. 
table name: %s", info->id, __FUNCTION__, tableData->childTableName); + SML_CHECK_CODE(TSDB_CODE_SML_INTERNAL_ERROR); } // use tablemeta of stable to save vgid and uid of child table (*pMeta)->tableMeta->vgId = vg.vgId; (*pMeta)->tableMeta->uid = tableData->uid; // one table merge data block together according uid - uDebug("SML:0x%" PRIx64 " smlInsertData table:%s, uid:%" PRIu64 ", format:%d", info->id, pName.tname, + uDebug("SML:0x%" PRIx64 " %s table:%s, uid:%" PRIu64 ", format:%d", info->id, __FUNCTION__, pName.tname, tableData->uid, info->dataFormat); - code = smlBindData(info->pQuery, info->dataFormat, tableData->tags, (*pMeta)->cols, tableData->cols, + SML_CHECK_CODE(smlBindData(info->pQuery, info->dataFormat, tableData->tags, (*pMeta)->cols, tableData->cols, (*pMeta)->tableMeta, tableData->childTableName, measure, measureLen, info->ttl, info->msgBuf.buf, - info->msgBuf.len); - taosMemoryFree(measure); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlBindData failed", info->id); - taosHashCancelIterate(info->childTables, oneTable); - return code; - } + info->msgBuf.len)); + taosMemoryFreeClear(measure); oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable); } - code = smlBuildOutput(info->pQuery, info->pVgHash); - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlBuildOutput failed", info->id); - return code; - } + SML_CHECK_CODE(smlBuildOutput(info->pQuery, info->pVgHash)); info->cost.insertRpcTime = taosGetTimestampUs(); SAppClusterSummary *pActivity = &info->taos->pAppInfo->summary; @@ -1915,10 +1495,15 @@ static int32_t smlInsertData(SSmlHandle *info) { launchQueryImpl(info->pRequest, info->pQuery, true, NULL); // no need to check return code - uDebug("SML:0x%" PRIx64 " smlInsertData end, format:%d, code:%d,%s", info->id, info->dataFormat, info->pRequest->code, + uDebug("SML:0x%" PRIx64 " %s end, format:%d, code:%d,%s", info->id, __FUNCTION__, info->dataFormat, info->pRequest->code, 
tstrerror(info->pRequest->code)); return info->pRequest->code; + +END: + taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); + RETURN } static void smlPrintStatisticInfo(SSmlHandle *info) { @@ -1934,6 +1519,8 @@ static void smlPrintStatisticInfo(SSmlHandle *info) { } int32_t smlClearForRerun(SSmlHandle *info) { + int32_t code = 0; + int32_t lino = 0; info->reRun = false; taosHashClear(info->childTables); @@ -1941,18 +1528,14 @@ int32_t smlClearForRerun(SSmlHandle *info) { taosHashClear(info->tableUids); if (!info->dataFormat) { - if (unlikely(info->lines != NULL)) { - uError("SML:0x%" PRIx64 " info->lines != NULL", info->id); - return TSDB_CODE_SML_INVALID_DATA; - } info->lines = (SSmlLineInfo *)taosMemoryCalloc(info->lineNum, sizeof(SSmlLineInfo)); - if (unlikely(info->lines == NULL)) { - uError("SML:0x%" PRIx64 " info->lines == NULL", info->id); - return terrno; - } + SML_CHECK_NULL(info->lines); } taosArrayClearP(info->escapedStringList, NULL); + if(info->protocol == TSDB_SML_JSON_PROTOCOL) { + taosMemoryFreeClear(info->preLine.tags); + } (void)memset(&info->preLine, 0, sizeof(SSmlLineInfo)); info->currSTableMeta = NULL; info->currTableDataCtx = NULL; @@ -1960,7 +1543,26 @@ int32_t smlClearForRerun(SSmlHandle *info) { SVnodeModifyOpStmt *stmt = (SVnodeModifyOpStmt *)(info->pQuery->pRoot); stmt->freeHashFunc(stmt->pTableBlockHashObj); stmt->pTableBlockHashObj = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); - return TSDB_CODE_SUCCESS; + SML_CHECK_NULL(stmt->pTableBlockHashObj); + +END: + RETURN +} + +static void printRaw(int64_t id, int lineNum, int numLines, ELogLevel level, char* data, int32_t len){ + char *print = taosMemoryMalloc(len + 1); + if (print == NULL) { + uError("SML:0x%" PRIx64 " smlParseLine failed. 
code : %d", id, terrno); + return; + } + (void)memcpy(print, data, len); + print[len] = '\0'; + if (level == DEBUG_DEBUG){ + uDebug("SML:0x%" PRIx64 " smlParseLine is raw, line %d/%d : %s", id, lineNum, numLines, print); + }else if (level == DEBUG_ERROR){ + uError("SML:0x%" PRIx64 " smlParseLine failed. line %d/%d : %s", id, lineNum, numLines, print); + } + taosMemoryFree(print); } static bool getLine(SSmlHandle *info, char *lines[], char **rawLine, char *rawLineEnd, int numLines, int i, char **tmp, @@ -1976,42 +1578,38 @@ static bool getLine(SSmlHandle *info, char *lines[], char **rawLine, char *rawLi } (*len)++; } - if (info->protocol == TSDB_SML_LINE_PROTOCOL && (*tmp)[0] == '#') { // this line is comment + if (IS_COMMENT(info->protocol,(*tmp)[0])) { // this line is comment return false; } } if (*rawLine != NULL && (uDebugFlag & DEBUG_DEBUG)) { - char *print = taosMemoryCalloc(*len + 1, 1); - if (print != NULL) { - (void)memcpy(print, *tmp, *len); - uDebug("SML:0x%" PRIx64 " smlParseLine is raw, numLines:%d, protocol:%d, len:%d, data:%s", info->id, numLines, - info->protocol, *len, print); - taosMemoryFree(print); - } else { - uError("SML:0x%" PRIx64 " smlParseLine taosMemoryCalloc failed", info->id); - } + printRaw(info->id, i, numLines, DEBUG_DEBUG, *tmp, *len); } else { - uDebug("SML:0x%" PRIx64 " smlParseLine is not numLines:%d, protocol:%d, len:%d, data:%s", info->id, numLines, - info->protocol, *len, *tmp); + uDebug("SML:0x%" PRIx64 " smlParseLine is not raw, line %d/%d : %s", info->id, i, numLines, *tmp); } return true; } -static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char *rawLineEnd, int numLines) { - uDebug("SML:0x%" PRIx64 " smlParseLine start", info->id); + +static int32_t smlParseJson(SSmlHandle *info, char *lines[], char *rawLine) { + int32_t code = TSDB_CODE_SUCCESS; + if (lines) { + code = smlParseJSONExt(info, *lines); + } else if (rawLine) { + code = smlParseJSONExt(info, rawLine); + } + if (code != 
TSDB_CODE_SUCCESS) { + uError("%s failed code:%d", __FUNCTION__ , code); + } + return code; +} + +static int32_t smlParseStart(SSmlHandle *info, char *lines[], char *rawLine, char *rawLineEnd, int numLines) { + uDebug("SML:0x%" PRIx64 " %s start", info->id, __FUNCTION__); int32_t code = TSDB_CODE_SUCCESS; if (info->protocol == TSDB_SML_JSON_PROTOCOL) { - if (lines) { - code = smlParseJSON(info, *lines); - } else if (rawLine) { - code = smlParseJSON(info, rawLine); - } - if (code != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " smlParseJSON failed:%s", info->id, lines ? *lines : rawLine); - return code; - } - return code; + return smlParseJson(info, lines, rawLine); } char *oldRaw = rawLine; @@ -2037,26 +1635,17 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char } else { code = smlParseTelnetString(info, (char *)tmp, (char *)tmp + len, info->lines + i); } - } else { - code = TSDB_CODE_SML_INVALID_PROTOCOL_TYPE; } if (code != TSDB_CODE_SUCCESS) { if (rawLine != NULL) { - char *print = taosMemoryCalloc(len + 1, 1); - if (print == NULL) { - uError("SML:0x%" PRIx64 " smlParseLine failed. out of memory", info->id); - return code; - } - (void)memcpy(print, tmp, len); - uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, print); - taosMemoryFree(print); + printRaw(info->id, i, numLines, DEBUG_ERROR, tmp, len); } else { - uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, tmp); + uError("SML:0x%" PRIx64 " %s failed. 
line %d : %s", info->id, __FUNCTION__, i, tmp); } return code; } if (info->reRun) { - uDebug("SML:0x%" PRIx64 " smlParseLine re run", info->id); + uDebug("SML:0x%" PRIx64 " %s re run", info->id, __FUNCTION__); i = 0; rawLine = oldRaw; code = smlClearForRerun(info); @@ -2067,32 +1656,24 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char } i++; } - uDebug("SML:0x%" PRIx64 " smlParseLine end", info->id); + uDebug("SML:0x%" PRIx64 " %s end", info->id, __FUNCTION__); return code; } static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawLineEnd, int numLines) { int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; int32_t retryNum = 0; info->cost.parseTime = taosGetTimestampUs(); - code = smlParseLine(info, lines, rawLine, rawLineEnd, numLines); - if (code != 0) { - uError("SML:0x%" PRIx64 " smlParseLine error : %s", info->id, tstrerror(code)); - return code; - } - code = smlParseLineBottom(info); - if (code != 0) { - uError("SML:0x%" PRIx64 " smlParseLineBottom error : %s", info->id, tstrerror(code)); - return code; - } + SML_CHECK_CODE(smlParseStart(info, lines, rawLine, rawLineEnd, numLines)); + SML_CHECK_CODE(smlParseEnd(info)); info->cost.lineNum = info->lineNum; info->cost.numOfSTables = taosHashGetSize(info->superTables); info->cost.numOfCTables = taosHashGetSize(info->childTables); - info->cost.schemaTime = taosGetTimestampUs(); do { @@ -2105,23 +1686,16 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL uInfo("SML:0x%" PRIx64 " smlModifyDBSchemas retry code:%s, times:%d", info->id, tstrerror(code), retryNum); } while (retryNum++ < taosHashGetSize(info->superTables) * MAX_RETRY_TIMES); - if (code != 0) { - uError("SML:0x%" PRIx64 " smlModifyDBSchemas error : %s", info->id, tstrerror(code)); - return code; - } - + SML_CHECK_CODE(code); info->cost.insertBindTime = taosGetTimestampUs(); - code = smlInsertData(info); - if (code != 0) { - uError("SML:0x%" PRIx64 " smlInsertData 
error : %s", info->id, tstrerror(code)); - return code; - } + SML_CHECK_CODE(smlInsertData(info)); - return code; +END: + RETURN } void smlSetReqSQL(SRequestObj *request, char *lines[], char *rawLine, char *rawLineEnd) { - if (request->pTscObj->pAppInfo->monitorParas.tsSlowLogScope & SLOW_LOG_TYPE_INSERT) { + if (request->pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & SLOW_LOG_TYPE_INSERT) { int32_t len = 0; int32_t rlen = 0; char *p = NULL; @@ -2160,29 +1734,17 @@ void smlSetReqSQL(SRequestObj *request, char *lines[], char *rawLine, char *rawL TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine, char *rawLineEnd, int numLines, int protocol, int precision, int32_t ttl, int64_t reqid, char *tbnameKey) { - int32_t code = TSDB_CODE_SUCCESS; - if (NULL == taos) { - uError("SML:taos_schemaless_insert error taos is null"); - return NULL; - } + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; SRequestObj *request = NULL; - SSmlHandle *info = NULL; - int cnt = 0; + SSmlHandle *info = NULL; + int cnt = 0; while (1) { - code = createRequest(*(int64_t *)taos, TSDB_SQL_INSERT, reqid, &request); - if (TSDB_CODE_SUCCESS != code) { - uError("SML:taos_schemaless_insert error request is null"); - return NULL; - } + SML_CHECK_CODE(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &request, reqid)); + SSmlMsgBuf msg = {request->msgBufLen, request->msgBuf}; + request->code = smlBuildSmlInfo(taos, &info); + SML_CHECK_CODE(request->code); - SSmlMsgBuf msg = {ERROR_MSG_BUF_DEFAULT_SIZE, request->msgBuf}; - code = smlBuildSmlInfo(taos, &info); - if (code != TSDB_CODE_SUCCESS) { - request->code = code; - smlBuildInvalidDataMsg(&msg, "init SSmlHandle failed", NULL); - uError("SML:taos_schemaless_insert error SSmlHandle is null, err msg:%s", tstrerror(code)); - goto end; - } info->pRequest = request; info->pRequest->pQuery = info->pQuery; info->ttl = ttl; @@ -2198,20 +1760,20 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], 
char *rawLine, if (request->pDb == NULL) { request->code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; smlBuildInvalidDataMsg(&msg, "Database not specified", NULL); - goto end; + goto END; } if (protocol < TSDB_SML_LINE_PROTOCOL || protocol > TSDB_SML_JSON_PROTOCOL) { request->code = TSDB_CODE_SML_INVALID_PROTOCOL_TYPE; smlBuildInvalidDataMsg(&msg, "protocol invalidate", NULL); - goto end; + goto END; } if (protocol == TSDB_SML_LINE_PROTOCOL && (precision < TSDB_SML_TIMESTAMP_NOT_CONFIGURED || precision > TSDB_SML_TIMESTAMP_NANO_SECONDS)) { request->code = TSDB_CODE_SML_INVALID_PRECISION_TYPE; smlBuildInvalidDataMsg(&msg, "precision invalidate for line protocol", NULL); - goto end; + goto END; } if (protocol == TSDB_SML_JSON_PROTOCOL) { @@ -2219,7 +1781,7 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine, } else if (numLines <= 0) { request->code = TSDB_CODE_SML_INVALID_DATA; smlBuildInvalidDataMsg(&msg, "line num is invalid", NULL); - goto end; + goto END; } code = smlProcess(info, lines, rawLine, rawLineEnd, numLines); @@ -2249,7 +1811,7 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine, break; } -end: +END: smlDestroyInfo(info); return (TAOS_RES *)request; } @@ -2275,6 +1837,16 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine, TAOS_RES *taos_schemaless_insert_ttl_with_reqid_tbname_key(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl, int64_t reqid, char *tbnameKey) { + if (taos == NULL || lines == NULL || numLines < 0) { + terrno = TSDB_CODE_INVALID_PARA; + return NULL; + } + for (int i = 0; i < numLines; i++){ + if (lines[i] == NULL){ + terrno = TSDB_CODE_INVALID_PARA; + return NULL; + } + } return taos_schemaless_insert_inner(taos, lines, NULL, NULL, numLines, protocol, precision, ttl, reqid, tbnameKey); } @@ -2298,26 +1870,32 @@ TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLi reqid); } -static void 
getRawLineLen(char *lines, int len, int32_t *totalRows, int protocol) { +static int32_t getRawLineLen(char *lines, int len, int protocol) { int numLines = 0; - *totalRows = 0; char *tmp = lines; for (int i = 0; i < len; i++) { if (lines[i] == '\n' || i == len - 1) { - numLines++; - if (tmp[0] != '#' || protocol != TSDB_SML_LINE_PROTOCOL) { // ignore comment - (*totalRows)++; + if (!IS_COMMENT(protocol, tmp[0])) { // ignore comment + numLines++; } tmp = lines + i + 1; } } + return numLines; } TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid_tbname_key(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int32_t ttl, int64_t reqid, char *tbnameKey) { - getRawLineLen(lines, len, totalRows, protocol); - return taos_schemaless_insert_inner(taos, NULL, lines, lines + len, *totalRows, protocol, precision, ttl, reqid, + if (taos == NULL || lines == NULL || len < 0) { + terrno = TSDB_CODE_INVALID_PARA; + return NULL; + } + int numLines = getRawLineLen(lines, len, protocol); + if (totalRows != NULL){ + *totalRows = numLines; + } + return taos_schemaless_insert_inner(taos, NULL, lines, lines + len, numLines, protocol, precision, ttl, reqid, tbnameKey); } diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c index ece1ddf61f4..9a27a30d84f 100644 --- a/source/client/src/clientSmlJson.c +++ b/source/client/src/clientSmlJson.c @@ -21,259 +21,10 @@ #define OTD_JSON_SUB_FIELDS_NUM 2 -#define JUMP_JSON_SPACE(start) \ - while (*(start)) { \ - if (unlikely(*(start) > 32)) \ - break; \ - else \ - (start)++; \ - } - -static int32_t smlJsonGetObj(char **payload) { - int leftBracketCnt = 0; - bool isInQuote = false; - while (**payload) { - if (**payload == '"' && *((*payload) - 1) != '\\') { - isInQuote = !isInQuote; - } else if (!isInQuote && unlikely(**payload == '{')) { - leftBracketCnt++; - (*payload)++; - continue; - } else if (!isInQuote && unlikely(**payload == '}')) { - leftBracketCnt--; - (*payload)++; - if 
(leftBracketCnt == 0) { - return 0; - } else if (leftBracketCnt < 0) { - return -1; - } - continue; - } - (*payload)++; - } - return -1; -} - -int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset) { - int index = 0; - while (*(*start)) { - if ((*start)[0] != '"') { - (*start)++; - continue; - } - - if (unlikely(index >= OTD_JSON_FIELDS_NUM)) { - uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start); - return TSDB_CODE_TSC_INVALID_JSON; - } - - char *sTmp = *start; - if ((*start)[1] == 'm' && (*start)[2] == 'e' && (*start)[3] == 't' && (*start)[4] == 'r' && (*start)[5] == 'i' && - (*start)[6] == 'c' && (*start)[7] == '"') { - (*start) += 8; - bool isInQuote = false; - while (*(*start)) { - if (unlikely(!isInQuote && *(*start) == '"')) { - (*start)++; - offset[index++] = *start - sTmp; - element->measure = (*start); - isInQuote = true; - continue; - } - if (unlikely(isInQuote && *(*start) == '"')) { - element->measureLen = (*start) - element->measure; - (*start)++; - break; - } - (*start)++; - } - } else if ((*start)[1] == 't' && (*start)[2] == 'i' && (*start)[3] == 'm' && (*start)[4] == 'e' && - (*start)[5] == 's' && (*start)[6] == 't' && (*start)[7] == 'a' && (*start)[8] == 'm' && - (*start)[9] == 'p' && (*start)[10] == '"') { - (*start) += 11; - bool hasColon = false; - while (*(*start)) { - if (unlikely(!hasColon && *(*start) == ':')) { - (*start)++; - JUMP_JSON_SPACE((*start)) - offset[index++] = *start - sTmp; - element->timestamp = (*start); - if (*(*start) == '{') { - char *tmp = *start; - int32_t code = smlJsonGetObj(&tmp); - if (code == 0) { - element->timestampLen = tmp - (*start); - *start = tmp; - } - break; - } - hasColon = true; - continue; - } - if (unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))) { - element->timestampLen = (*start) - element->timestamp; - break; - } - (*start)++; - } - } else if ((*start)[1] == 'v' && (*start)[2] == 'a' && (*start)[3] == 'l' && (*start)[4] == 'u' && - 
(*start)[5] == 'e' && (*start)[6] == '"') { - (*start) += 7; - - bool hasColon = false; - while (*(*start)) { - if (unlikely(!hasColon && *(*start) == ':')) { - (*start)++; - JUMP_JSON_SPACE((*start)) - offset[index++] = *start - sTmp; - element->cols = (*start); - if (*(*start) == '{') { - char *tmp = *start; - int32_t code = smlJsonGetObj(&tmp); - if (code == 0) { - element->colsLen = tmp - (*start); - *start = tmp; - } - break; - } - hasColon = true; - continue; - } - if (unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))) { - element->colsLen = (*start) - element->cols; - break; - } - (*start)++; - } - } else if ((*start)[1] == 't' && (*start)[2] == 'a' && (*start)[3] == 'g' && (*start)[4] == 's' && - (*start)[5] == '"') { - (*start) += 6; - - while (*(*start)) { - if (unlikely(*(*start) == ':')) { - (*start)++; - JUMP_JSON_SPACE((*start)) - offset[index++] = *start - sTmp; - element->tags = (*start); - char *tmp = *start; - int32_t code = smlJsonGetObj(&tmp); - if (code == 0) { - element->tagsLen = tmp - (*start); - *start = tmp; - } - break; - } - (*start)++; - } - } - if (*(*start) == '\0') { - break; - } - if (*(*start) == '}') { - (*start)++; - break; - } - (*start)++; - } - - if (unlikely(index != OTD_JSON_FIELDS_NUM) || element->tags == NULL || element->cols == NULL || - element->measure == NULL || element->timestamp == NULL) { - uError("elements != %d or element parse null", OTD_JSON_FIELDS_NUM); - return TSDB_CODE_TSC_INVALID_JSON; - } - return 0; -} - -int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset) { - int index = 0; - while (*(*start)) { - if ((*start)[0] != '"') { - (*start)++; - continue; - } - - if (unlikely(index >= OTD_JSON_FIELDS_NUM)) { - uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start); - return TSDB_CODE_TSC_INVALID_JSON; - } - - if ((*start)[1] == 'm') { - (*start) += offset[index++]; - element->measure = *start; - while (*(*start)) { - if (unlikely(*(*start) == '"')) { - 
element->measureLen = (*start) - element->measure; - (*start)++; - break; - } - (*start)++; - } - } else if ((*start)[1] == 't' && (*start)[2] == 'i') { - (*start) += offset[index++]; - element->timestamp = *start; - if (*(*start) == '{') { - char *tmp = *start; - int32_t code = smlJsonGetObj(&tmp); - if (code == 0) { - element->timestampLen = tmp - (*start); - *start = tmp; - } - } else { - while (*(*start)) { - if (unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)) { - element->timestampLen = (*start) - element->timestamp; - break; - } - (*start)++; - } - } - } else if ((*start)[1] == 'v') { - (*start) += offset[index++]; - element->cols = *start; - if (*(*start) == '{') { - char *tmp = *start; - int32_t code = smlJsonGetObj(&tmp); - if (code == 0) { - element->colsLen = tmp - (*start); - *start = tmp; - } - } else { - while (*(*start)) { - if (unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)) { - element->colsLen = (*start) - element->cols; - break; - } - (*start)++; - } - } - } else if ((*start)[1] == 't' && (*start)[2] == 'a') { - (*start) += offset[index++]; - element->tags = (*start); - char *tmp = *start; - int32_t code = smlJsonGetObj(&tmp); - if (code == 0) { - element->tagsLen = tmp - (*start); - *start = tmp; - } - } - if (*(*start) == '}') { - (*start)++; - break; - } - (*start)++; - } - - if (unlikely(index != 0 && index != OTD_JSON_FIELDS_NUM)) { - uError("elements != %d", OTD_JSON_FIELDS_NUM); - return TSDB_CODE_TSC_INVALID_JSON; - } - return TSDB_CODE_SUCCESS; -} - static inline int32_t smlParseMetricFromJSON(SSmlHandle *info, cJSON *metric, SSmlLineInfo *elements) { elements->measureLen = strlen(metric->valuestring); if (IS_INVALID_TABLE_LEN(elements->measureLen)) { - uError("OTD:0x%" PRIx64 " Metric length is 0 or large than 192", info->id); + uError("SML:0x%" PRIx64 " Metric length is 0 or large than 192", info->id); return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; } @@ -293,7 +44,7 @@ static int32_t 
smlGetJsonElements(cJSON *root, cJSON ***marks) { child = child->next; } if (*marks[i] == NULL) { - uError("smlGetJsonElements error, not find mark:%d:%s", i, jsonName[i]); + uError("SML %s error, not find mark:%d:%s", __FUNCTION__, i, jsonName[i]); return TSDB_CODE_TSC_INVALID_JSON; } } @@ -302,7 +53,7 @@ static int32_t smlGetJsonElements(cJSON *root, cJSON ***marks) { static int32_t smlConvertJSONBool(SSmlKv *pVal, char *typeStr, cJSON *value) { if (strcasecmp(typeStr, "bool") != 0) { - uError("OTD:invalid type(%s) for JSON Bool", typeStr); + uError("SML:invalid type(%s) for JSON Bool", typeStr); return TSDB_CODE_TSC_INVALID_JSON_TYPE; } pVal->type = TSDB_DATA_TYPE_BOOL; @@ -316,7 +67,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) { // tinyint if (strcasecmp(typeStr, "i8") == 0 || strcasecmp(typeStr, "tinyint") == 0) { if (!IS_VALID_TINYINT(value->valuedouble)) { - uError("OTD:JSON value(%f) cannot fit in type(tinyint)", value->valuedouble); + uError("SML:JSON value(%f) cannot fit in type(tinyint)", value->valuedouble); return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; } pVal->type = TSDB_DATA_TYPE_TINYINT; @@ -327,7 +78,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) { // smallint if (strcasecmp(typeStr, "i16") == 0 || strcasecmp(typeStr, "smallint") == 0) { if (!IS_VALID_SMALLINT(value->valuedouble)) { - uError("OTD:JSON value(%f) cannot fit in type(smallint)", value->valuedouble); + uError("SML:JSON value(%f) cannot fit in type(smallint)", value->valuedouble); return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; } pVal->type = TSDB_DATA_TYPE_SMALLINT; @@ -338,7 +89,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) { // int if (strcasecmp(typeStr, "i32") == 0 || strcasecmp(typeStr, "int") == 0) { if (!IS_VALID_INT(value->valuedouble)) { - uError("OTD:JSON value(%f) cannot fit in type(int)", value->valuedouble); + uError("SML:JSON value(%f) cannot fit in type(int)", 
value->valuedouble); return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; } pVal->type = TSDB_DATA_TYPE_INT; @@ -362,7 +113,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) { // float if (strcasecmp(typeStr, "f32") == 0 || strcasecmp(typeStr, "float") == 0) { if (!IS_VALID_FLOAT(value->valuedouble)) { - uError("OTD:JSON value(%f) cannot fit in type(float)", value->valuedouble); + uError("SML:JSON value(%f) cannot fit in type(float)", value->valuedouble); return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE; } pVal->type = TSDB_DATA_TYPE_FLOAT; @@ -379,7 +130,7 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) { } // if reach here means type is unsupported - uError("OTD:invalid type(%s) for JSON Number", typeStr); + uError("SML:invalid type(%s) for JSON Number", typeStr); return TSDB_CODE_TSC_INVALID_JSON_TYPE; } @@ -391,7 +142,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) { } else if (strcasecmp(typeStr, "nchar") == 0) { pVal->type = TSDB_DATA_TYPE_NCHAR; } else { - uError("OTD:invalid type(%s) for JSON String", typeStr); + uError("SML:invalid type(%s) for JSON String", typeStr); return TSDB_CODE_TSC_INVALID_JSON_TYPE; } pVal->length = strlen(value->valuestring); @@ -474,7 +225,7 @@ static int32_t smlParseValueFromJSON(cJSON *root, SSmlKv *kv) { case cJSON_String: { int32_t ret = smlConvertJSONString(kv, "binary", root); if (ret != TSDB_CODE_SUCCESS) { - uError("OTD:Failed to parse binary value from JSON Obj"); + uError("SML:Failed to parse binary value from JSON Obj"); return ret; } break; @@ -482,7 +233,7 @@ static int32_t smlParseValueFromJSON(cJSON *root, SSmlKv *kv) { case cJSON_Object: { int32_t ret = smlParseValueFromJSONObj(root, kv); if (ret != TSDB_CODE_SUCCESS) { - uError("OTD:Failed to parse value from JSON Obj"); + uError("SML:Failed to parse value from JSON Obj"); return ret; } break; @@ -511,7 +262,7 @@ static int32_t smlProcessTagJson(SSmlHandle *info, cJSON *tags){ } 
size_t keyLen = strlen(tag->string); if (unlikely(IS_INVALID_COL_LEN(keyLen))) { - uError("OTD:Tag key length is 0 or too large than 64"); + uError("SML:Tag key length is 0 or too large than 64"); return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH; } @@ -539,28 +290,24 @@ static int32_t smlProcessTagJson(SSmlHandle *info, cJSON *tags){ } static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo *elements) { - int32_t ret = 0; - if(info->dataFormat){ - ret = smlProcessSuperTable(info, elements); - if(ret != 0){ - if(info->reRun){ - return TSDB_CODE_SUCCESS; - } - return ret; - } - } - ret = smlProcessTagJson(info, tags); - if(ret != 0){ - if(info->reRun){ - return TSDB_CODE_SUCCESS; - } - return ret; + if (is_same_child_table_telnet(elements, &info->preLine) == 0) { + elements->measureTag = info->preLine.measureTag; + return TSDB_CODE_SUCCESS; } - ret = smlJoinMeasureTag(elements); - if(ret != 0){ - return ret; + int32_t code = 0; + int32_t lino = 0; + if(info->dataFormat){ + SML_CHECK_CODE(smlProcessSuperTable(info, elements)); } + SML_CHECK_CODE(smlProcessTagJson(info, tags)); + SML_CHECK_CODE(smlJoinMeasureTag(elements)); return smlProcessChildTable(info, elements); + +END: + if(info->reRun){ + return TSDB_CODE_SUCCESS; + } + RETURN } static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPrecision) { @@ -678,7 +425,8 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) { } static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo *elements) { - int32_t ret = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; cJSON *metricJson = NULL; cJSON *tsJson = NULL; @@ -688,57 +436,27 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo int32_t size = cJSON_GetArraySize(root); // outmost json fields has to be exactly 4 if (size != OTD_JSON_FIELDS_NUM) { - uError("OTD:0x%" PRIx64 " Invalid number of JSON fields in data point %d", info->id, 
size); + uError("SML:0x%" PRIx64 " Invalid number of JSON fields in data point %d", info->id, size); return TSDB_CODE_TSC_INVALID_JSON; } cJSON **marks[OTD_JSON_FIELDS_NUM] = {&metricJson, &tsJson, &valueJson, &tagsJson}; - ret = smlGetJsonElements(root, marks); - if (unlikely(ret != TSDB_CODE_SUCCESS)) { - return ret; - } - + SML_CHECK_CODE(smlGetJsonElements(root, marks)); // Parse metric - ret = smlParseMetricFromJSON(info, metricJson, elements); - if (unlikely(ret != TSDB_CODE_SUCCESS)) { - uError("OTD:0x%" PRIx64 " Unable to parse metric from JSON payload", info->id); - return ret; - } - + SML_CHECK_CODE(smlParseMetricFromJSON(info, metricJson, elements)); // Parse metric value SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN}; - ret = smlParseValueFromJSON(valueJson, &kv); - if (unlikely(ret)) { - uError("OTD:0x%" PRIx64 " Unable to parse metric value from JSON payload", info->id); - return ret; - } + SML_CHECK_CODE(smlParseValueFromJSON(valueJson, &kv)); // Parse tags - bool needFree = info->dataFormat; elements->tags = cJSON_PrintUnformatted(tagsJson); - if (elements->tags == NULL){ - return TSDB_CODE_OUT_OF_MEMORY; - } - elements->tagsLen = strlen(elements->tags); - if (is_same_child_table_telnet(elements, &info->preLine) != 0) { - ret = smlParseTagsFromJSON(info, tagsJson, elements); - if (unlikely(ret)) { - uError("OTD:0x%" PRIx64 " Unable to parse tags from JSON payload", info->id); - taosMemoryFree(elements->tags); - elements->tags = NULL; - return ret; - } - } else { - elements->measureTag = info->preLine.measureTag; - } + SML_CHECK_NULL(elements->tags); - if (needFree) { - taosMemoryFree(elements->tags); - elements->tags = NULL; - } + elements->tagsLen = strlen(elements->tags); + SML_CHECK_CODE(smlParseTagsFromJSON(info, tagsJson, elements)); if (unlikely(info->reRun)) { - return TSDB_CODE_SUCCESS; + goto END; } // Parse timestamp @@ -747,29 +465,34 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo if (unlikely(ts < 
0)) { char* tmp = cJSON_PrintUnformatted(tsJson); if (tmp == NULL) { - uError("cJSON_PrintUnformatted failed since %s", tstrerror(TSDB_CODE_OUT_OF_MEMORY)); - uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %" PRId64, info->id, info->msgBuf.buf, ts); + uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %" PRId64, info->id, info->msgBuf.buf, ts); } else { - uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %s %" PRId64, info->id, info->msgBuf.buf,tmp, ts); + uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload %s %s %" PRId64, info->id, info->msgBuf.buf,tmp, ts); taosMemoryFree(tmp); } - return TSDB_CODE_INVALID_TIMESTAMP; + SML_CHECK_CODE(TSDB_CODE_INVALID_TIMESTAMP); } SSmlKv kvTs = {0}; smlBuildTsKv(&kvTs, ts); + if (info->dataFormat){ + code = smlParseEndTelnetJsonFormat(info, elements, &kvTs, &kv); + } else { + code = smlParseEndTelnetJsonUnFormat(info, elements, &kvTs, &kv); + } + SML_CHECK_CODE(code); + taosMemoryFreeClear(info->preLine.tags); + info->preLine = *elements; + elements->tags = NULL; - return smlParseEndTelnetJson(info, elements, &kvTs, &kv); +END: + taosMemoryFree(elements->tags); + RETURN } -static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) { +int32_t smlParseJSONExt(SSmlHandle *info, char *payload) { int32_t payloadNum = 0; int32_t ret = TSDB_CODE_SUCCESS; - if (unlikely(payload == NULL)) { - uError("SML:0x%" PRIx64 " empty JSON Payload", info->id); - return TSDB_CODE_TSC_INVALID_JSON; - } - info->root = cJSON_Parse(payload); if (unlikely(info->root == NULL)) { uError("SML:0x%" PRIx64 " parse json failed:%s", info->id, payload); @@ -782,27 +505,11 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) { } else if (cJSON_IsObject(info->root)) { payloadNum = 1; } else { - uError("SML:0x%" PRIx64 " Invalid JSON Payload 3:%s", info->id, payload); + uError("SML:0x%" PRIx64 " Invalid JSON type:%s", info->id, payload); return 
TSDB_CODE_TSC_INVALID_JSON; } - if (unlikely(info->lines != NULL)) { - for (int i = 0; i < info->lineNum; i++) { - taosArrayDestroyEx(info->lines[i].colArray, freeSSmlKv); - if (info->lines[i].measureTagsLen != 0) taosMemoryFree(info->lines[i].measureTag); - } - taosMemoryFree(info->lines); - info->lines = NULL; - } info->lineNum = payloadNum; - info->dataFormat = true; - - ret = smlClearForRerun(info); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - - info->parseJsonByLib = true; cJSON *head = (payloadNum == 1 && cJSON_IsObject(info->root)) ? info->root : info->root->child; int cnt = 0; @@ -811,6 +518,7 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) { if (info->dataFormat) { SSmlLineInfo element = {0}; ret = smlParseJSONStringExt(info, dataPoint, &element); + if (element.measureTagsLen != 0) taosMemoryFree(element.measureTag); } else { ret = smlParseJSONStringExt(info, dataPoint, info->lines + cnt); } @@ -836,164 +544,3 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) { return TSDB_CODE_SUCCESS; } -static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo *elements) { - int32_t ret = TSDB_CODE_SUCCESS; - - if (info->offset[0] == 0) { - ret = smlJsonParseObjFirst(start, elements, info->offset); - } else { - ret = smlJsonParseObj(start, elements, info->offset); - } - - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - - if (unlikely(**start == '\0' && elements->measure == NULL)) return TSDB_CODE_SUCCESS; - - if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen))) { - smlBuildInvalidDataMsg(&info->msgBuf, "measure is empty or too large than 192", NULL); - return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; - } - - SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen}; - - if (unlikely(elements->colsLen == 0)) { - uError("SML:colsLen == 0"); - return TSDB_CODE_TSC_INVALID_VALUE; - } else if (unlikely(elements->cols[0] == '{')) { - char tmp = 
elements->cols[elements->colsLen]; - elements->cols[elements->colsLen] = '\0'; - cJSON *valueJson = cJSON_Parse(elements->cols); - if (unlikely(valueJson == NULL)) { - uError("SML:0x%" PRIx64 " parse json cols failed:%s", info->id, elements->cols); - elements->cols[elements->colsLen] = tmp; - return TSDB_CODE_TSC_INVALID_JSON; - } - if (taosArrayPush(info->tagJsonArray, &valueJson) == NULL){ - cJSON_Delete(valueJson); - elements->cols[elements->colsLen] = tmp; - return terrno; - } - ret = smlParseValueFromJSONObj(valueJson, &kv); - if (ret != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " Failed to parse value from JSON Obj:%s", info->id, elements->cols); - elements->cols[elements->colsLen] = tmp; - return TSDB_CODE_TSC_INVALID_VALUE; - } - elements->cols[elements->colsLen] = tmp; - } else if (smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) { - uError("SML:0x%" PRIx64 " cols invalidate:%s", info->id, elements->cols); - return TSDB_CODE_TSC_INVALID_VALUE; - } - - // Parse tags - if (is_same_child_table_telnet(elements, &info->preLine) != 0) { - char tmp = *(elements->tags + elements->tagsLen); - *(elements->tags + elements->tagsLen) = 0; - cJSON *tagsJson = cJSON_Parse(elements->tags); - *(elements->tags + elements->tagsLen) = tmp; - if (unlikely(tagsJson == NULL)) { - uError("SML:0x%" PRIx64 " parse json tag failed:%s", info->id, elements->tags); - return TSDB_CODE_TSC_INVALID_JSON; - } - - if (taosArrayPush(info->tagJsonArray, &tagsJson) == NULL){ - cJSON_Delete(tagsJson); - uError("SML:0x%" PRIx64 " taosArrayPush failed", info->id); - return terrno; - } - ret = smlParseTagsFromJSON(info, tagsJson, elements); - if (unlikely(ret)) { - uError("OTD:0x%" PRIx64 " Unable to parse tags from JSON payload", info->id); - return ret; - } - } else { - elements->measureTag = info->preLine.measureTag; - } - - if (unlikely(info->reRun)) { - return TSDB_CODE_SUCCESS; - } - - // Parse timestamp - // notice!!! 
put ts back to tag to ensure get meta->precision - int64_t ts = 0; - if (unlikely(elements->timestampLen == 0)) { - uError("OTD:0x%" PRIx64 " elements->timestampLen == 0", info->id); - return TSDB_CODE_INVALID_TIMESTAMP; - } else if (elements->timestamp[0] == '{') { - char tmp = elements->timestamp[elements->timestampLen]; - elements->timestamp[elements->timestampLen] = '\0'; - cJSON *tsJson = cJSON_Parse(elements->timestamp); - ts = smlParseTSFromJSON(info, tsJson); - if (unlikely(ts < 0)) { - uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload:%s", info->id, elements->timestamp); - elements->timestamp[elements->timestampLen] = tmp; - cJSON_Delete(tsJson); - return TSDB_CODE_INVALID_TIMESTAMP; - } - elements->timestamp[elements->timestampLen] = tmp; - cJSON_Delete(tsJson); - } else { - ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen); - if (unlikely(ts < 0)) { - uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload", info->id); - return TSDB_CODE_INVALID_TIMESTAMP; - } - } - SSmlKv kvTs = {0}; - smlBuildTsKv(&kvTs, ts); - - return smlParseEndTelnetJson(info, elements, &kvTs, &kv); -} - -int32_t smlParseJSON(SSmlHandle *info, char *payload) { - int32_t payloadNum = 1 << 15; - int32_t ret = TSDB_CODE_SUCCESS; - - uDebug("SML:0x%" PRIx64 "json:%s", info->id, payload); - int cnt = 0; - char *dataPointStart = payload; - while (1) { - if (info->dataFormat) { - SSmlLineInfo element = {0}; - ret = smlParseJSONString(info, &dataPointStart, &element); - if (element.measureTagsLen != 0) taosMemoryFree(element.measureTag); - } else { - if (cnt >= payloadNum) { - payloadNum = payloadNum << 1; - void *tmp = taosMemoryRealloc(info->lines, payloadNum * sizeof(SSmlLineInfo)); - if (tmp == NULL) { - ret = terrno; - return ret; - } - info->lines = (SSmlLineInfo *)tmp; - (void)memset(info->lines + cnt, 0, (payloadNum - cnt) * sizeof(SSmlLineInfo)); - } - ret = smlParseJSONString(info, &dataPointStart, info->lines + cnt); - 
if ((info->lines + cnt)->measure == NULL) break; - } - if (unlikely(ret != TSDB_CODE_SUCCESS)) { - uError("SML:0x%" PRIx64 " Invalid JSON Payload 1:%s", info->id, payload); - return smlParseJSONExt(info, payload); - } - - if (unlikely(info->reRun)) { - cnt = 0; - dataPointStart = payload; - info->lineNum = payloadNum; - ret = smlClearForRerun(info); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - continue; - } - - cnt++; - if (*dataPointStart == '\0') break; - } - info->lineNum = cnt; - - return TSDB_CODE_SUCCESS; -} diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index c1f3431698d..b54f4e0beb4 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -63,7 +63,7 @@ static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t le int64_t ts = smlGetTimeValue(data, len, fromPrecision, toPrecision); if (unlikely(ts == -1)) { - smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", data); + smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid timestamp", data); return TSDB_CODE_SML_INVALID_DATA; } return ts; @@ -84,7 +84,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { } if (pVal->value[0] == 'l' || pVal->value[0] == 'L') { // nchar - if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3) { + if (pVal->length >= NCHAR_ADD_LEN && pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"') { pVal->type = TSDB_DATA_TYPE_NCHAR; pVal->length -= NCHAR_ADD_LEN; if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { @@ -97,7 +97,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { } if (pVal->value[0] == 'g' || pVal->value[0] == 'G') { // geometry - if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= sizeof("POINT")+3) { + if (pVal->length >= NCHAR_ADD_LEN && pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"') { int32_t code = initCtxGeomFromText(); if (code 
!= TSDB_CODE_SUCCESS) { return code; @@ -124,7 +124,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { } if (pVal->value[0] == 'b' || pVal->value[0] == 'B') { // varbinary - if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3) { + if (pVal->length >= NCHAR_ADD_LEN && pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"') { pVal->type = TSDB_DATA_TYPE_VARBINARY; if(isHex(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN)){ if(!isValidateHex(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN)){ @@ -298,7 +298,7 @@ static int32_t smlProcessTagLine(SSmlHandle *info, char **sql, char *sqlEnd){ } if (info->dataFormat && !isSmlTagAligned(info, cnt, &kv)) { - return TSDB_CODE_TSC_INVALID_JSON; + return TSDB_CODE_SML_INVALID_DATA; } cnt++; @@ -311,31 +311,24 @@ static int32_t smlProcessTagLine(SSmlHandle *info, char **sql, char *sqlEnd){ } static int32_t smlParseTagLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *elements) { + int32_t code = 0; + int32_t lino = 0; bool isSameCTable = IS_SAME_CHILD_TABLE; if(isSameCTable){ return TSDB_CODE_SUCCESS; } - int32_t ret = 0; if(info->dataFormat){ - ret = smlProcessSuperTable(info, elements); - if(ret != 0){ - if(info->reRun){ - return TSDB_CODE_SUCCESS; - } - return ret; - } + SML_CHECK_CODE(smlProcessSuperTable(info, elements)); } + SML_CHECK_CODE(smlProcessTagLine(info, sql, sqlEnd)); + return smlProcessChildTable(info, elements); - ret = smlProcessTagLine(info, sql, sqlEnd); - if(ret != 0){ - if (info->reRun){ - return TSDB_CODE_SUCCESS; - } - return ret; +END: + if(info->reRun){ + return TSDB_CODE_SUCCESS; } - - return smlProcessChildTable(info, elements); + RETURN } static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *currElement) { @@ -353,7 +346,7 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL const char *escapeChar = NULL; while (*sql < sqlEnd) { if 
(unlikely(IS_SPACE(*sql,escapeChar) || IS_COMMA(*sql,escapeChar))) { - smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql); + smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid data", *sql); return TSDB_CODE_SML_INVALID_DATA; } if (unlikely(IS_EQUAL(*sql,escapeChar))) { @@ -370,7 +363,7 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL } if (unlikely(IS_INVALID_COL_LEN(keyLen - keyLenEscaped))) { - smlBuildInvalidDataMsg(&info->msgBuf, "invalid key or key is too long than 64", key); + smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid key or key is too long than 64", key); return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH; } @@ -404,18 +397,18 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL valueLen = *sql - value; if (unlikely(quoteNum != 0 && quoteNum != 2)) { - smlBuildInvalidDataMsg(&info->msgBuf, "unbalanced quotes", value); + smlBuildInvalidDataMsg(&info->msgBuf, "SML line unbalanced quotes", value); return TSDB_CODE_SML_INVALID_DATA; } if (unlikely(valueLen == 0)) { - smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", value); + smlBuildInvalidDataMsg(&info->msgBuf, "SML line invalid value", value); return TSDB_CODE_SML_INVALID_DATA; } SSmlKv kv = {.key = key, .keyLen = keyLen, .value = value, .length = valueLen}; int32_t ret = smlParseValue(&kv, &info->msgBuf); if (ret != TSDB_CODE_SUCCESS) { - smlBuildInvalidDataMsg(&info->msgBuf, "smlParseValue error", value); + uError("SML:0x%" PRIx64 " %s parse value error:%d.", info->id, __FUNCTION__, ret); return ret; } @@ -437,11 +430,6 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL } (void)memcpy(tmp, kv.value, kv.length); PROCESS_SLASH_IN_FIELD_VALUE(tmp, kv.length); - if(kv.type == TSDB_DATA_TYPE_GEOMETRY) { - uError("SML:0x%" PRIx64 " smlParseColLine error, invalid GEOMETRY type.", info->id); - taosMemoryFree((void*)kv.value); - return TSDB_CODE_TSC_INVALID_VALUE; - } if(kv.type == 
TSDB_DATA_TYPE_VARBINARY){ taosMemoryFree((void*)kv.value); } @@ -510,7 +498,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine } elements->measureLen = sql - elements->measure; if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen - measureLenEscaped))) { - smlBuildInvalidDataMsg(&info->msgBuf, "measure is empty or too large than 192", NULL); + smlBuildInvalidDataMsg(&info->msgBuf, "SML line measure is empty or too large than 192", NULL); return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; } @@ -557,7 +545,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine elements->colsLen = sql - elements->cols; if (unlikely(elements->colsLen == 0)) { - smlBuildInvalidDataMsg(&info->msgBuf, "cols is empty", NULL); + smlBuildInvalidDataMsg(&info->msgBuf, "SML line cols is empty", NULL); return TSDB_CODE_SML_INVALID_DATA; } @@ -574,7 +562,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine int64_t ts = smlParseInfluxTime(info, elements->timestamp, elements->timestampLen); if (unlikely(ts <= 0)) { - uError("SML:0x%" PRIx64 " smlParseTS error:%" PRId64, info->id, ts); + uError("SML:0x%" PRIx64 " %s error:%" PRId64, info->id, __FUNCTION__, ts); return TSDB_CODE_INVALID_TIMESTAMP; } diff --git a/source/client/src/clientSmlTelnet.c b/source/client/src/clientSmlTelnet.c index e8601e33bc8..dd264da11e0 100644 --- a/source/client/src/clientSmlTelnet.c +++ b/source/client/src/clientSmlTelnet.c @@ -148,31 +148,21 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS return TSDB_CODE_SUCCESS; } - int32_t ret = 0; + int32_t code = 0; + int32_t lino = 0; if(info->dataFormat){ - ret = smlProcessSuperTable(info, elements); - if(ret != 0){ - if(info->reRun){ - return TSDB_CODE_SUCCESS; - } - return ret; - } + SML_CHECK_CODE(smlProcessSuperTable(info, elements)); } + SML_CHECK_CODE(smlProcessTagTelnet(info, data, sqlEnd)); + SML_CHECK_CODE(smlJoinMeasureTag(elements)); - 
ret = smlProcessTagTelnet(info, data, sqlEnd); - if(ret != 0){ - if (info->reRun){ - return TSDB_CODE_SUCCESS; - } - return ret; - } + code = smlProcessChildTable(info, elements); - ret = smlJoinMeasureTag(elements); - if(ret != 0){ - return ret; +END: + if(info->reRun){ + return TSDB_CODE_SUCCESS; } - - return smlProcessChildTable(info, elements); + RETURN } // format: =[ =] @@ -182,14 +172,14 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine // parse metric smlParseTelnetElement(&sql, sqlEnd, &elements->measure, &elements->measureLen); if (unlikely((!(elements->measure) || IS_INVALID_TABLE_LEN(elements->measureLen)))) { - smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", sql); + smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid measure", sql); return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; } // parse timestamp smlParseTelnetElement(&sql, sqlEnd, &elements->timestamp, &elements->timestampLen); if (unlikely(!elements->timestamp || elements->timestampLen == 0)) { - smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql); + smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid timestamp", sql); return TSDB_CODE_SML_INVALID_DATA; } @@ -199,19 +189,21 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine } int64_t ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen); if (unlikely(ts < 0)) { - smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql); + smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet parse timestamp failed", sql); return TSDB_CODE_INVALID_TIMESTAMP; } // parse value smlParseTelnetElement(&sql, sqlEnd, &elements->cols, &elements->colsLen); if (unlikely(!elements->cols || elements->colsLen == 0)) { - smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql); + smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid value", sql); return TSDB_CODE_TSC_INVALID_VALUE; } SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = 
elements->cols, .length = (size_t)elements->colsLen}; - if (smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) { + int ret = smlParseValue(&kv, &info->msgBuf); + if (ret != TSDB_CODE_SUCCESS) { + uError("SML:0x%" PRIx64 " %s parse value error:%d.", info->id, __FUNCTION__, ret); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -220,11 +212,11 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine elements->tags = sql; elements->tagsLen = sqlEnd - sql; if (unlikely(!elements->tags || elements->tagsLen == 0)) { - smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql); + smlBuildInvalidDataMsg(&info->msgBuf, "SML telnet invalid tag value", sql); return TSDB_CODE_TSC_INVALID_VALUE; } - int ret = smlParseTelnetTags(info, sql, sqlEnd, elements); + ret = smlParseTelnetTags(info, sql, sqlEnd, elements); if (unlikely(ret != TSDB_CODE_SUCCESS)) { return ret; } @@ -239,5 +231,12 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine kvTs.i = convertTimePrecision(kvTs.i, TSDB_TIME_PRECISION_NANO, info->currSTableMeta->tableInfo.precision); } - return smlParseEndTelnetJson(info, elements, &kvTs, &kv); + if (info->dataFormat){ + ret = smlParseEndTelnetJsonFormat(info, elements, &kvTs, &kv); + } else { + ret = smlParseEndTelnetJsonUnFormat(info, elements, &kvTs, &kv); + } + info->preLine = *elements; + + return ret; } \ No newline at end of file diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 2f046b61d62..4bbfc6afaa7 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -1068,6 +1068,34 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E return TSDB_CODE_SUCCESS; } + +static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_STB** fields) { + if (pStmt->errCode != TSDB_CODE_SUCCESS) { + return pStmt->errCode; + } + + if (STMT_TYPE_QUERY == pStmt->sql.type) { + tscError("invalid operation to get 
query column fileds"); + STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR); + } + + STableDataCxt** pDataBlock = NULL; + + if (pStmt->sql.stbInterlaceMode) { + pDataBlock = &pStmt->sql.siInfo.pDataCtx; + } else { + pDataBlock = + (STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName)); + if (NULL == pDataBlock) { + tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName); + STMT_ERR_RET(TSDB_CODE_APP_ERROR); + } + } + + STMT_ERR_RET(qBuildStmtStbColFields(*pDataBlock, fieldNum, fields)); + + return TSDB_CODE_SUCCESS; +} /* SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) { while (true) { @@ -1808,7 +1836,7 @@ int stmtGetTagFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { return code; } -int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { +int stmtParseColFields2(TAOS_STMT2* stmt) { int32_t code = 0; STscStmt2* pStmt = (STscStmt2*)stmt; int32_t preCode = pStmt->errCode; @@ -1842,8 +1870,6 @@ int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { STMT_ERRI_JRET(stmtParseSql(pStmt)); } - STMT_ERRI_JRET(stmtFetchColFields2(stmt, nums, fields)); - _return: pStmt->errCode = preCode; @@ -1851,6 +1877,24 @@ int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { return code; } +int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { + int32_t code = stmtParseColFields2(stmt); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + return stmtFetchColFields2(stmt, nums, fields); +} + +int stmtGetStbColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_STB** fields) { + int32_t code = stmtParseColFields2(stmt); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + return stmtFetchStbColFields2(stmt, nums, fields); +} + int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) { STscStmt2* pStmt = (STscStmt2*)stmt; diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 232c4a57751..bdca3882d9b 
100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -24,12 +24,9 @@ #include "tref.h" #include "ttimer.h" -#define tqFatalC(...) do { if (cDebugFlag & DEBUG_FATAL || tqClientDebug) { taosPrintLog("TQ FATAL ", DEBUG_FATAL, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebug) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqWarnC(...) do { if (cDebugFlag & DEBUG_WARN || tqClientDebug) { taosPrintLog("TQ WARN ", DEBUG_WARN, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebug) { taosPrintLog("TQ ", DEBUG_INFO, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebug) { taosPrintLog("TQ ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqTraceC(...) do { if (cDebugFlag & DEBUG_TRACE || tqClientDebug) { taosPrintLog("TQ ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0) +#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebugFlag & DEBUG_ERROR) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0) +#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebugFlag & DEBUG_INFO) { taosPrintLog("TQ ", DEBUG_INFO, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0) +#define tqDebugC(...) 
do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ ", DEBUG_DEBUG, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0) #define EMPTY_BLOCK_POLL_IDLE_DURATION 10 #define DEFAULT_AUTO_COMMIT_INTERVAL 5000 @@ -831,8 +828,8 @@ static int32_t innerCommitAll(tmq_t* tmq, SMqCommitCbParamSet* pParamSet){ } code = innerCommit(tmq, pTopic->topicName, &pVg->offsetInfo.endOffset, pVg, pParamSet); - if (code != 0){ - tqDebugC("consumer:0x%" PRIx64 " topic:%s vgId:%d, no commit, code:%s, current offset version:%" PRId64 ", ordinal:%d/%d", + if (code != 0 && code != TSDB_CODE_TMQ_SAME_COMMITTED_VALUE){ + tqErrorC("consumer:0x%" PRIx64 " topic:%s vgId:%d, no commit, code:%s, current offset version:%" PRId64 ", ordinal:%d/%d", tmq->consumerId, pTopic->topicName, pVg->vgId, tstrerror(code), pVg->offsetInfo.endOffset.version, j + 1, numOfVgroups); } } @@ -857,7 +854,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us return; } code = innerCommitAll(tmq, pParamSet); - if (code != 0){ + if (code != 0 && code != TSDB_CODE_TMQ_SAME_COMMITTED_VALUE){ tqErrorC("consumer:0x%" PRIx64 " innerCommitAll failed, code:%s", tmq->consumerId, tstrerror(code)); } @@ -957,7 +954,8 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { } } - tqClientDebug = rsp.debugFlag; + tqClientDebugFlag = rsp.debugFlag; + tDestroySMqHbRsp(&rsp); END: @@ -978,6 +976,7 @@ void tmqSendHbReq(void* param, void* tmrId) { req.consumerId = tmq->consumerId; req.epoch = tmq->epoch; req.pollFlag = atomic_load_8(&tmq->pollFlag); + tqDebugC("consumer:0x%" PRIx64 " send heartbeat, pollFlag:%d", tmq->consumerId, req.pollFlag); req.topics = taosArrayInit(taosArrayGetSize(tmq->clientTopics), sizeof(TopicOffsetRows)); if (req.topics == NULL) { goto END; @@ -1063,7 +1062,7 @@ void tmqSendHbReq(void* param, void* tmrId) { tDestroySMqHbReq(&req); if (tmrId != NULL) { bool ret = taosTmrReset(tmqSendHbReq, tmq->heartBeatIntervalMs, param, tmqMgmt.timer, 
&tmq->hbLiveTimer); - tqDebugC("reset timer fo tmq hb:%d", ret); + tqDebugC("consumer:0x%" PRIx64 " reset timer for tmq heartbeat:%d, pollFlag:%d", tmq->consumerId, ret, tmq->pollFlag); } int32_t ret = taosReleaseRef(tmqMgmt.rsetId, refId); if (ret != 0){ @@ -1269,7 +1268,9 @@ static int32_t askEpCb(void* param, SDataBuf* pMsg, int32_t code) { } if (code != TSDB_CODE_SUCCESS) { - tqErrorC("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code)); + if (code != TSDB_CODE_MND_CONSUMER_NOT_READY){ + tqErrorC("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code)); + } goto END; } @@ -1422,7 +1423,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) { tqDebugC("consumer:0x%" PRIx64 " retrieve ep from mnode in 1s", pTmq->consumerId); bool ret = taosTmrReset(tmqAssignAskEpTask, DEFAULT_ASKEP_INTERVAL, (void*)(pTmq->refId), tmqMgmt.timer, &pTmq->epTimer); - tqDebugC("reset timer fo tmq ask ep:%d", ret); + tqDebugC("reset timer for tmq ask ep:%d", ret); } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) { tmq_commit_cb* pCallbackFn = (pTmq->commitCb != NULL) ? 
pTmq->commitCb : defaultCommitCbFn; asyncCommitAllOffsets(pTmq, pCallbackFn, pTmq->commitCbUserParam); @@ -1430,7 +1431,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) { pTmq->autoCommitInterval / 1000.0); bool ret = taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, (void*)(pTmq->refId), tmqMgmt.timer, &pTmq->commitTimer); - tqDebugC("reset timer fo commit:%d", ret); + tqDebugC("reset timer for commit:%d", ret); } else { tqErrorC("consumer:0x%" PRIx64 " invalid task type:%d", pTmq->consumerId, *pTaskType); } @@ -2869,8 +2870,7 @@ int32_t tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4, SReqResultInfo** pRes pRspObj->resInfo.precision = precision; pRspObj->resInfo.totalRows += pRspObj->resInfo.numOfRows; - int32_t code = setResultDataPtr(&pRspObj->resInfo, pRspObj->resInfo.fields, pRspObj->resInfo.numOfCols, - pRspObj->resInfo.numOfRows, convertUcs4); + int32_t code = setResultDataPtr(&pRspObj->resInfo, convertUcs4); if (code != 0) { return code; } diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp index dc6a302924f..338457bec45 100644 --- a/source/client/test/smlTest.cpp +++ b/source/client/test/smlTest.cpp @@ -68,6 +68,15 @@ TEST(testCase, smlParseInfluxString_Test) { taosArrayDestroy(elements.colArray); elements.colArray = nullptr; + // case 0 false + tmp = "st,t1=3 c3=\""; + (void)memcpy(sql, tmp, strlen(tmp) + 1); + (void)memset(&elements, 0, sizeof(SSmlLineInfo)); + ret = smlParseInfluxString(info, sql, sql + strlen(sql), &elements); + ASSERT_NE(ret, 0); + taosArrayDestroy(elements.colArray); + elements.colArray = nullptr; + // case 2 false tmp = "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2,c2=false,c4=4f64 1626006833639000000"; (void)memcpy(sql, tmp, strlen(tmp) + 1); @@ -591,6 +600,104 @@ TEST(testCase, smlParseTelnetLine_Test) { // smlDestroyInfo(info); //} +bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg) { + const char *pVal = kvVal->value; + int32_t len = kvVal->length; + char *endptr = NULL; + 
double result = taosStr2Double(pVal, &endptr); + if (pVal == endptr) { + smlBuildInvalidDataMsg(msg, "invalid data", pVal); + return false; + } + + int32_t left = len - (endptr - pVal); + if (left == 0 || (left == 3 && strncasecmp(endptr, "f64", left) == 0)) { + kvVal->type = TSDB_DATA_TYPE_DOUBLE; + kvVal->d = result; + } else if ((left == 3 && strncasecmp(endptr, "f32", left) == 0)) { + if (!IS_VALID_FLOAT(result)) { + smlBuildInvalidDataMsg(msg, "float out of range[-3.402823466e+38,3.402823466e+38]", pVal); + return false; + } + kvVal->type = TSDB_DATA_TYPE_FLOAT; + kvVal->f = (float)result; + } else if ((left == 1 && *endptr == 'i') || (left == 3 && strncasecmp(endptr, "i64", left) == 0)) { + if (smlDoubleToInt64OverFlow(result)) { + errno = 0; + int64_t tmp = taosStr2Int64(pVal, &endptr, 10); + if (errno == ERANGE) { + smlBuildInvalidDataMsg(msg, "big int out of range[-9223372036854775808,9223372036854775807]", pVal); + return false; + } + kvVal->type = TSDB_DATA_TYPE_BIGINT; + kvVal->i = tmp; + return true; + } + kvVal->type = TSDB_DATA_TYPE_BIGINT; + kvVal->i = (int64_t)result; + } else if ((left == 1 && *endptr == 'u') || (left == 3 && strncasecmp(endptr, "u64", left) == 0)) { + if (result >= (double)UINT64_MAX || result < 0) { + errno = 0; + uint64_t tmp = taosStr2UInt64(pVal, &endptr, 10); + if (errno == ERANGE || result < 0) { + smlBuildInvalidDataMsg(msg, "unsigned big int out of range[0,18446744073709551615]", pVal); + return false; + } + kvVal->type = TSDB_DATA_TYPE_UBIGINT; + kvVal->u = tmp; + return true; + } + kvVal->type = TSDB_DATA_TYPE_UBIGINT; + kvVal->u = result; + } else if (left == 3 && strncasecmp(endptr, "i32", left) == 0) { + if (!IS_VALID_INT(result)) { + smlBuildInvalidDataMsg(msg, "int out of range[-2147483648,2147483647]", pVal); + return false; + } + kvVal->type = TSDB_DATA_TYPE_INT; + kvVal->i = result; + } else if (left == 3 && strncasecmp(endptr, "u32", left) == 0) { + if (!IS_VALID_UINT(result)) { + smlBuildInvalidDataMsg(msg, 
"unsigned int out of range[0,4294967295]", pVal); + return false; + } + kvVal->type = TSDB_DATA_TYPE_UINT; + kvVal->u = result; + } else if (left == 3 && strncasecmp(endptr, "i16", left) == 0) { + if (!IS_VALID_SMALLINT(result)) { + smlBuildInvalidDataMsg(msg, "small int our of range[-32768,32767]", pVal); + return false; + } + kvVal->type = TSDB_DATA_TYPE_SMALLINT; + kvVal->i = result; + } else if (left == 3 && strncasecmp(endptr, "u16", left) == 0) { + if (!IS_VALID_USMALLINT(result)) { + smlBuildInvalidDataMsg(msg, "unsigned small int out of rang[0,65535]", pVal); + return false; + } + kvVal->type = TSDB_DATA_TYPE_USMALLINT; + kvVal->u = result; + } else if (left == 2 && strncasecmp(endptr, "i8", left) == 0) { + if (!IS_VALID_TINYINT(result)) { + smlBuildInvalidDataMsg(msg, "tiny int out of range[-128,127]", pVal); + return false; + } + kvVal->type = TSDB_DATA_TYPE_TINYINT; + kvVal->i = result; + } else if (left == 2 && strncasecmp(endptr, "u8", left) == 0) { + if (!IS_VALID_UTINYINT(result)) { + smlBuildInvalidDataMsg(msg, "unsigned tiny int out of range[0,255]", pVal); + return false; + } + kvVal->type = TSDB_DATA_TYPE_UTINYINT; + kvVal->u = result; + } else { + smlBuildInvalidDataMsg(msg, "invalid data", pVal); + return false; + } + return true; +} + TEST(testCase, smlParseNumber_performance_Test) { char msg[256] = {0}; SSmlMsgBuf msgBuf; diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index 42a7c2c6154..f10eb6a6114 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -6,6 +6,8 @@ endif() add_library(common STATIC ${COMMON_SRC}) +add_dependencies(common lemon_sql) + if(DEFINED GRANT_CFG_INCLUDE_DIR) add_definitions(-DGRANTS_CFG) endif() diff --git a/source/common/src/systable.c b/source/common/src/systable.c index eef38bf18ee..4993ece7c16 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -118,7 +118,7 @@ static const SSysDbTableSchema userDBSchema[] = { {.name = "table_suffix", 
.bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, {.name = "tsdb_pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, {.name = "keep_time_offset", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "s3_chunksize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "s3_chunkpages", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, {.name = "s3_keeplocal", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, {.name = "s3_compact", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, {.name = "with_arbitrator", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, @@ -165,8 +165,8 @@ static const SSysDbTableSchema userStbsSchema[] = { static const SSysDbTableSchema streamSchema[] = { {.name = "stream_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, - {.name = "stream_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "history_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "stream_id", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "history_id", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, @@ -190,9 +190,9 @@ static const SSysDbTableSchema streamTaskSchema[] = { {.name = "stage", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, {.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, 
.sysInfo = false}, {.name = "process_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "process_throughput", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "out_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "out_throughput", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "process_throughput", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "out_total", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "out_throughput", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "dispatch_throughput", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "dispatch_total", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, @@ -437,6 +437,7 @@ static const SSysDbTableSchema userGrantsLogsSchema[] = { {.name = "state", .bytes = 1536 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, {.name = "active", .bytes = 512 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, {.name = "machine", .bytes = TSDB_GRANT_LOG_COL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, + {.name = "active_info", .bytes = 512 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, }; static const SSysDbTableSchema userMachinesSchema[] = { diff --git a/source/common/src/tcol.c b/source/common/src/tcol.c index 84027c25b6b..923aab12ca0 100644 --- a/source/common/src/tcol.c +++ b/source/common/src/tcol.c @@ -363,6 +363,9 @@ int8_t validColEncode(uint8_t type, uint8_t l1) { if (l1 == TSDB_COLVAL_ENCODE_NOCHANGE) { return 1; } + if (l1 == TSDB_COLVAL_ENCODE_DISABLED) { + return 1; + } if (type == TSDB_DATA_TYPE_BOOL) { return TSDB_COLVAL_ENCODE_RLE == l1 ? 
1 : 0; } else if (type >= TSDB_DATA_TYPE_TINYINT && type <= TSDB_DATA_TYPE_INT) { diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 8654463eb14..501380e8a5a 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -18,6 +18,7 @@ #include "tcompare.h" #include "tlog.h" #include "tname.h" +#include "tglobal.h" #define MALLOC_ALIGN_BYTES 32 @@ -86,8 +87,18 @@ int32_t getJsonValueLen(const char* data) { return dataLen; } -int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) { - if (isNull || pData == NULL) { +static int32_t getDataLen(int32_t type, const char* pData) { + int32_t dataLen = 0; + if (type == TSDB_DATA_TYPE_JSON) { + dataLen = getJsonValueLen(pData); + } else { + dataLen = varDataTLen(pData); + } + return dataLen; +} + +static int32_t colDataSetValHelp(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) { + if (isNull || pData == NULL) { // There is a placehold for each NULL value of binary or nchar type. if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { pColumnInfoData->varmeta.offset[rowIndex] = -1; // it is a null value of VAR type. 
@@ -101,11 +112,9 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const int32_t type = pColumnInfoData->info.type; if (IS_VAR_DATA_TYPE(type)) { - int32_t dataLen = 0; - if (type == TSDB_DATA_TYPE_JSON) { - dataLen = getJsonValueLen(pData); - } else { - dataLen = varDataTLen(pData); + int32_t dataLen = getDataLen(type, pData); + if (pColumnInfoData->varmeta.offset[rowIndex] > 0) { + pColumnInfoData->varmeta.length = pColumnInfoData->varmeta.offset[rowIndex]; } SVarColAttr* pAttr = &pColumnInfoData->varmeta; @@ -134,7 +143,7 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const uint32_t len = pColumnInfoData->varmeta.length; pColumnInfoData->varmeta.offset[rowIndex] = len; - (void) memmove(pColumnInfoData->pData + len, pData, dataLen); + (void)memmove(pColumnInfoData->pData + len, pData, dataLen); pColumnInfoData->varmeta.length += dataLen; } else { memcpy(pColumnInfoData->pData + pColumnInfoData->info.bytes * rowIndex, pData, pColumnInfoData->info.bytes); @@ -144,6 +153,18 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const return 0; } +int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) { + if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { + pColumnInfoData->varmeta.offset[rowIndex] = -1; + } + + return colDataSetValHelp(pColumnInfoData, rowIndex, pData, isNull); +} + +int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) { + return colDataSetValHelp(pColumnInfoData, rowIndex, pData, isNull); +} + int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData) { int32_t type = pColumnInfoData->info.type; @@ -3043,8 +3064,12 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha } // return length of encoded data, return -1 if failed -int32_t blockEncode(const SSDataBlock* 
pBlock, char* data, int32_t numOfCols) { - blockDataCheck(pBlock, false); +int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) { + int32_t code = blockDataCheck(pBlock); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return -1; + } int32_t dataLen = 0; @@ -3108,9 +3133,11 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { size_t metaSize = 0; if (IS_VAR_DATA_TYPE(pColRes->info.type)) { metaSize = numOfRows * sizeof(int32_t); + if(dataLen + metaSize > dataBuflen) goto _exit; memcpy(data, pColRes->varmeta.offset, metaSize); } else { metaSize = BitmapLen(numOfRows); + if(dataLen + metaSize > dataBuflen) goto _exit; memcpy(data, pColRes->nullbitmap, metaSize); } @@ -3129,12 +3156,14 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { } colSizes[col] += colSize; dataLen += colSize; + if(dataLen > dataBuflen) goto _exit; (void) memmove(data, pColData, colSize); data += colSize; } } else { colSizes[col] = colDataGetLength(pColRes, numOfRows); dataLen += colSizes[col]; + if(dataLen > dataBuflen) goto _exit; if (pColRes->pData != NULL) { (void) memmove(data, pColRes->pData, colSizes[col]); } @@ -3158,7 +3187,14 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { *actualLen = dataLen; *groupId = pBlock->info.id.groupId; + if (dataLen > dataBuflen) goto _exit; + return dataLen; + +_exit: + uError("blockEncode dataLen:%d, dataBuflen:%zu", dataLen, dataBuflen); + terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + return -1; } int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos) { @@ -3288,9 +3324,13 @@ int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos *pEndPos = pStart; - blockDataCheck(pBlock, false); + code = blockDataCheck(pBlock); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return code; + } - return code; + return TSDB_CODE_SUCCESS; } int32_t 
trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList) { @@ -3500,20 +3540,19 @@ int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo) { return nextRowIdx; } -void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) { - return; - - if (NULL == pDataBlock || pDataBlock->info.rows == 0) { - return; +#define BLOCK_DATA_CHECK_TRESSA(o) \ + if (!(o)) { \ + uError("blockDataCheck failed! line:%d", __LINE__); \ + return TSDB_CODE_INTERNAL_ERROR; \ + } +int32_t blockDataCheck(const SSDataBlock* pDataBlock) { + if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER || NULL == pDataBlock || pDataBlock->info.rows == 0) { + return TSDB_CODE_SUCCESS; } - -#define BLOCK_DATA_CHECK_TRESSA(o) ; -//#define BLOCK_DATA_CHECK_TRESSA(o) A S S E R T(o) BLOCK_DATA_CHECK_TRESSA(pDataBlock->info.rows > 0); - - if (!pDataBlock->info.dataLoad && !forceChk) { - return; + if (!pDataBlock->info.dataLoad) { + return TSDB_CODE_SUCCESS; } bool isVarType = false; @@ -3524,8 +3563,10 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) { int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < colNum; ++i) { SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pDataBlock->pDataBlock, i); + BLOCK_DATA_CHECK_TRESSA(pCol != NULL); isVarType = IS_VAR_DATA_TYPE(pCol->info.type); checkRows = pDataBlock->info.rows; + if (pCol->info.noData == true) continue; if (isVarType) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset); @@ -3533,27 +3574,39 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) { BLOCK_DATA_CHECK_TRESSA(pCol->nullbitmap); } - nextPos = 0; + nextPos = -1; for (int64_t r = 0; r < checkRows; ++r) { + if (tsSafetyCheckLevel <= TSDB_SAFETY_CHECK_LEVELL_NORMAL) break; if (!colDataIsNull_s(pCol, r)) { BLOCK_DATA_CHECK_TRESSA(pCol->pData); BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.length <= pCol->varmeta.allocLen); - + if (isVarType) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.allocLen > 0); - 
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] < pCol->varmeta.length); + BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] <= pCol->varmeta.length); if (pCol->reassigned) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] >= 0); - } else if (0 == r) { + } else if (0 == r || nextPos == -1) { nextPos = pCol->varmeta.offset[r]; } else { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] == nextPos); } - - colLen = varDataTLen(pCol->pData + pCol->varmeta.offset[r]); - BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE); + + char* pColData = pCol->pData + pCol->varmeta.offset[r]; + int32_t colSize = 0; + if (pCol->info.type == TSDB_DATA_TYPE_JSON) { + colLen = getJsonValueLen(pColData); + } else { + colLen = varDataTLen(pColData); + } + + if (pCol->info.type == TSDB_DATA_TYPE_JSON) { + BLOCK_DATA_CHECK_TRESSA(colLen >= CHAR_BYTES); + } else { + BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE); + } BLOCK_DATA_CHECK_TRESSA(colLen <= pCol->info.bytes); - + if (pCol->reassigned) { BLOCK_DATA_CHECK_TRESSA((pCol->varmeta.offset[r] + colLen) <= pCol->varmeta.length); } else { @@ -3563,13 +3616,21 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) { typeValue = *(char*)(pCol->pData + pCol->varmeta.offset[r] + colLen - 1); } else { - GET_TYPED_DATA(typeValue, int64_t, pCol->info.type, colDataGetNumData(pCol, r)); + if (TSDB_DATA_TYPE_FLOAT == pCol->info.type) { + float v = 0; + GET_TYPED_DATA(v, float, pCol->info.type, colDataGetNumData(pCol, r)); + } else if (TSDB_DATA_TYPE_DOUBLE == pCol->info.type) { + double v = 0; + GET_TYPED_DATA(v, double, pCol->info.type, colDataGetNumData(pCol, r)); + } else { + GET_TYPED_DATA(typeValue, int64_t, pCol->info.type, colDataGetNumData(pCol, r)); + } } } } } - return; + return TSDB_CODE_SUCCESS; } diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 4a2fd7bad9c..fe2748ad2d4 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -131,6 +131,7 @@ bool tsMonitorForceV2 = true; 
// audit bool tsEnableAudit = true; bool tsEnableAuditCreateTable = true; +bool tsEnableAuditDelete = true; int32_t tsAuditInterval = 5000; // telem @@ -149,8 +150,9 @@ bool tsEnableCrashReport = false; #else bool tsEnableCrashReport = true; #endif -char *tsClientCrashReportUri = "/ccrashreport"; -char *tsSvrCrashReportUri = "/dcrashreport"; +char *tsClientCrashReportUri = "/ccrashreport"; +char *tsSvrCrashReportUri = "/dcrashreport"; +int8_t tsSafetyCheckLevel = TSDB_SAFETY_CHECK_LEVELL_NORMAL; // schemaless bool tsSmlDot2Underline = true; @@ -299,7 +301,7 @@ int32_t tsTtlUnit = 86400; int32_t tsTtlPushIntervalSec = 10; int32_t tsTrimVDbIntervalSec = 60 * 60; // interval of trimming db in all vgroups int32_t tsS3MigrateIntervalSec = 60 * 60; // interval of s3migrate db in all vgroups -bool tsS3MigrateEnabled = 1; +bool tsS3MigrateEnabled = 0; int32_t tsGrantHBInterval = 60; int32_t tsUptimeInterval = 300; // seconds char tsUdfdResFuncs[512] = ""; // udfd resident funcs that teardown when udfd exits @@ -560,7 +562,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebug", tqClientDebug, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebugFlag", tqClientDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "udfDebugFlag", udfDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); @@ -622,6 +624,7 @@ 
static int32_t taosAddClientCfg(SConfig *pCfg) { TAOS_CHECK_RETURN( cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "safetyCheckLevel", tsSafetyCheckLevel, 0, 5, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); tsNumOfRpcThreads = tsNumOfCores / 2; tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS); @@ -687,10 +690,10 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddString(pCfg, "os version", info.version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddString(pCfg, "os machine", info.machine, CFG_SCOPE_BOTH, CFG_DYN_NONE)); - TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); - TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); - TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE)); - TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", td_version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", td_compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", td_gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", td_buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -794,6 +797,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddBool(pCfg, "monitorForceV2", tsMonitorForceV2, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); + TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableAuditDelete", tsEnableAuditDelete, CFG_SCOPE_SERVER, CFG_DYN_NONE)); 
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE)); @@ -1322,6 +1326,9 @@ static int32_t taosSetClientCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "tsmaDataDeleteMark"); tsmaDataDeleteMark = pItem->i32; + + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "safetyCheckLevel"); + tsSafetyCheckLevel = pItem->i32; TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -1507,6 +1514,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditCreateTable"); tsEnableAuditCreateTable = pItem->bval; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "enableAuditDelete"); + tsEnableAuditDelete = pItem->bval; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditInterval"); tsAuditInterval = pItem->i32; @@ -1663,6 +1673,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "checkpointInterval"); tsStreamCheckpointInterval = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "concurrentCheckpoint"); + tsMaxConcurrentCheckpoint = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "streamSinkDataRate"); tsSinkDataRate = pItem->fval; @@ -2029,7 +2042,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"metaDebugFlag", &metaDebugFlag}, {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, - {"tqClientDebug", &tqClientDebug}, + {"tqClientDebugFlag", &tqClientDebugFlag}, }; static OptionNameAndVar options[] = {{"audit", &tsEnableAudit}, @@ -2077,7 +2090,8 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"experimental", &tsExperimental}, {"maxTsmaNum", &tsMaxTsmaNum}, {"singleQueryMaxMemorySize", 
&tsSingleQueryMaxMemorySize}, - {"minReservedMemorySize", &tsMinReservedMemorySize}}; + {"minReservedMemorySize", &tsMinReservedMemorySize}, + {"safetyCheckLevel", &tsSafetyCheckLevel}}; if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { code = taosCfgSetOption(options, tListLen(options), pItem, false); @@ -2333,7 +2347,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) { {"experimental", &tsExperimental}, {"multiResultFunctionStarReturnTags", &tsMultiResultFunctionStarReturnTags}, {"maxTsmaCalcDelay", &tsMaxTsmaCalcDelay}, - {"tsmaDataDeleteMark", &tsmaDataDeleteMark}}; + {"tsmaDataDeleteMark", &tsmaDataDeleteMark}, + {"safetyCheckLevel", &tsSafetyCheckLevel}}; if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { code = taosCfgSetOption(options, tListLen(options), pItem, false); @@ -2377,8 +2392,13 @@ static void taosCheckAndSetDebugFlag(int32_t *pFlagPtr, char *name, int32_t flag if (noNeedToSetVars != NULL && taosArraySearch(noNeedToSetVars, name, taosLogVarComp, TD_EQ) != NULL) { return; } - if (taosSetDebugFlag(pFlagPtr, name, flag) != 0) { - uError("failed to set flag %s to %d", name, flag); + int32_t code = 0; + if ((code = taosSetDebugFlag(pFlagPtr, name, flag)) != 0) { + if (code != TSDB_CODE_CFG_NOT_FOUND) { + uError("failed to set flag %s to %d, since:%s", name, flag, tstrerror(code)); + } else { + uDebug("failed to set flag %s to %d, since:%s", name, flag, tstrerror(code)); + } } return; } diff --git a/source/common/src/tmisce.c b/source/common/src/tmisce.c index 10375ba8574..4df458c2bb7 100644 --- a/source/common/src/tmisce.c +++ b/source/common/src/tmisce.c @@ -221,10 +221,9 @@ int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t } TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), NULL, _exit); - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), NULL, _exit); 
- TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), NULL, _exit); - - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), NULL, _exit); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), NULL, _exit); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), NULL, _exit); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), NULL, _exit); TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashSig", signum), NULL, _exit); TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashTs", taosGetTimestampUs()), NULL, _exit); diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index c8cd5eb462a..0b9af8010cb 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -40,7 +40,7 @@ #define TD_MSG_RANGE_CODE_ #include "tmsgdef.h" -#include "tanal.h" +#include "tanalytics.h" #include "tcol.h" #include "tlog.h" @@ -567,6 +567,7 @@ int32_t tSerializeSClientHbBatchRsp(void *buf, int32_t bufLen, const SClientHbBa TAOS_CHECK_EXIT(tSerializeSClientHbRsp(&encoder, pRsp)); } TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pBatchRsp->monitorParas)); + TAOS_CHECK_EXIT(tEncodeI8(&encoder, pBatchRsp->enableAuditDelete)); tEndEncode(&encoder); _exit: @@ -609,6 +610,12 @@ int32_t tDeserializeSClientHbBatchRsp(void *buf, int32_t bufLen, SClientHbBatchR TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pBatchRsp->monitorParas)); } + if (!tDecodeIsEnd(&decoder)) { + TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pBatchRsp->enableAuditDelete)); + } else { + pBatchRsp->enableAuditDelete = 0; + } + tEndDecode(&decoder); _exit: @@ -1813,6 +1820,60 @@ int32_t tDeserializeSDropUserReq(void *buf, int32_t bufLen, SDropUserReq *pReq) void tFreeSDropUserReq(SDropUserReq *pReq) { FREESQL(); } +int32_t tSerializeSAuditReq(void *buf, int32_t bufLen, SAuditReq *pReq) { + SEncoder encoder = {0}; + int32_t code = 0; + int32_t lino; + int32_t tlen; + tEncoderInit(&encoder, buf, bufLen); + + 
TAOS_CHECK_EXIT(tStartEncode(&encoder)); + + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->operation)); + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->db)); + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->table)); + TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->sqlLen)); + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->pSql)); + + tEndEncode(&encoder); + +_exit: + if (code) { + tlen = code; + } else { + tlen = encoder.pos; + } + tEncoderClear(&encoder); + return tlen; +} + +int32_t tDeserializeSAuditReq(void *buf, int32_t bufLen, SAuditReq *pReq) { + SDecoder decoder = {0}; + int32_t code = 0; + int32_t lino; + tDecoderInit(&decoder, buf, bufLen); + + TAOS_CHECK_EXIT(tStartDecode(&decoder)); + + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->operation)); + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->db)); + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->table)); + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->sqlLen)); + if (pReq->sqlLen > 0) { + pReq->pSql = taosMemoryMalloc(pReq->sqlLen + 1); + if (pReq->pSql == NULL) { + TAOS_CHECK_EXIT(terrno); + } + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->pSql)); + } + tEndDecode(&decoder); +_exit: + tDecoderClear(&decoder); + return code; +} + +void tFreeSAuditReq(SAuditReq *pReq) { taosMemoryFreeClear(pReq->pSql); } + SIpWhiteList *cloneIpWhiteList(SIpWhiteList *pIpWhiteList) { if (pIpWhiteList == NULL) return NULL; @@ -2105,7 +2166,7 @@ int32_t tSerializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnalAl int32_t numOfAlgos = 0; void *pIter = taosHashIterate(pRsp->hash, NULL); while (pIter != NULL) { - SAnalUrl *pUrl = pIter; + SAnalyticsUrl *pUrl = pIter; size_t nameLen = 0; const char *name = taosHashGetKey(pIter, &nameLen); if (nameLen > 0 && nameLen <= TSDB_ANAL_ALGO_KEY_LEN && pUrl->urlLen > 0) { @@ -2120,7 +2181,7 @@ int32_t tSerializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnalAl pIter = taosHashIterate(pRsp->hash, NULL); while (pIter != NULL) { - SAnalUrl *pUrl = pIter; + SAnalyticsUrl *pUrl = 
pIter; size_t nameLen = 0; const char *name = taosHashGetKey(pIter, &nameLen); if (nameLen > 0 && pUrl->urlLen > 0) { @@ -2164,7 +2225,7 @@ int32_t tDeserializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnal int32_t nameLen; int32_t type; char name[TSDB_ANAL_ALGO_KEY_LEN]; - SAnalUrl url = {0}; + SAnalyticsUrl url = {0}; TAOS_CHECK_EXIT(tStartDecode(&decoder)); TAOS_CHECK_EXIT(tDecodeI64(&decoder, &pRsp->ver)); @@ -2184,7 +2245,7 @@ int32_t tDeserializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnal TAOS_CHECK_EXIT(tDecodeBinaryAlloc(&decoder, (void **)&url.url, NULL) < 0); } - TAOS_CHECK_EXIT(taosHashPut(pRsp->hash, name, nameLen, &url, sizeof(SAnalUrl))); + TAOS_CHECK_EXIT(taosHashPut(pRsp->hash, name, nameLen, &url, sizeof(SAnalyticsUrl))); } tEndDecode(&decoder); @@ -2197,7 +2258,7 @@ int32_t tDeserializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnal void tFreeRetrieveAnalAlgoRsp(SRetrieveAnalAlgoRsp *pRsp) { void *pIter = taosHashIterate(pRsp->hash, NULL); while (pIter != NULL) { - SAnalUrl *pUrl = (SAnalUrl *)pIter; + SAnalyticsUrl *pUrl = (SAnalyticsUrl *)pIter; taosMemoryFree(pUrl->url); pIter = taosHashIterate(pRsp->hash, pIter); } @@ -3874,6 +3935,7 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) { TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->s3ChunkSize)); TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->s3KeepLocal)); TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->s3Compact)); + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->dnodeListStr)); tEndEncode(&encoder); @@ -3962,6 +4024,10 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->s3Compact)); } + if (!tDecodeIsEnd(&decoder)) { + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->dnodeListStr)); + } + tEndDecode(&decoder); _exit: @@ -6289,6 +6355,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) { TAOS_CHECK_EXIT(tEncodeI32(&encoder, 
pRsp->authVer)); TAOS_CHECK_EXIT(tEncodeI64(&encoder, pRsp->whiteListVer)); TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pRsp->monitorParas)); + TAOS_CHECK_EXIT(tEncodeI8(&encoder, pRsp->enableAuditDelete)); tEndEncode(&encoder); _exit: @@ -6340,6 +6407,11 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) { if (!tDecodeIsEnd(&decoder)) { TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pRsp->monitorParas)); } + if (!tDecodeIsEnd(&decoder)) { + TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pRsp->enableAuditDelete)); + } else { + pRsp->enableAuditDelete = 0; + } tEndDecode(&decoder); _exit: @@ -10983,6 +11055,7 @@ int32_t tEncodeMqDataRspCommon(SEncoder *pEncoder, const SMqDataRsp *pRsp) { int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { TAOS_CHECK_RETURN(tEncodeMqDataRspCommon(pEncoder, pRsp)); TAOS_CHECK_RETURN(tEncodeI64(pEncoder, pRsp->sleepTime)); + return 0; } @@ -11094,6 +11167,7 @@ int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, createTableReq, createTableLen)); } } + _exit: return code; } diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index 1b27d7a084a..4fb96203a44 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -298,12 +298,13 @@ static void dmPrintArgs(int32_t argc, char const *argv[]) { static void dmGenerateGrant() { mndGenerateMachineCode(); } static void dmPrintVersion() { - printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, version, compatible_version); - printf("git: %s\n", gitinfo); + printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, td_version, + td_compatible_version); + printf("git: %s\n", td_gitinfo); #ifdef TD_ENTERPRISE - printf("gitOfInternal: %s\n", gitinfoOfInternal); + printf("gitOfInternal: %s\n", td_gitinfoOfInternal); #endif - printf("build: %s\n", buildinfo); + printf("build: %s\n", 
td_buildinfo); } static void dmPrintHelp() { diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 1446faab77c..78cc35a62c1 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -18,7 +18,7 @@ #include "dmInt.h" #include "monitor.h" #include "systable.h" -#include "tanal.h" +#include "tanalytics.h" #include "tchecksum.h" extern SConfig *tsCfg; @@ -548,8 +548,8 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { } size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); - size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols + - blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(numOfCols); + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols + dataEncodeBufSize; SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size); if (pRsp == NULL) { @@ -574,7 +574,7 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { pStart += sizeof(SSysTableSchema); } - int32_t len = blockEncode(pBlock, pStart, numOfCols); + int32_t len = blockEncode(pBlock, pStart, dataEncodeBufSize, numOfCols); if (len < 0) { dError("failed to retrieve data since %s", tstrerror(code)); blockDataDestroy(pBlock); diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c index 04b4e9101ca..fb7d891c67c 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c @@ -16,7 +16,7 @@ #define _DEFAULT_SOURCE #include "dmInt.h" #include "libs/function/tudf.h" -#include "tanal.h" +#include "tanalytics.h" static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) { int32_t code = 0; @@ -85,7 +85,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { dError("failed to start udfd since %s", tstrerror(code)); } - if ((code = taosAnalInit()) != 0) { + if ((code = 
taosAnalyticsInit()) != 0) { dError("failed to init analysis env since %s", tstrerror(code)); } diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index bc6c7e55ba0..0d804eadf0e 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -212,6 +212,7 @@ SArray *mmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_VIEW, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_VIEW_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_STATIS, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MND_AUDIT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_COMPACT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_CONFIG_CLUSTER, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_COMPACT_PROGRESS_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/mgmt_snode/CMakeLists.txt b/source/dnode/mgmt/mgmt_snode/CMakeLists.txt index 62dc41a0aec..8ebeafcbfbc 100644 --- a/source/dnode/mgmt/mgmt_snode/CMakeLists.txt +++ b/source/dnode/mgmt/mgmt_snode/CMakeLists.txt @@ -1,5 +1,10 @@ aux_source_directory(src MGMT_SNODE) add_library(mgmt_snode STATIC ${MGMT_SNODE}) + +if(${TD_DARWIN}) + target_compile_options(mgmt_snode PRIVATE -Wno-error=deprecated-non-prototype) +endif() + target_include_directories( mgmt_snode PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" diff --git a/source/dnode/mgmt/mgmt_vnode/CMakeLists.txt b/source/dnode/mgmt/mgmt_vnode/CMakeLists.txt index 15b822ad92e..bb433def6c9 100644 --- a/source/dnode/mgmt/mgmt_vnode/CMakeLists.txt +++ b/source/dnode/mgmt/mgmt_vnode/CMakeLists.txt @@ -1,5 +1,10 @@ aux_source_directory(src MGMT_VNODE) add_library(mgmt_vnode STATIC ${MGMT_VNODE}) + +if(${TD_DARWIN}) + target_compile_options(mgmt_vnode PRIVATE 
-Wno-error=deprecated-non-prototype) +endif() + target_include_directories( mgmt_vnode PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 0e1a4bc98e2..989adf84ac6 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -36,12 +36,13 @@ typedef struct SVnodeMgmt { SSingleWorker mgmtWorker; SSingleWorker mgmtMultiWorker; SHashObj *hash; + SHashObj *closedHash; TdThreadRwlock lock; SVnodesStat state; STfs *pTfs; TdThread thread; bool stop; - TdThreadMutex createLock; + TdThreadMutex fileLock; } SVnodeMgmt; typedef struct { @@ -94,7 +95,7 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId); SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict); void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode); int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl); -void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal); +void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed); // vmHandle.c SArray *vmGetMsgHandles(); @@ -111,6 +112,7 @@ int32_t vmProcessArbHeartBeatReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes); int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt); int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes); +int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes); // vmWorker.c int32_t vmStartWorker(SVnodeMgmt *pMgmt); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 5fabd4cdde9..7566b69c02e 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -19,6 +19,54 @@ #define MAX_CONTENT_LEN 2 * 1024 * 1024 +int32_t 
vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) { + (void)taosThreadRwlockRdlock(&pMgmt->lock); + + int32_t num = 0; + int32_t size = taosHashGetSize(pMgmt->hash); + int32_t closedSize = taosHashGetSize(pMgmt->closedHash); + size += closedSize; + SVnodeObj **pVnodes = taosMemoryCalloc(size, sizeof(SVnodeObj *)); + if (pVnodes == NULL) { + (void)taosThreadRwlockUnlock(&pMgmt->lock); + return terrno; + } + + void *pIter = taosHashIterate(pMgmt->hash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + SVnodeObj *pVnode = *ppVnode; + if (pVnode && num < size) { + int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); + dTrace("vgId:%d,acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); + pVnodes[num++] = (*ppVnode); + pIter = taosHashIterate(pMgmt->hash, pIter); + } else { + taosHashCancelIterate(pMgmt->hash, pIter); + } + } + + pIter = taosHashIterate(pMgmt->closedHash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + SVnodeObj *pVnode = *ppVnode; + if (pVnode && num < size) { + int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); + dTrace("vgId:%d, acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); + pVnodes[num++] = (*ppVnode); + pIter = taosHashIterate(pMgmt->closedHash, pIter); + } else { + taosHashCancelIterate(pMgmt->closedHash, pIter); + } + } + + (void)taosThreadRwlockUnlock(&pMgmt->lock); + *numOfVnodes = num; + *ppVnodes = pVnodes; + + return 0; +} + int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) { (void)taosThreadRwlockRdlock(&pMgmt->lock); @@ -36,7 +84,7 @@ int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeOb SVnodeObj *pVnode = *ppVnode; if (pVnode && num < size) { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); - // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount); + dTrace("vgId:%d, acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, 
refCount); pVnodes[num++] = (*ppVnode); pIter = taosHashIterate(pMgmt->hash, pIter); } else { @@ -203,6 +251,8 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { SVnodeObj **ppVnodes = NULL; char file[PATH_MAX] = {0}; char realfile[PATH_MAX] = {0}; + int32_t lino = 0; + int32_t ret = -1; int32_t nBytes = snprintf(file, sizeof(file), "%s%svnodes_tmp.json", pMgmt->path, TD_DIRSEP); if (nBytes <= 0 || nBytes >= sizeof(file)) { @@ -215,8 +265,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { } int32_t numOfVnodes = 0; - code = vmGetVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes); - if (code) goto _OVER; + TAOS_CHECK_GOTO(vmGetAllVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes), &lino, _OVER); // terrno = TSDB_CODE_OUT_OF_MEMORY; pJson = tjsonCreateObject(); @@ -224,39 +273,56 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { code = terrno; goto _OVER; } - if ((code = vmEncodeVnodeList(pJson, ppVnodes, numOfVnodes)) != 0) goto _OVER; + TAOS_CHECK_GOTO(vmEncodeVnodeList(pJson, ppVnodes, numOfVnodes), &lino, _OVER); buffer = tjsonToString(pJson); if (buffer == NULL) { code = TSDB_CODE_INVALID_JSON_FORMAT; + lino = __LINE__; + goto _OVER; + } + + code = taosThreadMutexLock(&pMgmt->fileLock); + if (code != 0) { + lino = __LINE__; goto _OVER; } pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH); if (pFile == NULL) { code = terrno; - goto _OVER; + lino = __LINE__; + goto _OVER1; } int32_t len = strlen(buffer); if (taosWriteFile(pFile, buffer, len) <= 0) { code = terrno; - goto _OVER; + lino = __LINE__; + goto _OVER1; } if (taosFsyncFile(pFile) < 0) { code = TAOS_SYSTEM_ERROR(errno); - goto _OVER; + lino = __LINE__; + goto _OVER1; } code = taosCloseFile(&pFile); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); - goto _OVER; + lino = __LINE__; + goto _OVER1; } - TAOS_CHECK_GOTO(taosRenameFile(file, realfile), NULL, _OVER); + TAOS_CHECK_GOTO(taosRenameFile(file, realfile), &lino, _OVER1); dInfo("succeed to write 
vnodes file:%s, vnodes:%d", realfile, numOfVnodes); +_OVER1: + ret = taosThreadMutexUnlock(&pMgmt->fileLock); + if (ret != 0) { + dError("failed to unlock since %s", tstrerror(ret)); + } + _OVER: if (pJson != NULL) tjsonDelete(pJson); if (buffer != NULL) taosMemoryFree(buffer); @@ -272,7 +338,8 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) { } if (code != 0) { - dError("failed to write vnodes file:%s since %s, vnodes:%d", realfile, tstrerror(code), numOfVnodes); + dError("failed to write vnodes file:%s at line:%d since %s, vnodes:%d", realfile, lino, tstrerror(code), + numOfVnodes); } return code; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 7e950ef1be1..006f44b349c 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -415,27 +415,30 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { goto _OVER; } - code = taosThreadMutexLock(&pMgmt->createLock); - if (code != 0) { - dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(code)); - goto _OVER; - } code = vmWriteVnodeListToFile(pMgmt); if (code != 0) { code = terrno != 0 ? 
terrno : code; - int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock); - if (ret != 0) { - dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret)); - } goto _OVER; } - int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock); - if (ret != 0) { - dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret)); - } _OVER: if (code != 0) { + int32_t r = 0; + r = taosThreadRwlockWrlock(&pMgmt->lock); + if (r != 0) { + dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(r)); + } + if (r == 0) { + dInfo("vgId:%d, remove from hash", req.vgId); + r = taosHashRemove(pMgmt->hash, &req.vgId, sizeof(int32_t)); + if (r != 0) { + dError("vgId:%d, failed to remove vnode since %s", req.vgId, tstrerror(r)); + } + } + r = taosThreadRwlockUnlock(&pMgmt->lock); + if (r != 0) { + dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(r)); + } vnodeClose(pImpl); vnodeDestroy(0, path, pMgmt->pTfs, 0); } else { @@ -535,7 +538,7 @@ int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path)); bool commitAndRemoveWal = vnodeShouldRemoveWal(pVnode->pImpl); - vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal); + vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal, true); int32_t diskPrimary = wrapperCfg.diskPrimary; char path[TSDB_FILENAME_LEN] = {0}; @@ -683,7 +686,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { } dInfo("vgId:%d, close vnode", srcVgId); - vmCloseVnode(pMgmt, pVnode, true); + vmCloseVnode(pMgmt, pVnode, true, false); int32_t diskPrimary = wrapperCfg.diskPrimary; char srcPath[TSDB_FILENAME_LEN] = {0}; @@ -738,7 +741,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { int32_t vgId = alterReq.vgId; dInfo( - "vgId:%d,vnode management handle msgType:%s, start to alter vnode replica:%d selfIndex:%d leanerReplica:%d " + "vgId:%d, vnode management handle msgType:%s, start to alter vnode replica:%d selfIndex:%d 
leanerReplica:%d " "learnerSelfIndex:%d strict:%d changeVersion:%d", vgId, TMSG_INFO(pMsg->msgType), alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica, alterReq.learnerSelfIndex, alterReq.strict, alterReq.changeVersion); @@ -792,7 +795,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path)); bool commitAndRemoveWal = vnodeShouldRemoveWal(pVnode->pImpl); - vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal); + vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal, true); int32_t diskPrimary = wrapperCfg.diskPrimary; char path[TSDB_FILENAME_LEN] = {0}; @@ -860,7 +863,7 @@ int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return code; } - vmCloseVnode(pMgmt, pVnode, false); + vmCloseVnode(pMgmt, pVnode, false, false); if (vmWriteVnodeListToFile(pMgmt) != 0) { dError("vgId:%d, failed to write vnode list since %s", vgId, terrstr()); } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 20618dbdf38..682c1792709 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -103,7 +103,7 @@ SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict) { pVnode = NULL; } else { int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); - // dTrace("vgId:%d, acquire vnode, ref:%d", pVnode->vgId, refCount); + dTrace("vgId:%d, acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); } (void)taosThreadRwlockUnlock(&pMgmt->lock); @@ -115,16 +115,24 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) { return vmAcquireVno void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) { if (pVnode == NULL) return; - (void)taosThreadRwlockRdlock(&pMgmt->lock); + //(void)taosThreadRwlockRdlock(&pMgmt->lock); int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1); - // dTrace("vgId:%d, release vnode, ref:%d", pVnode->vgId, refCount); - 
(void)taosThreadRwlockUnlock(&pMgmt->lock); + dTrace("vgId:%d, release vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); + //(void)taosThreadRwlockUnlock(&pMgmt->lock); } static void vmFreeVnodeObj(SVnodeObj **ppVnode) { if (!ppVnode || !(*ppVnode)) return; SVnodeObj *pVnode = *ppVnode; + + int32_t refCount = atomic_load_32(&pVnode->refCount); + while (refCount > 0) { + dWarn("vgId:%d, vnode is referenced, retry to free in 200ms, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount); + taosMsleep(200); + refCount = atomic_load_32(&pVnode->refCount); + } + taosMemoryFree(pVnode->path); taosMemoryFree(pVnode); ppVnode[0] = NULL; @@ -166,16 +174,34 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { (void)taosThreadRwlockWrlock(&pMgmt->lock); SVnodeObj *pOld = NULL; int32_t r = taosHashGetDup(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld); + if (r != 0) { + dError("vgId:%d, failed to get vnode from hash", pVnode->vgId); + } + if (pOld) { + vmFreeVnodeObj(&pOld); + } int32_t code = taosHashPut(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnodeObj *)); + + pOld = NULL; + r = taosHashGetDup(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld); + if (r != 0) { + dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId); + } + if (pOld) { + vmFreeVnodeObj(&pOld); + } + + dInfo("vgId:%d, remove from closedHash", pVnode->vgId); + r = taosHashRemove(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t)); + if (r != 0) { + dError("vgId:%d, failed to remove vnode from closedHash", pVnode->vgId); + } (void)taosThreadRwlockUnlock(&pMgmt->lock); return code; } -void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) { +void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed) { char path[TSDB_FILENAME_LEN] = {0}; bool atExit = true; @@ -185,7 +211,40 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal)
(void)taosThreadRwlockWrlock(&pMgmt->lock); int32_t r = taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t)); + if (r != 0) { + dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId); + } + if (keepClosed) { + SVnodeObj *pClosedVnode = taosMemoryCalloc(1, sizeof(SVnodeObj)); + if (pClosedVnode == NULL) { + dError("vgId:%d, failed to alloc vnode since %s", pVnode->vgId, terrstr()); + (void)taosThreadRwlockUnlock(&pMgmt->lock); + return; + } + (void)memset(pClosedVnode, 0, sizeof(SVnodeObj)); + + pClosedVnode->vgId = pVnode->vgId; + pClosedVnode->dropped = pVnode->dropped; + pClosedVnode->vgVersion = pVnode->vgVersion; + pClosedVnode->diskPrimary = pVnode->diskPrimary; + pClosedVnode->toVgId = pVnode->toVgId; + + SVnodeObj *pOld = NULL; + r = taosHashGetDup(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld); + if (r != 0) { + dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId); + } + if (pOld) { + vmFreeVnodeObj(&pOld); + } + dInfo("vgId:%d, put vnode to closedHash", pVnode->vgId); + r = taosHashPut(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), &pClosedVnode, sizeof(SVnodeObj *)); + if (r != 0) { + dError("vgId:%d, failed to put vnode to closedHash", pVnode->vgId); + } + } (void)taosThreadRwlockUnlock(&pMgmt->lock); + vmReleaseVnode(pMgmt, pVnode); if (pVnode->failed) { @@ -362,9 +421,15 @@ static void *vmOpenVnodeInThread(void *param) { static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) { pMgmt->hash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); if (pMgmt->hash == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; dError("failed to init vnode hash since %s", terrstr()); - return -1; + return TSDB_CODE_OUT_OF_MEMORY; + } + + pMgmt->closedHash = + taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); + if (pMgmt->closedHash == NULL) { + dError("failed to init vnode closed hash since %s", terrstr()); + return TSDB_CODE_OUT_OF_MEMORY;
} SWrapperCfg *pCfgs = NULL; @@ -459,7 +524,7 @@ static void *vmCloseVnodeInThread(void *param) { pMgmt->state.openVnodes, pMgmt->state.totalVnodes); tmsgReportStartup("vnode-close", stepDesc); - vmCloseVnode(pMgmt, pVnode, false); + vmCloseVnode(pMgmt, pVnode, false, false); } dInfo("thread:%d, numOfVnodes:%d is closed", pThread->threadIndex, pThread->vnodeNum); @@ -537,6 +602,18 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) { pMgmt->hash = NULL; } + void *pIter = taosHashIterate(pMgmt->closedHash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + vmFreeVnodeObj(ppVnode); + pIter = taosHashIterate(pMgmt->closedHash, pIter); + } + + if (pMgmt->closedHash != NULL) { + taosHashCleanup(pMgmt->closedHash); + pMgmt->closedHash = NULL; + } + dInfo("total vnodes:%d are all closed", numOfVnodes); } @@ -545,7 +622,7 @@ static void vmCleanup(SVnodeMgmt *pMgmt) { vmStopWorker(pMgmt); vnodeCleanup(); (void)taosThreadRwlockDestroy(&pMgmt->lock); - (void)taosThreadMutexDestroy(&pMgmt->createLock); + (void)taosThreadMutexDestroy(&pMgmt->fileLock); taosMemoryFree(pMgmt); } @@ -637,7 +714,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { goto _OVER; } - code = taosThreadMutexInit(&pMgmt->createLock, NULL); + code = taosThreadMutexInit(&pMgmt->fileLock, NULL); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); goto _OVER; diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index 694cc52d647..6d4ebe424a5 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -21,7 +21,7 @@ #include "tgrant.h" #include "tcompare.h" #include "tcs.h" -#include "tanal.h" +#include "tanalytics.h" // clang-format on #define DM_INIT_AUDIT() \ @@ -209,7 +209,7 @@ void dmCleanup() { dError("failed to close udfc"); } udfStopUdfd(); - taosAnalCleanup(); + taosAnalyticsCleanup(); taosStopCacheRefreshWorker(); (void)dmDiskClose(); DestroyRegexCache(); diff --git 
a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index fd593e06385..61543e619e2 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -16,7 +16,7 @@ #define _DEFAULT_SOURCE #include "dmMgmt.h" #include "qworker.h" -#include "tanal.h" +#include "tanalytics.h" #include "tversion.h" static inline void dmSendRsp(SRpcMsg *pMsg) { @@ -30,9 +30,18 @@ static inline void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) { dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet); if (epSet.numOfEps <= 1) { - pMsg->pCont = NULL; - pMsg->code = TSDB_CODE_MNODE_NOT_FOUND; - return; + if (epSet.numOfEps == 0) { + pMsg->pCont = NULL; + pMsg->code = TSDB_CODE_MNODE_NOT_FOUND; + return; + } + // dnode is not the mnode or mnode leader and This ensures that the function correctly handles cases where the + // dnode cannot obtain a valid epSet and avoids returning an incorrect or misleading epSet. + if (strcmp(epSet.eps[0].fqdn, tsLocalFqdn) == 0 && epSet.eps[0].port == tsServerPort) { + pMsg->pCont = NULL; + pMsg->code = TSDB_CODE_MNODE_NOT_FOUND; + return; + } } int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet); @@ -129,9 +138,9 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { pRpc->info.handle, pRpc->contLen, pRpc->code, pRpc->info.ahandle, pRpc->info.refId); int32_t svrVer = 0; - code = taosVersionStrToInt(version, &svrVer); + code = taosVersionStrToInt(td_version, &svrVer); if (code != 0) { - dError("failed to convert version string:%s to int, code:%d", version, code); + dError("failed to convert version string:%s to int, code:%d", td_version, code); goto _OVER; } if ((code = taosCheckVersionCompatible(pRpc->info.cliVer, svrVer, 3)) != 0) { @@ -425,8 +434,8 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.startReadTimer = 1; rpcInit.readTimeout = tsReadTimeout; - if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) { - 
dError("failed to convert version string:%s to int", version); + if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) { + dError("failed to convert version string:%s to int", td_version); } pTrans->clientRpc = rpcOpen(&rpcInit); @@ -474,8 +483,8 @@ int32_t dmInitStatusClient(SDnode *pDnode) { rpcInit.startReadTimer = 0; rpcInit.readTimeout = 0; - if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) { - dError("failed to convert version string:%s to int", version); + if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) { + dError("failed to convert version string:%s to int", td_version); } pTrans->statusRpc = rpcOpen(&rpcInit); @@ -524,8 +533,8 @@ int32_t dmInitSyncClient(SDnode *pDnode) { rpcInit.startReadTimer = 1; rpcInit.readTimeout = tsReadTimeout; - if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) { - dError("failed to convert version string:%s to int", version); + if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) { + dError("failed to convert version string:%s to int", td_version); } pTrans->syncRpc = rpcOpen(&rpcInit); @@ -579,8 +588,8 @@ int32_t dmInitServer(SDnode *pDnode) { rpcInit.compressSize = tsCompressMsgSize; rpcInit.shareConnLimit = tsShareConnLimit * 16; - if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) { - dError("failed to convert version string:%s to int", version); + if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) { + dError("failed to convert version string:%s to int", td_version); } pTrans->serverRpc = rpcOpen(&rpcInit); diff --git a/source/dnode/mgmt/test/sut/src/client.cpp b/source/dnode/mgmt/test/sut/src/client.cpp index 95eea2359d0..6f8b1eb2b44 100644 --- a/source/dnode/mgmt/test/sut/src/client.cpp +++ b/source/dnode/mgmt/test/sut/src/client.cpp @@ -54,7 +54,7 @@ void TestClient::DoInit() { rpcInit.parent = this; // rpcInit.secret = (char*)secretEncrypt; // rpcInit.spi = 1; - taosVersionStrToInt(version, 
&(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &rpcInit.compatibilityVer); clientRpc = rpcOpen(&rpcInit); ASSERT(clientRpc); diff --git a/source/dnode/mnode/impl/CMakeLists.txt b/source/dnode/mnode/impl/CMakeLists.txt index 8a390948aeb..ad36d8c8aea 100644 --- a/source/dnode/mnode/impl/CMakeLists.txt +++ b/source/dnode/mnode/impl/CMakeLists.txt @@ -18,7 +18,7 @@ if(TD_ENTERPRISE) endif() if(${BUILD_WITH_ANALYSIS}) - add_definitions(-DUSE_ANAL) + add_definitions(-DUSE_ANALYTICS) endif() endif() diff --git a/source/dnode/mnode/impl/inc/mndArbGroup.h b/source/dnode/mnode/impl/inc/mndArbGroup.h index 779d64c7e2b..66ef3f766be 100644 --- a/source/dnode/mnode/impl/inc/mndArbGroup.h +++ b/source/dnode/mnode/impl/inc/mndArbGroup.h @@ -47,6 +47,15 @@ bool mndUpdateArbGroupBySetAssignedLeader(SArbGroup *pGroup, int32_t vgId, char int32_t mndGetArbGroupSize(SMnode *pMnode); +typedef enum { + CHECK_SYNC_NONE = 0, + CHECK_SYNC_SET_ASSIGNED_LEADER = 1, + CHECK_SYNC_CHECK_SYNC = 2, + CHECK_SYNC_UPDATE = 3 +} ECheckSyncOp; + +void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, ECheckSyncOp *pOp, SArbGroup *pNewGroup); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/inc/mndDb.h b/source/dnode/mnode/impl/inc/mndDb.h index b72d1386c1b..fdb6b5a80b9 100644 --- a/source/dnode/mnode/impl/inc/mndDb.h +++ b/source/dnode/mnode/impl/inc/mndDb.h @@ -37,6 +37,7 @@ const char *mndGetDbStr(const char *src); const char *mndGetStableStr(const char *src); int32_t mndProcessCompactDbReq(SRpcMsg *pReq); +int32_t mndCheckDbDnodeList(SMnode *pMnode, char *db, char *dnodeListStr, SArray *dnodeList); #ifdef __cplusplus } diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 742db8f450a..d2d9b2e8eb2 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -70,7 +70,7 @@ typedef enum { MND_OPER_WRITE_DB, MND_OPER_READ_DB, MND_OPER_READ_OR_WRITE_DB, - MND_OPER_SHOW_VARIBALES, + 
MND_OPER_SHOW_VARIABLES, MND_OPER_SUBSCRIBE, MND_OPER_CREATE_TOPIC, MND_OPER_DROP_TOPIC, diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h index b97eaf31d1a..c9155f536c9 100644 --- a/source/dnode/mnode/impl/inc/mndStream.h +++ b/source/dnode/mnode/impl/inc/mndStream.h @@ -133,6 +133,7 @@ int32_t mndStreamSetUpdateEpsetAction(SMnode *pMnode, SStreamObj *pStream, SVgr int32_t mndGetStreamObj(SMnode *pMnode, int64_t streamId, SStreamObj** pStream); bool mndStreamNodeIsUpdated(SMnode *pMnode); +int32_t mndCheckForSnode(SMnode *pMnode, SDbObj *pSrcDb); int32_t extractNodeEpset(SMnode *pMnode, SEpSet *pEpSet, bool *hasEpset, int32_t taskId, int32_t nodeId); int32_t mndProcessStreamHb(SRpcMsg *pReq); diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h index 682a51a6878..a8a806e497b 100644 --- a/source/dnode/mnode/impl/inc/mndVgroup.h +++ b/source/dnode/mnode/impl/inc/mndVgroup.h @@ -35,9 +35,9 @@ void mndSortVnodeGid(SVgObj *pVgroup); int64_t mndGetVnodesMemory(SMnode *pMnode, int32_t dnodeId); int64_t mndGetVgroupMemory(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup); -SArray *mndBuildDnodesArray(SMnode *, int32_t exceptDnodeId); +SArray *mndBuildDnodesArray(SMnode *, int32_t exceptDnodeId, SArray *dnodeList); int32_t mndAllocSmaVgroup(SMnode *, SDbObj *pDb, SVgObj *pVgroup); -int32_t mndAllocVgroup(SMnode *, SDbObj *pDb, SVgObj **ppVgroups); +int32_t mndAllocVgroup(SMnode *, SDbObj *pDb, SVgObj **ppVgroups, SArray *dnodeList); int32_t mndAddNewVgPrepareAction(SMnode *, STrans *pTrans, SVgObj *pVg); int32_t mndAddCreateVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid); int32_t mndAddAlterVnodeConfirmAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup); diff --git a/source/dnode/mnode/impl/src/mndAnode.c b/source/dnode/mnode/impl/src/mndAnode.c index 17e3e84c810..87bfe9f7afa 100644 --- a/source/dnode/mnode/impl/src/mndAnode.c +++ 
b/source/dnode/mnode/impl/src/mndAnode.c @@ -21,10 +21,10 @@ #include "mndShow.h" #include "mndTrans.h" #include "mndUser.h" -#include "tanal.h" +#include "tanalytics.h" #include "tjson.h" -#ifdef USE_ANAL +#ifdef USE_ANALYTICS #define TSDB_ANODE_VER_NUMBER 1 #define TSDB_ANODE_RESERVE_SIZE 64 @@ -806,7 +806,7 @@ static int32_t mndProcessAnalAlgoReq(SRpcMsg *pReq) { SSdb *pSdb = pMnode->pSdb; int32_t code = -1; SAnodeObj *pObj = NULL; - SAnalUrl url; + SAnalyticsUrl url; int32_t nameLen; char name[TSDB_ANAL_ALGO_KEY_LEN]; SRetrieveAnalAlgoReq req = {0}; @@ -838,7 +838,7 @@ static int32_t mndProcessAnalAlgoReq(SRpcMsg *pReq) { SAnodeAlgo *algo = taosArrayGet(algos, a); nameLen = 1 + tsnprintf(name, sizeof(name) - 1, "%d:%s", url.type, algo->name); - SAnalUrl *pOldUrl = taosHashAcquire(rsp.hash, name, nameLen); + SAnalyticsUrl *pOldUrl = taosHashAcquire(rsp.hash, name, nameLen); if (pOldUrl == NULL || (pOldUrl != NULL && pOldUrl->anode < url.anode)) { if (pOldUrl != NULL) { taosMemoryFreeClear(pOldUrl->url); @@ -855,7 +855,7 @@ static int32_t mndProcessAnalAlgoReq(SRpcMsg *pReq) { url.urlLen = 1 + tsnprintf(url.url, TSDB_ANAL_ANODE_URL_LEN + TSDB_ANAL_ALGO_TYPE_LEN, "%s/%s", pAnode->url, taosAnalAlgoUrlStr(url.type)); - if (taosHashPut(rsp.hash, name, nameLen, &url, sizeof(SAnalUrl)) != 0) { + if (taosHashPut(rsp.hash, name, nameLen, &url, sizeof(SAnalyticsUrl)) != 0) { taosMemoryFree(url.url); sdbRelease(pSdb, pAnode); goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndArbGroup.c b/source/dnode/mnode/impl/src/mndArbGroup.c index 97bf661bc35..0192044e67c 100644 --- a/source/dnode/mnode/impl/src/mndArbGroup.c +++ b/source/dnode/mnode/impl/src/mndArbGroup.c @@ -15,13 +15,10 @@ #define _DEFAULT_SOURCE #include "mndArbGroup.h" -#include "audit.h" #include "mndDb.h" #include "mndDnode.h" -#include "mndPrivilege.h" #include "mndShow.h" #include "mndTrans.h" -#include "mndUser.h" #include "mndVgroup.h" #define ARBGROUP_VER_NUMBER 1 @@ -245,14 +242,20 @@ static int32_t 
mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *p } for (int i = 0; i < TSDB_ARB_GROUP_MEMBER_NUM; i++) { - (void)memcpy(pOld->members[i].state.token, pNew->members[i].state.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pOld->members[i].state.token, pNew->members[i].state.token, TSDB_ARB_TOKEN_SIZE); } pOld->isSync = pNew->isSync; pOld->assignedLeader.dnodeId = pNew->assignedLeader.dnodeId; - (void)memcpy(pOld->assignedLeader.token, pNew->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pOld->assignedLeader.token, pNew->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); pOld->assignedLeader.acked = pNew->assignedLeader.acked; pOld->version++; + mInfo( + "arbgroup:%d, perform update action. members[0].token:%s, members[1].token:%s, isSync:%d, as-dnodeid:%d, " + "as-token:%s, as-acked:%d, version:%" PRId64, + pOld->vgId, pOld->members[0].state.token, pOld->members[1].state.token, pOld->isSync, + pOld->assignedLeader.dnodeId, pOld->assignedLeader.token, pOld->assignedLeader.acked, pOld->version); + _OVER: (void)taosThreadMutexUnlock(&pOld->mutex); @@ -580,19 +583,77 @@ static int32_t mndSendArbSetAssignedLeaderReq(SMnode *pMnode, int32_t dnodeId, i return code; } +void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, ECheckSyncOp *pOp, SArbGroup *pNewGroup) { + *pOp = CHECK_SYNC_NONE; + int32_t code = 0; + + int32_t vgId = pArbGroup->vgId; + + bool member0IsTimeout = mndCheckArbMemberHbTimeout(pArbGroup, 0, nowMs); + bool member1IsTimeout = mndCheckArbMemberHbTimeout(pArbGroup, 1, nowMs); + SArbAssignedLeader *pAssignedLeader = &pArbGroup->assignedLeader; + int32_t currentAssignedDnodeId = pAssignedLeader->dnodeId; + + // 1. has assigned && no response => send req + if (currentAssignedDnodeId != 0 && pAssignedLeader->acked == false) { + *pOp = CHECK_SYNC_SET_ASSIGNED_LEADER; + return; + } + + // 2. both of the two members are timeout => skip + if (member0IsTimeout && member1IsTimeout) { + return; + } + + // 3. 
no member is timeout => check sync + if (member0IsTimeout == false && member1IsTimeout == false) { + // no assigned leader and not sync + if (currentAssignedDnodeId == 0 && !pArbGroup->isSync) { + *pOp = CHECK_SYNC_CHECK_SYNC; + } + return; + } + + // 4. one of the members is timeout => set assigned leader + int32_t candidateIndex = member0IsTimeout ? 1 : 0; + SArbGroupMember *pMember = &pArbGroup->members[candidateIndex]; + + // has assigned leader and dnodeId not match => skip + if (currentAssignedDnodeId != 0 && currentAssignedDnodeId != pMember->info.dnodeId) { + mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, assigned leader has been set to dnodeId:%d", vgId, + pMember->info.dnodeId, currentAssignedDnodeId); + return; + } + + // not sync => skip + if (pArbGroup->isSync == false) { + if (currentAssignedDnodeId == pMember->info.dnodeId) { + mDebug("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId, + pMember->info.dnodeId); + } else { + mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId, + pMember->info.dnodeId); + } + return; + } + + // is sync && no assigned leader => write to sdb + mndArbGroupDupObj(pArbGroup, pNewGroup); + mndArbGroupSetAssignedLeader(pNewGroup, candidateIndex); + *pOp = CHECK_SYNC_UPDATE; +} + static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) { - int32_t code = 0; + int32_t code = 0, lino = 0; SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; SArbGroup *pArbGroup = NULL; - SArbGroup arbGroupDup = {0}; void *pIter = NULL; + SArray *pUpdateArray = NULL; char arbToken[TSDB_ARB_TOKEN_SIZE]; - if ((code = mndGetArbToken(pMnode, arbToken)) != 0) { - mError("failed to get arb token for arb-check-sync timer"); - TAOS_RETURN(code); - } + TAOS_CHECK_EXIT(mndGetArbToken(pMnode, arbToken)); + int64_t term = mndGetTerm(pMnode); if (term < 0) { mError("arb failed to get term since %s", terrstr()); @@ -609,88 +670,64 @@ static int32_t 
mndProcessArbCheckSyncTimer(SRpcMsg *pReq) { return 0; } - SArray *pUpdateArray = taosArrayInit(16, sizeof(SArbGroup)); - while (1) { pIter = sdbFetch(pSdb, SDB_ARBGROUP, pIter, (void **)&pArbGroup); if (pIter == NULL) break; + SArbGroup arbGroupDup = {0}; + (void)taosThreadMutexLock(&pArbGroup->mutex); mndArbGroupDupObj(pArbGroup, &arbGroupDup); (void)taosThreadMutexUnlock(&pArbGroup->mutex); - int32_t vgId = arbGroupDup.vgId; - - bool member0IsTimeout = mndCheckArbMemberHbTimeout(&arbGroupDup, 0, nowMs); - bool member1IsTimeout = mndCheckArbMemberHbTimeout(&arbGroupDup, 1, nowMs); - SArbAssignedLeader *pAssignedLeader = &arbGroupDup.assignedLeader; - int32_t currentAssignedDnodeId = pAssignedLeader->dnodeId; - - // 1. has assigned && is sync && no response => send req - if (currentAssignedDnodeId != 0 && arbGroupDup.isSync == true && pAssignedLeader->acked == false) { - (void)mndSendArbSetAssignedLeaderReq(pMnode, currentAssignedDnodeId, vgId, arbToken, term, - pAssignedLeader->token); - mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", vgId, currentAssignedDnodeId); - sdbRelease(pSdb, pArbGroup); - continue; - } - - // 2. both of the two members are timeout => skip - if (member0IsTimeout && member1IsTimeout) { - sdbRelease(pSdb, pArbGroup); - continue; - } + sdbRelease(pSdb, pArbGroup); - // 3. 
no member is timeout => check sync - if (member0IsTimeout == false && member1IsTimeout == false) { - // no assigned leader and not sync - if (currentAssignedDnodeId == 0 && !arbGroupDup.isSync) { - (void)mndSendArbCheckSyncReq(pMnode, arbGroupDup.vgId, arbToken, term, arbGroupDup.members[0].state.token, + ECheckSyncOp op = CHECK_SYNC_NONE; + SArbGroup newGroup = {0}; + mndArbCheckSync(&arbGroupDup, nowMs, &op, &newGroup); + + int32_t vgId = arbGroupDup.vgId; + SArbAssignedLeader *pAssgndLeader = &arbGroupDup.assignedLeader; + int32_t assgndDnodeId = pAssgndLeader->dnodeId; + + switch (op) { + case CHECK_SYNC_NONE: + mTrace("vgId:%d, arb skip to send msg by check sync", vgId); + break; + case CHECK_SYNC_SET_ASSIGNED_LEADER: + (void)mndSendArbSetAssignedLeaderReq(pMnode, assgndDnodeId, vgId, arbToken, term, pAssgndLeader->token); + mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", vgId, assgndDnodeId); + break; + case CHECK_SYNC_CHECK_SYNC: + (void)mndSendArbCheckSyncReq(pMnode, vgId, arbToken, term, arbGroupDup.members[0].state.token, arbGroupDup.members[1].state.token); - } - sdbRelease(pSdb, pArbGroup); - continue; - } - - // 4. one of the members is timeout => set assigned leader - int32_t candidateIndex = member0IsTimeout ? 
1 : 0; - SArbGroupMember *pMember = &arbGroupDup.members[candidateIndex]; - - // has assigned leader and dnodeId not match => skip - if (currentAssignedDnodeId != 0 && currentAssignedDnodeId != pMember->info.dnodeId) { - mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, assigned leader has been set to dnodeId:%d", vgId, - pMember->info.dnodeId, currentAssignedDnodeId); - sdbRelease(pSdb, pArbGroup); - continue; - } + mInfo("vgId:%d, arb send check sync request", vgId); + break; + case CHECK_SYNC_UPDATE: + if (!pUpdateArray) { + pUpdateArray = taosArrayInit(16, sizeof(SArbGroup)); + if (!pUpdateArray) { + TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); + } + } - // not sync => skip - if (arbGroupDup.isSync == false) { - if (currentAssignedDnodeId == pMember->info.dnodeId) { - mDebug("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId, - pMember->info.dnodeId); - } else { - mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId, - pMember->info.dnodeId); - } - sdbRelease(pSdb, pArbGroup); - continue; + if (taosArrayPush(pUpdateArray, &newGroup) == NULL) { + TAOS_CHECK_EXIT(terrno); + } + break; + default: + mError("vgId:%d, arb unknown check sync op:%d", vgId, op); + break; } + } - // is sync && no assigned leader => write to sdb - SArbGroup newGroup = {0}; - mndArbGroupDupObj(&arbGroupDup, &newGroup); - mndArbGroupSetAssignedLeader(&newGroup, candidateIndex); - if (taosArrayPush(pUpdateArray, &newGroup) == NULL) { - taosArrayDestroy(pUpdateArray); - return terrno; - } + TAOS_CHECK_EXIT(mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray)); - sdbRelease(pSdb, pArbGroup); +_exit: + if (code != 0) { + mError("failed to check sync at line %d since %s", lino, terrstr()); } - TAOS_CHECK_RETURN(mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray)); - taosArrayDestroy(pUpdateArray); return 0; } @@ -834,12 +871,12 @@ static int32_t mndProcessArbUpdateGroupBatchReq(SRpcMsg *pReq) { newGroup.dbUid = 
pUpdateGroup->dbUid; for (int i = 0; i < TSDB_ARB_GROUP_MEMBER_NUM; i++) { newGroup.members[i].info.dnodeId = pUpdateGroup->members[i].dnodeId; - (void)memcpy(newGroup.members[i].state.token, pUpdateGroup->members[i].token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(newGroup.members[i].state.token, pUpdateGroup->members[i].token, TSDB_ARB_TOKEN_SIZE); } newGroup.isSync = pUpdateGroup->isSync; newGroup.assignedLeader.dnodeId = pUpdateGroup->assignedLeader.dnodeId; - (void)memcpy(newGroup.assignedLeader.token, pUpdateGroup->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(newGroup.assignedLeader.token, pUpdateGroup->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); newGroup.assignedLeader.acked = pUpdateGroup->assignedLeader.acked; newGroup.version = pUpdateGroup->version; @@ -897,7 +934,7 @@ static void mndArbGroupSetAssignedLeader(SArbGroup *pGroup, int32_t index) { SArbGroupMember *pMember = &pGroup->members[index]; pGroup->assignedLeader.dnodeId = pMember->info.dnodeId; - (void)strncpy(pGroup->assignedLeader.token, pMember->state.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pGroup->assignedLeader.token, pMember->state.token, TSDB_ARB_TOKEN_SIZE); pGroup->assignedLeader.acked = false; } @@ -979,7 +1016,7 @@ bool mndUpdateArbGroupByHeartBeat(SArbGroup *pGroup, SVArbHbRspMember *pRspMembe // update token mndArbGroupDupObj(pGroup, pNewGroup); - (void)memcpy(pNewGroup->members[index].state.token, pRspMember->memberToken, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pNewGroup->members[index].state.token, pRspMember->memberToken, TSDB_ARB_TOKEN_SIZE); pNewGroup->isSync = false; bool resetAssigned = false; diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index b12345d45cf..fbce8f544ae 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -239,12 +239,13 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { MND_TMQ_RETURN_CHECK(mndAcquireConsumer(pMnode, consumerId, &pConsumer)); 
MND_TMQ_RETURN_CHECK(checkPrivilege(pMnode, pConsumer, &rsp, pMsg->info.conn.user)); atomic_store_32(&pConsumer->hbStatus, 0); + mDebug("consumer:0x%" PRIx64 " receive hb pollFlag:%d %d", consumerId, req.pollFlag, pConsumer->pollStatus); if (req.pollFlag == 1){ atomic_store_32(&pConsumer->pollStatus, 0); } storeOffsetRows(pMnode, &req, pConsumer); - rsp.debugFlag = tqClientDebug; + rsp.debugFlag = tqClientDebugFlag; code = buildMqHbRsp(pMsg, &rsp); END: diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 7c42564f4c4..0d17ccd0b08 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -462,8 +462,8 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) { if (pCfg->cacheLast < TSDB_CACHE_MODEL_NONE || pCfg->cacheLast > TSDB_CACHE_MODEL_BOTH) return code; if (pCfg->hashMethod != 1) return code; if (pCfg->replications > mndGetDnodeSize(pMnode)) { - terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES; - return code; + code = TSDB_CODE_MND_NO_ENOUGH_DNODES; + TAOS_RETURN(code); } if (pCfg->walRetentionPeriod < TSDB_DB_MIN_WAL_RETENTION_PERIOD) return code; if (pCfg->walRetentionSize < TSDB_DB_MIN_WAL_RETENTION_SIZE) return code; @@ -583,7 +583,7 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) { if (pCfg->tsdbPageSize <= 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE; if (pCfg->s3ChunkSize <= 0) pCfg->s3ChunkSize = TSDB_DEFAULT_S3_CHUNK_SIZE; if (pCfg->s3KeepLocal <= 0) pCfg->s3KeepLocal = TSDB_DEFAULT_S3_KEEP_LOCAL; - if (pCfg->s3Compact <= 0) pCfg->s3Compact = TSDB_DEFAULT_S3_COMPACT; + if (pCfg->s3Compact < 0) pCfg->s3Compact = TSDB_DEFAULT_S3_COMPACT; if (pCfg->withArbitrator < 0) pCfg->withArbitrator = TSDB_DEFAULT_DB_WITH_ARBITRATOR; if (pCfg->encryptAlgorithm < 0) pCfg->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO; } @@ -746,7 +746,7 @@ static int32_t mndSetCreateDbUndoActions(SMnode *pMnode, STrans *pTrans, SDbObj TAOS_RETURN(code); } -static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg 
*pReq, SCreateDbReq *pCreate, SUserObj *pUser) { +static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, SUserObj *pUser, SArray *dnodeList) { int32_t code = 0; SUserObj newUserObj = {0}; SDbObj dbObj = {0}; @@ -823,7 +823,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate, } SVgObj *pVgroups = NULL; - if ((code = mndAllocVgroup(pMnode, &dbObj, &pVgroups)) != 0) { + if ((code = mndAllocVgroup(pMnode, &dbObj, &pVgroups, dnodeList)) != 0) { mError("db:%s, failed to create, alloc vgroup failed, since %s", pCreate->db, terrstr()); TAOS_RETURN(code); } @@ -925,6 +925,17 @@ static int32_t mndCheckDbEncryptKey(SMnode *pMnode, SCreateDbReq *pReq) { TAOS_RETURN(code); } +#ifndef TD_ENTERPRISE +int32_t mndCheckDbDnodeList(SMnode *pMnode, char *db, char *dnodeListStr, SArray *dnodeList) { + if (dnodeListStr[0] != 0) { + terrno = TSDB_CODE_OPS_NOT_SUPPORT; + return terrno; + } else { + return 0; + } +} +#endif + static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; int32_t code = -1; @@ -932,6 +943,10 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { SDbObj *pDb = NULL; SUserObj *pUser = NULL; SCreateDbReq createReq = {0}; + SArray *dnodeList = NULL; + + dnodeList = taosArrayInit(mndGetDnodeSize(pMnode), sizeof(int32_t)); + TSDB_CHECK_NULL(dnodeList, code, lino, _OVER, TSDB_CODE_OUT_OF_MEMORY); TAOS_CHECK_GOTO(tDeserializeSCreateDbReq(pReq->pCont, pReq->contLen, &createReq), NULL, _OVER); #ifdef WINDOWS @@ -975,9 +990,11 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { TAOS_CHECK_GOTO(mndCheckDbEncryptKey(pMnode, &createReq), &lino, _OVER); + TAOS_CHECK_GOTO(mndCheckDbDnodeList(pMnode, createReq.db, createReq.dnodeListStr, dnodeList), &lino, _OVER); + TAOS_CHECK_GOTO(mndAcquireUser(pMnode, pReq->info.conn.user, &pUser), &lino, _OVER); - TAOS_CHECK_GOTO(mndCreateDb(pMnode, pReq, &createReq, pUser), &lino, _OVER); + TAOS_CHECK_GOTO(mndCreateDb(pMnode, pReq, &createReq, 
pUser, dnodeList), &lino, _OVER); if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS; SName name = {0}; @@ -994,6 +1011,7 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { mndReleaseDb(pMnode, pDb); mndReleaseUser(pMnode, pUser); tFreeSCreateDbReq(&createReq); + taosArrayDestroy(dnodeList); TAOS_RETURN(code); } @@ -1168,7 +1186,9 @@ static int32_t mndSetAlterDbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj * SSdb *pSdb = pMnode->pSdb; void *pIter = NULL; SVgObj *pVgroup = NULL; - SArray *pArray = mndBuildDnodesArray(pMnode, 0); + SArray *pArray = mndBuildDnodesArray(pMnode, 0, NULL); + + TSDB_CHECK_NULL(pArray, code, lino, _err, TSDB_CODE_OUT_OF_MEMORY); while (1) { pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 5e10583a0a6..24ae8382f93 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -86,6 +86,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq); static int32_t mndProcessNotifyReq(SRpcMsg *pReq); static int32_t mndProcessRestoreDnodeReq(SRpcMsg *pReq); static int32_t mndProcessStatisReq(SRpcMsg *pReq); +static int32_t mndProcessAuditReq(SRpcMsg *pReq); static int32_t mndProcessUpdateDnodeInfoReq(SRpcMsg *pReq); static int32_t mndProcessCreateEncryptKeyReq(SRpcMsg *pRsp); static int32_t mndProcessCreateEncryptKeyRsp(SRpcMsg *pRsp); @@ -125,6 +126,7 @@ int32_t mndInitDnode(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_SHOW_VARIABLES, mndProcessShowVariablesReq); mndSetMsgHandle(pMnode, TDMT_MND_RESTORE_DNODE, mndProcessRestoreDnodeReq); mndSetMsgHandle(pMnode, TDMT_MND_STATIS, mndProcessStatisReq); + mndSetMsgHandle(pMnode, TDMT_MND_AUDIT, mndProcessAuditReq); mndSetMsgHandle(pMnode, TDMT_MND_CREATE_ENCRYPT_KEY, mndProcessCreateEncryptKeyReq); mndSetMsgHandle(pMnode, TDMT_DND_CREATE_ENCRYPT_KEY_RSP, mndProcessCreateEncryptKeyRsp); mndSetMsgHandle(pMnode, TDMT_MND_UPDATE_DNODE_INFO, 
mndProcessUpdateDnodeInfoReq); @@ -604,6 +606,24 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) { return 0; } +static int32_t mndProcessAuditReq(SRpcMsg *pReq) { + mTrace("process audit req:%p", pReq); + if (tsEnableAudit && tsEnableAuditDelete) { + SMnode *pMnode = pReq->info.node; + SAuditReq auditReq = {0}; + + TAOS_CHECK_RETURN(tDeserializeSAuditReq(pReq->pCont, pReq->contLen, &auditReq)); + + mDebug("received audit req:%s, %s, %s, %s", auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql); + + auditAddRecord(pReq, pMnode->clusterId, auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql, + auditReq.sqlLen); + + tFreeSAuditReq(&auditReq); + } + return 0; +} + static int32_t mndUpdateDnodeObj(SMnode *pMnode, SDnodeObj *pDnode) { int32_t code = 0, lino = 0; SDnodeInfoReq infoReq = {0}; @@ -1068,7 +1088,7 @@ static int32_t mndProcessShowVariablesReq(SRpcMsg *pReq) { SShowVariablesRsp rsp = {0}; int32_t code = -1; - if (mndCheckOperPrivilege(pReq->info.node, pReq->info.conn.user, MND_OPER_SHOW_VARIBALES) != 0) { + if (mndCheckOperPrivilege(pReq->info.node, pReq->info.conn.user, MND_OPER_SHOW_VARIABLES) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 08ebf52ec62..6c30193ea76 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -1021,7 +1021,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr } // cluster info - tstrncpy(pClusterInfo->version, version, sizeof(pClusterInfo->version)); + tstrncpy(pClusterInfo->version, td_version, sizeof(pClusterInfo->version)); pClusterInfo->monitor_interval = tsMonitorInterval; pClusterInfo->connections_total = mndGetNumOfConnections(pMnode); pClusterInfo->dbs_total = sdbGetSize(pSdb, SDB_DB); diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index a1ffee9b06a..21aba8df100 100644 --- 
a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -239,8 +239,8 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { goto _OVER; } - if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, version, 3)) != 0) { - mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, version); + if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, td_version, 3)) != 0) { + mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, td_version); goto _OVER; } @@ -305,12 +305,13 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { connectRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen; connectRsp.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold; connectRsp.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest; + connectRsp.enableAuditDelete = tsEnableAuditDelete; tstrncpy(connectRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN); connectRsp.whiteListVer = pUser->ipWhiteListVer; - (void)strcpy(connectRsp.sVer, version); - (void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", version, - buildinfo, gitinfo); + tstrncpy(connectRsp.sVer, td_version, sizeof(connectRsp.sVer)); + (void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", td_version, + td_buildinfo, td_gitinfo); mndGetMnodeEpSet(pMnode, &connectRsp.epSet); int32_t contLen = tSerializeSConnectRsp(NULL, 0, &connectRsp); @@ -709,6 +710,7 @@ static int32_t mndProcessHeartBeatReq(SRpcMsg *pReq) { tstrncpy(batchRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN); batchRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen; batchRsp.monitorParas.tsSlowLogScope = tsSlowLogScope; + batchRsp.enableAuditDelete = tsEnableAuditDelete; int32_t sz = taosArrayGetSize(batchReq.reqs); for (int i = 0; i < sz; i++) { @@ -813,7 +815,7 @@ static int32_t mndProcessSvrVerReq(SRpcMsg *pReq) { 
int32_t code = 0; int32_t lino = 0; SServerVerRsp rsp = {0}; - tstrncpy(rsp.ver, version, sizeof(rsp.ver)); + tstrncpy(rsp.ver, td_version, sizeof(rsp.ver)); int32_t contLen = tSerializeSServerVerRsp(NULL, 0, &rsp); if (contLen < 0) { diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 4f72b26a5ef..e67e4a963bd 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -242,13 +242,13 @@ static int32_t doAddSinkTask(SStreamObj* pStream, SMnode* pMnode, SVgObj* pVgrou SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks); SStreamTask* pTask = NULL; - int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, pEpset, isFillhistory, 0, *pTaskList, pStream->conf.fillHistory, + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SINK, pEpset, isFillhistory, 0, 0, *pTaskList, pStream->conf.fillHistory, pStream->subTableWithoutMd5, &pTask); if (code != 0) { return code; } - mDebug("doAddSinkTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory); + mDebug("doAddSinkTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId, isFillhistory); pTask->info.nodeId = pVgroup->vgId; pTask->info.epSet = mndGetVgroupEpset(pMnode, pVgroup); @@ -356,20 +356,22 @@ static int32_t buildSourceTask(SStreamObj* pStream, SEpSet* pEpset, bool isFillh uint64_t uid = (isFillhistory) ? pStream->hTaskUid : pStream->uid; SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks); - int32_t code = tNewStreamTask(uid, TASK_LEVEL__SOURCE, pEpset, isFillhistory, useTriggerParam ? pStream->conf.triggerParam : 0, - *pTaskList, pStream->conf.fillHistory, pStream->subTableWithoutMd5, pTask); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__SOURCE, pEpset, isFillhistory, pStream->conf.trigger, + useTriggerParam ? 
pStream->conf.triggerParam : 0, *pTaskList, pStream->conf.fillHistory, + pStream->subTableWithoutMd5, pTask); return code; } static void addNewTaskList(SStreamObj* pStream) { SArray* pTaskList = taosArrayInit(0, POINTER_BYTES); if (taosArrayPush(pStream->tasks, &pTaskList) == NULL) { - mError("failed to put array"); + mError("failed to put into array"); } + if (pStream->conf.fillHistory) { pTaskList = taosArrayInit(0, POINTER_BYTES); if (taosArrayPush(pStream->pHTasksList, &pTaskList) == NULL) { - mError("failed to put array"); + mError("failed to put into array"); } } } @@ -395,17 +397,18 @@ static void setHTasksId(SStreamObj* pStream) { } static int32_t doAddSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream, SEpSet* pEpset, int64_t skey, - SArray* pVerList, SVgObj* pVgroup, bool isFillhistory, bool useTriggerParam) { + SArray* pVerList, SVgObj* pVgroup, bool isHistoryTask, bool useTriggerParam) { SStreamTask* pTask = NULL; - int32_t code = buildSourceTask(pStream, pEpset, isFillhistory, useTriggerParam, &pTask); + int32_t code = buildSourceTask(pStream, pEpset, isHistoryTask, useTriggerParam, &pTask); if (code != TSDB_CODE_SUCCESS) { return code; } - mDebug("doAddSourceTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory); + mDebug("doAddSourceTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId, + isHistoryTask); if (pStream->conf.fillHistory) { - haltInitialTaskStatus(pTask, plan, isFillhistory); + haltInitialTaskStatus(pTask, plan, isHistoryTask); } streamTaskSetDataRange(pTask, skey, pVerList, pVgroup->vgId); @@ -451,10 +454,12 @@ static SSubplan* getAggSubPlan(const SQueryPlan* pPlan, int index) { static int32_t addSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream, SEpSet* pEpset, int64_t nextWindowSkey, SArray* pVerList, bool useTriggerParam) { + void* pIter = NULL; + int32_t code = 0; + SSdb* pSdb = pMnode->pSdb; + addNewTaskList(pStream); - void* pIter = NULL; - 
SSdb* pSdb = pMnode->pSdb; while (1) { SVgObj* pVgroup; pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup); @@ -467,10 +472,9 @@ static int32_t addSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream continue; } - int code = - doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, false, useTriggerParam); + code = doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, false, useTriggerParam); if (code != 0) { - mError("create stream task, code:%s", tstrerror(code)); + mError("failed to create stream task, code:%s", tstrerror(code)); // todo drop the added source tasks. sdbRelease(pSdb, pVgroup); @@ -502,9 +506,9 @@ static int32_t buildAggTask(SStreamObj* pStream, SEpSet* pEpset, bool isFillhist uint64_t uid = (isFillhistory) ? pStream->hTaskUid : pStream->uid; SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks); - int32_t code = - tNewStreamTask(uid, TASK_LEVEL__AGG, pEpset, isFillhistory, useTriggerParam ? pStream->conf.triggerParam : 0, - *pTaskList, pStream->conf.fillHistory, pStream->subTableWithoutMd5, pAggTask); + int32_t code = tNewStreamTask(uid, TASK_LEVEL__AGG, pEpset, isFillhistory, pStream->conf.trigger, + useTriggerParam ? 
pStream->conf.triggerParam : 0, *pTaskList, pStream->conf.fillHistory, + pStream->subTableWithoutMd5, pAggTask); return code; } @@ -512,19 +516,20 @@ static int32_t doAddAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, SSnodeObj* pSnode, bool isFillhistory, bool useTriggerParam) { int32_t code = 0; SStreamTask* pTask = NULL; + const char* id = NULL; code = buildAggTask(pStream, pEpset, isFillhistory, useTriggerParam, &pTask); if (code != TSDB_CODE_SUCCESS) { return code; } + id = pTask->id.idStr; if (pSnode != NULL) { code = mndAssignStreamTaskToSnode(pMnode, pTask, plan, pSnode); - mDebug("doAddAggTask taskId:%s, snode id:%d, isFillHistory:%d", pTask->id.idStr, pSnode->id, isFillhistory); - + mDebug("doAddAggTask taskId:%s, %p snode id:%d, isFillHistory:%d", id, pTask, pSnode->id, isFillhistory); } else { code = mndAssignStreamTaskToVgroup(pMnode, pTask, plan, pVgroup); - mDebug("doAddAggTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory); + mDebug("doAddAggTask taskId:%s, %p vgId:%d, isFillHistory:%d", id, pTask, pVgroup->vgId, isFillhistory); } return code; } @@ -678,7 +683,7 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan* if (numOfPlanLevel > 1 || externalTargetDB || multiTarget || pStream->fixedSinkVgId) { // add extra sink hasExtraSink = true; - int32_t code = addSinkTask(pMnode, pStream, pEpset); + code = addSinkTask(pMnode, pStream, pEpset); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -692,7 +697,8 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan* if (terrno != 0) code = terrno; TAOS_RETURN(code); } - code = addSourceTask(pMnode, plan, pStream, pEpset, skey, pVerList, numOfPlanLevel == 1); + + code = addSourceTask(pMnode, plan, pStream, pEpset, skey, pVerList, (numOfPlanLevel == 1)); if (code != TSDB_CODE_SUCCESS) { return code; } diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 
264fea3476b..29f6c32dbe6 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -333,8 +333,8 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { mDebug("show:0x%" PRIx64 ", stop retrieve data, rowsRead:%d numOfRows:%d", pShow->id, rowsRead, pShow->numOfRows); } - size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns + - blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)); + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns + dataEncodeBufSize; SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size); if (pRsp == NULL) { @@ -361,7 +361,7 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { pStart += sizeof(SSysTableSchema); } - int32_t len = blockEncode(pBlock, pStart, pShow->pMeta->numOfColumns); + int32_t len = blockEncode(pBlock, pStart, dataEncodeBufSize, pShow->pMeta->numOfColumns); if(len < 0){ mError("show:0x%" PRIx64 ", failed to retrieve data since %s", pShow->id, tstrerror(code)); code = terrno; diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 23e91e8d63f..cded2b821f4 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -1931,7 +1931,7 @@ static int32_t mndDropSuperTableColumn(SMnode *pMnode, const SStbObj *pOld, SStb } if (pOld->numOfColumns == 2) { - code = TSDB_CODE_MND_INVALID_STB_ALTER_OPTION; + code = TSDB_CODE_PAR_INVALID_DROP_COL; TAOS_RETURN(code); } @@ -4079,7 +4079,7 @@ typedef struct SMDropTbDbInfo { typedef struct SMDropTbTsmaInfo { char tsmaResTbDbFName[TSDB_DB_FNAME_LEN]; - char tsmaResTbNamePrefix[TSDB_TABLE_NAME_LEN]; + char tsmaResTbNamePrefix[TSDB_TABLE_FNAME_LEN]; int32_t suid; SMDropTbDbInfo dbInfo; // reference to DbInfo in pDbMap } SMDropTbTsmaInfo; @@ -4210,6 +4210,7 @@ 
static int32_t mndCreateDropTbsTxnPrepare(SRpcMsg *pRsp, SMndDropTbsWithTsmaCtx SMnode *pMnode = pRsp->info.node; STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pRsp, "drop-tbs"); mndTransSetChangeless(pTrans); + mndTransSetSerial(pTrans); if (pTrans == NULL) { code = TSDB_CODE_MND_RETURN_VALUE_NULL; if (terrno != 0) code = terrno; @@ -4297,6 +4298,18 @@ static int32_t mndDropTbAdd(SMnode *pMnode, SHashObj *pVgHashMap, const SVgroupI return 0; } +int vgInfoCmp(const void *lp, const void *rp) { + SVgroupInfo *pLeft = (SVgroupInfo *)lp; + SVgroupInfo *pRight = (SVgroupInfo *)rp; + if (pLeft->hashBegin < pRight->hashBegin) { + return -1; + } else if (pLeft->hashBegin > pRight->hashBegin) { + return 1; + } + + return 0; +} + static int32_t mndGetDbVgInfoForTsma(SMnode *pMnode, const char *dbname, SMDropTbTsmaInfo *pInfo) { int32_t code = 0; SDbObj *pDb = mndAcquireDb(pMnode, dbname); @@ -4311,6 +4324,7 @@ static int32_t mndGetDbVgInfoForTsma(SMnode *pMnode, const char *dbname, SMDropT goto _end; } mndBuildDBVgroupInfo(pDb, pMnode, pInfo->dbInfo.dbVgInfos); + taosArraySort(pInfo->dbInfo.dbVgInfos, vgInfoCmp); pInfo->dbInfo.hashPrefix = pDb->cfg.hashPrefix; pInfo->dbInfo.hashSuffix = pDb->cfg.hashSuffix; @@ -4383,9 +4397,8 @@ static int32_t mndDropTbAddTsmaResTbsForSingleVg(SMnode *pMnode, SMndDropTbsWith if (pInfos) { SMDropTbTsmaInfo info = {0}; int32_t len = sprintf(buf, "%s", pSma->name); - len = taosCreateMD5Hash(buf, len); sprintf(info.tsmaResTbDbFName, "%s", pSma->db); - snprintf(info.tsmaResTbNamePrefix, TSDB_TABLE_NAME_LEN, "%s", buf); + snprintf(info.tsmaResTbNamePrefix, TSDB_TABLE_FNAME_LEN, "%s", buf); SMDropTbDbInfo *pDbInfo = taosHashGet(pCtx->pDbMap, pSma->db, TSDB_DB_FNAME_LEN); info.suid = pSma->dstTbUid; if (!pDbInfo) { @@ -4420,14 +4433,17 @@ static int32_t mndDropTbAddTsmaResTbsForSingleVg(SMnode *pMnode, SMndDropTbsWith SMDropTbTsmaInfos *pInfos = taosHashGet(pCtx->pTsmaMap, &pTb->suid, sizeof(pTb->suid)); SArray 
*pVgInfos = NULL; - char buf[TSDB_TABLE_FNAME_LEN]; + char buf[TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN + 1]; + char resTbFullName[TSDB_TABLE_FNAME_LEN + 1] = {0}; for (int32_t j = 0; j < pInfos->pTsmaInfos->size; ++j) { SMDropTbTsmaInfo *pInfo = taosArrayGet(pInfos->pTsmaInfos, j); - int32_t len = sprintf(buf, "%s.%s_%s", pInfo->tsmaResTbDbFName, pInfo->tsmaResTbNamePrefix, pTb->name); - uint32_t hashVal = - taosGetTbHashVal(buf, len, pInfo->dbInfo.hashMethod, pInfo->dbInfo.hashPrefix, pInfo->dbInfo.hashSuffix); + int32_t len = sprintf(buf, "%s_%s", pInfo->tsmaResTbNamePrefix, pTb->name); + len = taosCreateMD5Hash(buf, len); + len = snprintf(resTbFullName, TSDB_TABLE_FNAME_LEN + 1, "%s.%s", pInfo->tsmaResTbDbFName, buf); + uint32_t hashVal = taosGetTbHashVal(resTbFullName, len, pInfo->dbInfo.hashMethod, pInfo->dbInfo.hashPrefix, + pInfo->dbInfo.hashSuffix); const SVgroupInfo *pVgInfo = taosArraySearch(pInfo->dbInfo.dbVgInfos, &hashVal, vgHashValCmp, TD_EQ); - void *p = taosStrdup(buf + strlen(pInfo->tsmaResTbDbFName) + TSDB_NAME_DELIMITER_LEN); + void *p = taosStrdup(resTbFullName + strlen(pInfo->tsmaResTbDbFName) + TSDB_NAME_DELIMITER_LEN); if (taosArrayPush(pCtx->pResTbNames, &p) == NULL) { code = terrno; goto _end; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index a4327b777fc..81db427afda 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -454,17 +454,16 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, pObj->outputSchema.pSchema = pFullSchema; } - bool hasKey = hasDestPrimaryKey(&pObj->outputSchema); SPlanContext cxt = { .pAstRoot = pAst, .topicQuery = false, .streamQuery = true, - .triggerType = pObj->conf.trigger == STREAM_TRIGGER_MAX_DELAY ? STREAM_TRIGGER_WINDOW_CLOSE : pObj->conf.trigger, + .triggerType = (pObj->conf.trigger == STREAM_TRIGGER_MAX_DELAY)? 
STREAM_TRIGGER_WINDOW_CLOSE : pObj->conf.trigger, .watermark = pObj->conf.watermark, .igExpired = pObj->conf.igExpired, .deleteMark = pObj->deleteMark, .igCheckUpdate = pObj->igCheckUpdate, - .destHasPrimaryKey = hasKey, + .destHasPrimaryKey = hasDestPrimaryKey(&pObj->outputSchema), }; // using ast and param to build physical plan @@ -795,12 +794,22 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { } if (createReq.sql != NULL) { - sqlLen = strlen(createReq.sql); - sql = taosMemoryMalloc(sqlLen + 1); + sql = taosStrdup(createReq.sql); TSDB_CHECK_NULL(sql, code, lino, _OVER, terrno); + } + + SDbObj *pSourceDb = mndAcquireDb(pMnode, createReq.sourceDB); + if (pSourceDb == NULL) { + code = terrno; + mInfo("stream:%s failed to create, acquire source db %s failed, code:%s", createReq.name, createReq.sourceDB, + tstrerror(code)); + goto _OVER; + } - memset(sql, 0, sqlLen + 1); - memcpy(sql, createReq.sql, sqlLen); + code = mndCheckForSnode(pMnode, pSourceDb); + mndReleaseDb(pMnode, pSourceDb); + if (code != 0) { + goto _OVER; } // build stream obj from request @@ -1284,9 +1293,10 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { void* p = taosArrayPush(pList, &in); if (p) { int32_t currentSize = taosArrayGetSize(pList); - mDebug("stream:%s (uid:0x%" PRIx64 ") checkpoint interval beyond threshold: %ds(%" PRId64 - "s) beyond concurrently launch threshold:%d", - pStream->name, pStream->uid, tsStreamCheckpointInterval, duration / 1000, currentSize); + mDebug("stream:%s (uid:0x%" PRIx64 ") total %d stream(s) beyond chpt interval threshold: %ds(%" PRId64 + "s), concurrently launch threshold:%d", + pStream->name, pStream->uid, currentSize, tsStreamCheckpointInterval, duration / 1000, + tsMaxConcurrentCheckpoint); } else { mError("failed to record the checkpoint interval info, stream:0x%" PRIx64, pStream->uid); } @@ -1338,7 +1348,7 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { code = mndProcessStreamCheckpointTrans(pMnode, p, checkpointId, 
1, true); sdbRelease(pSdb, p); - if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { + if (code == 0 || code == TSDB_CODE_ACTION_IN_PROGRESS) { started += 1; if (started >= capacity) { @@ -1346,6 +1356,8 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { (started + numOfCheckpointTrans)); break; } + } else { + mError("failed to start checkpoint trans, code:%s", tstrerror(code)); } } } diff --git a/source/dnode/mnode/impl/src/mndStreamUtil.c b/source/dnode/mnode/impl/src/mndStreamUtil.c index 6e48c58b30f..f9b7644af48 100644 --- a/source/dnode/mnode/impl/src/mndStreamUtil.c +++ b/source/dnode/mnode/impl/src/mndStreamUtil.c @@ -877,6 +877,8 @@ static void mndShowStreamTrigger(char *dst, SStreamObj *pStream) { strcpy(dst, "window close"); } else if (trigger == STREAM_TRIGGER_MAX_DELAY) { strcpy(dst, "max delay"); + } else if (trigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + strcpy(dst, "force window close"); } } @@ -1497,6 +1499,30 @@ bool mndStreamNodeIsUpdated(SMnode *pMnode) { return updated; } +int32_t mndCheckForSnode(SMnode *pMnode, SDbObj *pSrcDb) { + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + SSnodeObj *pObj = NULL; + + if (pSrcDb->cfg.replications == 1) { + return TSDB_CODE_SUCCESS; + } else { + while (1) { + pIter = sdbFetch(pSdb, SDB_SNODE, pIter, (void **)&pObj); + if (pIter == NULL) { + break; + } + + sdbRelease(pSdb, pObj); + sdbCancelFetch(pSdb, pIter); + return TSDB_CODE_SUCCESS; + } + + mError("snode not existed when trying to create stream in db with multiple replica"); + return TSDB_CODE_SNODE_NOT_DEPLOYED; + } +} + uint32_t seed = 0; static SRpcMsg createRpcMsg(STransAction* pAction, int64_t traceId, int64_t signature) { SRpcMsg rpcMsg = {.msgType = pAction->msgType, .contLen = pAction->contLen, .info.ahandle = (void *)signature}; diff --git a/source/dnode/mnode/impl/src/mndTelem.c b/source/dnode/mnode/impl/src/mndTelem.c index 0022aee6198..810c71b7c58 100644 --- a/source/dnode/mnode/impl/src/mndTelem.c +++ 
b/source/dnode/mnode/impl/src/mndTelem.c @@ -115,9 +115,9 @@ static char* mndBuildTelemetryReport(SMnode* pMnode) { snprintf(tmp, sizeof(tmp), "%" PRId64 " kB", tsTotalMemoryKB); TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), &lino, _OVER); - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), &lino, _OVER); - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), &lino, _OVER); - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), &lino, _OVER); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), &lino, _OVER); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), &lino, _OVER); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), &lino, _OVER); TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "email", pMgmt->email), &lino, _OVER); mndBuildRuntimeInfo(pMnode, pJson); diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index 5a79ac6bc8d..913e6e32958 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -717,11 +717,28 @@ static bool mndBuildDnodesArrayFp(SMnode *pMnode, void *pObj, void *p1, void *p2 SDnodeObj *pDnode = pObj; SArray *pArray = p1; int32_t exceptDnodeId = *(int32_t *)p2; + SArray *dnodeList = p3; if (exceptDnodeId == pDnode->id) { return true; } + if (dnodeList != NULL) { + int32_t dnodeListSize = taosArrayGetSize(dnodeList); + if (dnodeListSize > 0) { + bool inDnodeList = false; + for (int32_t index = 0; index < dnodeListSize; ++index) { + int32_t dnodeId = *(int32_t *)taosArrayGet(dnodeList, index); + if (pDnode->id == dnodeId) { + inDnodeList = true; + } + } + if (!inDnodeList) { + return true; + } + } + } + int64_t curMs = taosGetTimestampMs(); bool online = mndIsDnodeOnline(pDnode, curMs); bool isMnode = mndIsMnode(pMnode, pDnode->id); @@ -741,7 +758,7 @@ static bool mndBuildDnodesArrayFp(SMnode *pMnode, void *pObj, void *p1, void *p2 
return true; } -SArray *mndBuildDnodesArray(SMnode *pMnode, int32_t exceptDnodeId) { +SArray *mndBuildDnodesArray(SMnode *pMnode, int32_t exceptDnodeId, SArray *dnodeList) { SSdb *pSdb = pMnode->pSdb; int32_t numOfDnodes = mndGetDnodeSize(pMnode); @@ -752,7 +769,7 @@ SArray *mndBuildDnodesArray(SMnode *pMnode, int32_t exceptDnodeId) { } sdbTraverse(pSdb, SDB_DNODE, mndResetDnodesArrayFp, NULL, NULL, NULL); - sdbTraverse(pSdb, SDB_DNODE, mndBuildDnodesArrayFp, pArray, &exceptDnodeId, NULL); + sdbTraverse(pSdb, SDB_DNODE, mndBuildDnodesArrayFp, pArray, &exceptDnodeId, dnodeList); mDebug("build %d dnodes array", (int32_t)taosArrayGetSize(pArray)); for (int32_t i = 0; i < (int32_t)taosArrayGetSize(pArray); ++i) { @@ -845,7 +862,7 @@ static int32_t mndGetAvailableDnode(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup) { int32_t code = 0; - SArray *pArray = mndBuildDnodesArray(pMnode, 0); + SArray *pArray = mndBuildDnodesArray(pMnode, 0, NULL); if (pArray == NULL) { code = TSDB_CODE_MND_RETURN_VALUE_NULL; if (terrno != 0) code = terrno; @@ -868,7 +885,7 @@ int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup) { return 0; } -int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups) { +int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups, SArray *dnodeList) { int32_t code = -1; SArray *pArray = NULL; SVgObj *pVgroups = NULL; @@ -879,7 +896,7 @@ int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups) { goto _OVER; } - pArray = mndBuildDnodesArray(pMnode, 0); + pArray = mndBuildDnodesArray(pMnode, 0, dnodeList); if (pArray == NULL) { code = TSDB_CODE_MND_RETURN_VALUE_NULL; if (terrno != 0) code = terrno; @@ -2062,7 +2079,7 @@ int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, int32_t mndSetMoveVgroupsInfoToTrans(SMnode *pMnode, STrans *pTrans, int32_t delDnodeId, bool force, bool unsafe) { int32_t code = 0; - 
SArray *pArray = mndBuildDnodesArray(pMnode, delDnodeId); + SArray *pArray = mndBuildDnodesArray(pMnode, delDnodeId, NULL); if (pArray == NULL) { code = TSDB_CODE_MND_RETURN_VALUE_NULL; if (terrno != 0) code = terrno; @@ -3140,7 +3157,7 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro int32_t code = -1; STrans *pTrans = NULL; SDbObj dbObj = {0}; - SArray *pArray = mndBuildDnodesArray(pMnode, 0); + SArray *pArray = mndBuildDnodesArray(pMnode, 0, NULL); int32_t numOfStreams = 0; if ((code = mndGetNumOfStreams(pMnode, pDb->name, &numOfStreams)) != 0) { @@ -3506,7 +3523,7 @@ static int32_t mndProcessBalanceVgroupMsg(SRpcMsg *pReq) { sdbRelease(pMnode->pSdb, pDnode); } - pArray = mndBuildDnodesArray(pMnode, 0); + pArray = mndBuildDnodesArray(pMnode, 0, NULL); if (pArray == NULL) { code = TSDB_CODE_MND_RETURN_VALUE_NULL; if (terrno != 0) code = terrno; diff --git a/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp b/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp index fdfc560d54f..d5ca0194202 100644 --- a/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp +++ b/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp @@ -80,17 +80,17 @@ TEST_F(ArbgroupTest, 01_encode_decode_sdb) { SArbGroup* pNewGroup = (SArbGroup*)sdbGetRowObj(pRow); - EXPECT_EQ(group.vgId, pNewGroup->vgId); - EXPECT_EQ(group.dbUid, pNewGroup->dbUid); - EXPECT_EQ(group.members[0].info.dnodeId, pNewGroup->members[0].info.dnodeId); - EXPECT_EQ(group.members[1].info.dnodeId, pNewGroup->members[1].info.dnodeId); - EXPECT_EQ(group.isSync, pNewGroup->isSync); - EXPECT_EQ(group.assignedLeader.dnodeId, pNewGroup->assignedLeader.dnodeId); - - EXPECT_EQ(std::string(group.members[0].state.token), std::string(pNewGroup->members[0].state.token)); - EXPECT_EQ(std::string(group.members[1].state.token), std::string(pNewGroup->members[1].state.token)); - EXPECT_EQ(std::string(group.assignedLeader.token), std::string(pNewGroup->assignedLeader.token)); - EXPECT_EQ(group.version, 
pNewGroup->version); + ASSERT_EQ(group.vgId, pNewGroup->vgId); + ASSERT_EQ(group.dbUid, pNewGroup->dbUid); + ASSERT_EQ(group.members[0].info.dnodeId, pNewGroup->members[0].info.dnodeId); + ASSERT_EQ(group.members[1].info.dnodeId, pNewGroup->members[1].info.dnodeId); + ASSERT_EQ(group.isSync, pNewGroup->isSync); + ASSERT_EQ(group.assignedLeader.dnodeId, pNewGroup->assignedLeader.dnodeId); + + ASSERT_EQ(std::string(group.members[0].state.token), std::string(pNewGroup->members[0].state.token)); + ASSERT_EQ(std::string(group.members[1].state.token), std::string(pNewGroup->members[1].state.token)); + ASSERT_EQ(std::string(group.assignedLeader.token), std::string(pNewGroup->assignedLeader.token)); + ASSERT_EQ(group.version, pNewGroup->version); taosMemoryFree(pRow); taosMemoryFree(pRaw); @@ -129,9 +129,9 @@ TEST_F(ArbgroupTest, 02_process_heart_beat_rsp) { SArbGroup newGroup = {0}; bool updateToken = mndUpdateArbGroupByHeartBeat(&group, &rspMember, nowMs, dnodeId, &newGroup); - EXPECT_FALSE(updateToken); - EXPECT_NE(group.members[0].state.responsedHbSeq, rspMember.hbSeq); - EXPECT_NE(group.members[0].state.lastHbMs, nowMs); + ASSERT_EQ(updateToken, false); + ASSERT_NE(group.members[0].state.responsedHbSeq, rspMember.hbSeq); + ASSERT_NE(group.members[0].state.lastHbMs, nowMs); } { // old token @@ -144,9 +144,9 @@ TEST_F(ArbgroupTest, 02_process_heart_beat_rsp) { SArbGroup newGroup = {0}; bool updateToken = mndUpdateArbGroupByHeartBeat(&group, &rspMember, nowMs, dnodeId, &newGroup); - EXPECT_FALSE(updateToken); - EXPECT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq); - EXPECT_EQ(group.members[0].state.lastHbMs, nowMs); + ASSERT_EQ(updateToken, false); + ASSERT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq); + ASSERT_EQ(group.members[0].state.lastHbMs, nowMs); } { // new token @@ -159,14 +159,14 @@ TEST_F(ArbgroupTest, 02_process_heart_beat_rsp) { SArbGroup newGroup = {0}; bool updateToken = mndUpdateArbGroupByHeartBeat(&group, &rspMember, nowMs, 
dnodeId, &newGroup); - EXPECT_TRUE(updateToken); - EXPECT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq); - EXPECT_EQ(group.members[0].state.lastHbMs, nowMs); + ASSERT_EQ(updateToken, true); + ASSERT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq); + ASSERT_EQ(group.members[0].state.lastHbMs, nowMs); - EXPECT_EQ(std::string(newGroup.members[0].state.token), std::string(rspMember.memberToken)); - EXPECT_FALSE(newGroup.isSync); - EXPECT_EQ(newGroup.assignedLeader.dnodeId, 0); - EXPECT_EQ(std::string(newGroup.assignedLeader.token).size(), 0); + ASSERT_EQ(std::string(newGroup.members[0].state.token), std::string(rspMember.memberToken)); + ASSERT_EQ(newGroup.isSync, false); + ASSERT_EQ(newGroup.assignedLeader.dnodeId, 0); + ASSERT_EQ(std::string(newGroup.assignedLeader.token).size(), 0); } taosThreadMutexDestroy(&group.mutex); @@ -203,7 +203,7 @@ TEST_F(ArbgroupTest, 03_process_check_sync_rsp) { SArbGroup newGroup = {0}; bool updateIsSync = mndUpdateArbGroupByCheckSync(&group, vgId, member0Token, member1Token, newIsSync, &newGroup); - EXPECT_FALSE(updateIsSync); + ASSERT_EQ(updateIsSync, false); } { // newIsSync @@ -216,8 +216,8 @@ TEST_F(ArbgroupTest, 03_process_check_sync_rsp) { SArbGroup newGroup = {0}; bool updateIsSync = mndUpdateArbGroupByCheckSync(&group, vgId, member0Token, member1Token, newIsSync, &newGroup); - EXPECT_TRUE(updateIsSync); - EXPECT_TRUE(newGroup.isSync); + ASSERT_EQ(updateIsSync, true); + ASSERT_EQ(newGroup.isSync, true); } taosThreadMutexDestroy(&group.mutex); @@ -254,7 +254,7 @@ TEST_F(ArbgroupTest, 04_process_set_assigned_leader){ SArbGroup newGroup = {0}; bool updateAssigned = mndUpdateArbGroupBySetAssignedLeader(&group, vgId, memberToken, errcode, &newGroup); - EXPECT_FALSE(updateAssigned); + ASSERT_EQ(updateAssigned, false); } { // errcode != TSDB_CODE_SUCCESS @@ -265,7 +265,7 @@ TEST_F(ArbgroupTest, 04_process_set_assigned_leader){ SArbGroup newGroup = {0}; bool updateAssigned = 
mndUpdateArbGroupBySetAssignedLeader(&group, vgId, memberToken, errcode, &newGroup); - EXPECT_FALSE(updateAssigned); + ASSERT_EQ(updateAssigned, false); } { // errcode == TSDB_CODE_SUCCESS @@ -276,11 +276,81 @@ TEST_F(ArbgroupTest, 04_process_set_assigned_leader){ SArbGroup newGroup = {0}; bool updateAssigned = mndUpdateArbGroupBySetAssignedLeader(&group, vgId, memberToken, errcode, &newGroup); - EXPECT_TRUE(updateAssigned); - EXPECT_FALSE(newGroup.isSync); + ASSERT_EQ(updateAssigned, true); + ASSERT_EQ(newGroup.isSync, false); } taosThreadMutexDestroy(&group.mutex); } +TEST_F(ArbgroupTest, 05_check_sync_timer) { + const int32_t assgndDnodeId = 1; + const int32_t vgId = 5; + const int64_t nowMs = 173044838300; + + SArbGroup group = {0}; + group.vgId = vgId; + group.dbUid = 1234; + group.members[0].info.dnodeId = assgndDnodeId; + group.members[0].state.lastHbMs = nowMs - 10; + + group.members[1].info.dnodeId = 2; + group.members[1].state.lastHbMs = nowMs - 10; + + group.isSync = 1; + taosThreadMutexInit(&group.mutex, NULL); + + SArbAssignedLeader assgnedLeader = {0}; + assgnedLeader.dnodeId = assgndDnodeId; + assgnedLeader.acked = false; + strncpy(assgnedLeader.token, group.members[0].state.token, TSDB_ARB_TOKEN_SIZE); + + SArbAssignedLeader noneAsgndLeader = {0}; + noneAsgndLeader.dnodeId = 0; + noneAsgndLeader.acked = false; + + ECheckSyncOp op = CHECK_SYNC_NONE; + SArbGroup newGroup = {0}; + + // 1. asgnd,sync,noAck --> send set assigned + group.assignedLeader = assgnedLeader; + group.assignedLeader.acked = false; + group.isSync = true; + mndArbCheckSync(&group, nowMs, &op, &newGroup); + + ASSERT_EQ(op, CHECK_SYNC_SET_ASSIGNED_LEADER); + + // 2. asgnd,notSync,noAck --> send set assgnd + newGroup = {0}; + group.assignedLeader = assgnedLeader; + group.isSync = false; + group.assignedLeader.acked = false; + mndArbCheckSync(&group, nowMs, &op, &newGroup); + + ASSERT_EQ(op, CHECK_SYNC_SET_ASSIGNED_LEADER); + + // 3. 
noAsgnd,notSync,noAck(init) --> check sync + newGroup = {0}; + group.assignedLeader = noneAsgndLeader; + group.isSync = false; + group.assignedLeader.acked = false; + mndArbCheckSync(&group, nowMs, &op, &newGroup); + + ASSERT_EQ(op, CHECK_SYNC_CHECK_SYNC); + + // 4. noAsgnd,sync,noAck,one timeout--> update arbgroup (asgnd,sync,noAck) + newGroup = {0}; + group.assignedLeader = noneAsgndLeader; + group.isSync = true; + group.assignedLeader.acked = false; + group.members[1].state.lastHbMs = nowMs - 2 * tsArbSetAssignedTimeoutSec * 1000; // member1 timeout + mndArbCheckSync(&group, nowMs, &op, &newGroup); + + ASSERT_EQ(op, CHECK_SYNC_UPDATE); + ASSERT_EQ(newGroup.assignedLeader.dnodeId, assgndDnodeId); + ASSERT_EQ(std::string(newGroup.assignedLeader.token), std::string(group.members[0].state.token)); + ASSERT_EQ(newGroup.isSync, true); + ASSERT_EQ(newGroup.assignedLeader.acked, false); +} + #pragma GCC diagnostic pop diff --git a/source/dnode/mnode/impl/test/profile/profile.cpp b/source/dnode/mnode/impl/test/profile/profile.cpp index b1b94c65fb5..bfd8909a76c 100644 --- a/source/dnode/mnode/impl/test/profile/profile.cpp +++ b/source/dnode/mnode/impl/test/profile/profile.cpp @@ -39,7 +39,7 @@ TEST_F(MndTestProfile, 01_ConnectMsg) { strcpy(connectReq.db, ""); strcpy(connectReq.user, "root"); strcpy(connectReq.passwd, secretEncrypt); - strcpy(connectReq.sVer, version); + strcpy(connectReq.sVer, td_version); int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq); void* pReq = rpcMallocCont(contLen); @@ -76,7 +76,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_NotExistDB) { strcpy(connectReq.db, "not_exist_db"); strcpy(connectReq.user, "root"); strcpy(connectReq.passwd, secretEncrypt); - strcpy(connectReq.sVer, version); + strcpy(connectReq.sVer, td_version); int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq); void* pReq = rpcMallocCont(contLen); diff --git a/source/dnode/mnode/impl/test/show/show.cpp b/source/dnode/mnode/impl/test/show/show.cpp index 
2e67ffa946d..92b914a8dc9 100644 --- a/source/dnode/mnode/impl/test/show/show.cpp +++ b/source/dnode/mnode/impl/test/show/show.cpp @@ -64,7 +64,7 @@ TEST_F(MndTestShow, 03_ShowMsg_Conn) { strcpy(connectReq.db, ""); strcpy(connectReq.user, "root"); strcpy(connectReq.passwd, secretEncrypt); - strcpy(connectReq.sVer, version); + strcpy(connectReq.sVer, td_version); int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq); void* pReq = rpcMallocCont(contLen); diff --git a/source/dnode/mnode/impl/test/stb/stb.cpp b/source/dnode/mnode/impl/test/stb/stb.cpp index aa12c107a14..e92231907ff 100644 --- a/source/dnode/mnode/impl/test/stb/stb.cpp +++ b/source/dnode/mnode/impl/test/stb/stb.cpp @@ -782,7 +782,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) { { void* pReq = BuildAlterStbDropColumnReq(stbname, "col1", &contLen); SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen); - ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_ALTER_OPTION); + ASSERT_EQ(pRsp->code, TSDB_CODE_PAR_INVALID_DROP_COL); rpcFreeCont(pRsp->pCont); } diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index 227ff15da9c..474b22cca01 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -400,8 +400,8 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { pSdb->commitTerm = pSdb->applyTerm; pSdb->commitConfig = pSdb->applyConfig; memcpy(pSdb->tableVer, tableVer, sizeof(tableVer)); - mInfo("read sdb file:%s success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file, pSdb->commitIndex, - pSdb->commitTerm, pSdb->commitConfig); + mInfo("vgId:1, trans:0, read sdb file:%s success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file, + pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig); _OVER: if ((ret = taosCloseFile(&pFile)) != 0) { @@ -573,7 +573,8 @@ static int32_t sdbWriteFileImp(SSdb *pSdb, int32_t skip_type) { pSdb->commitIndex = pSdb->applyIndex; pSdb->commitTerm = pSdb->applyTerm; 
pSdb->commitConfig = pSdb->applyConfig; - mInfo("write sdb file success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s", + mInfo("vgId:1, trans:0, write sdb file success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 + " file:%s", pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig, curfile); } @@ -610,8 +611,8 @@ int32_t sdbWriteFile(SSdb *pSdb, int32_t delta) { if (code != 0) { mError("failed to write sdb file since %s", tstrerror(code)); } else { - mInfo("write sdb file success, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64, pSdb->applyIndex, - pSdb->applyTerm, pSdb->applyConfig); + mInfo("vgId:1, trans:0, write sdb file success, apply index:%" PRId64 ", term:%" PRId64 ", config:%" PRId64, + pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig); } (void)taosThreadMutexUnlock(&pSdb->filelock); return code; diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index d61f3d80d34..6eee8c510be 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -38,24 +38,23 @@ int32_t sndBuildStreamTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProce streamTaskOpenAllUpstreamInput(pTask); streamTaskResetUpstreamStageInfo(pTask); - streamSetupScheduleTrigger(pTask); SCheckpointInfo *pChkInfo = &pTask->chkInfo; tqSetRestoreVersionInfo(pTask); char *p = streamTaskGetStatus(pTask).name; if (pTask->info.fillHistory) { - sndInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 + sndInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64 " child id:%d, level:%d, status:%s fill-history:%d, related stream task:0x%x trigger:%" PRId64 " ms", - SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + SNODE_HANDLE, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, 
pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory, (int32_t)pTask->streamTaskId.taskId, pTask->info.delaySchedParam); } else { - sndInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 + sndInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64 " child id:%d, level:%d, status:%s fill-history:%d, related fill-task:0x%x trigger:%" PRId64 " ms", - SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + SNODE_HANDLE, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory, (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam); } diff --git a/source/dnode/snode/src/snodeInitApi.c b/source/dnode/snode/src/snodeInitApi.c index 196fa56c99a..680a2fd83c6 100644 --- a/source/dnode/snode/src/snodeInitApi.c +++ b/source/dnode/snode/src/snodeInitApi.c @@ -46,10 +46,14 @@ void initStateStoreAPI(SStateStore* pStore) { pStore->streamStateSaveInfo = streamStateSaveInfo; pStore->streamStateGetInfo = streamStateGetInfo; pStore->streamStateSetNumber = streamStateSetNumber; + pStore->streamStateGetPrev = streamStateGetPrev; pStore->streamStateFillPut = streamStateFillPut; pStore->streamStateFillGet = streamStateFillGet; + pStore->streamStateFillAddIfNotExist = streamStateFillAddIfNotExist; pStore->streamStateFillDel = streamStateFillDel; + pStore->streamStateFillGetNext = streamStateFillGetNext; + pStore->streamStateFillGetPrev = streamStateFillGetPrev; pStore->streamStateCurNext = streamStateCurNext; pStore->streamStateCurPrev = streamStateCurPrev; @@ -60,9 +64,12 @@ void initStateStoreAPI(SStateStore* pStore) { pStore->streamStateFillSeekKeyPrev = streamStateFillSeekKeyPrev; pStore->streamStateFreeCur = streamStateFreeCur; - pStore->streamStateGetGroupKVByCur = 
streamStateGetGroupKVByCur; + pStore->streamStateFillGetGroupKVByCur = streamStateFillGetGroupKVByCur; pStore->streamStateGetKVByCur = streamStateGetKVByCur; + pStore->streamStateSetFillInfo = streamStateSetFillInfo; + pStore->streamStateClearExpiredState = streamStateClearExpiredState; + pStore->streamStateSessionAddIfNotExist = streamStateSessionAddIfNotExist; pStore->streamStateSessionPut = streamStateSessionPut; pStore->streamStateSessionGet = streamStateSessionGet; @@ -75,11 +82,6 @@ void initStateStoreAPI(SStateStore* pStore) { pStore->streamStateCountGetKeyByRange = streamStateCountGetKeyByRange; pStore->streamStateSessionAllocWinBuffByNextPosition = streamStateSessionAllocWinBuffByNextPosition; -//void initStreamStateAPI(SStorageAPI* pAPI) { -// initStateStoreAPI(&pAPI->stateStore); -// initFunctionStateStore(&pAPI->functionStore); -//} - pStore->updateInfoInit = updateInfoInit; pStore->updateInfoFillBlockData = updateInfoFillBlockData; pStore->updateInfoIsUpdated = updateInfoIsUpdated; @@ -100,6 +102,11 @@ void initStateStoreAPI(SStateStore* pStore) { pStore->streamStateSessionSeekKeyCurrentPrev = streamStateSessionSeekKeyCurrentPrev; pStore->streamStateSessionSeekKeyCurrentNext = streamStateSessionSeekKeyCurrentNext; + pStore->streamStateGroupPut = streamStateGroupPut; + pStore->streamStateGroupGetCur = streamStateGroupGetCur; + pStore->streamStateGroupCurNext = streamStateGroupCurNext; + pStore->streamStateGroupGetKVByCur = streamStateGroupGetKVByCur; + pStore->streamFileStateDestroy = streamFileStateDestroy; pStore->streamFileStateClear = streamFileStateClear; pStore->needClearDiskBuff = needClearDiskBuff; diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 204311aa98d..610ba436739 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -243,7 +243,7 @@ int32_t extractMsgFromWal(SWalReader *pReader, void **pItem, int64_t maxVer, con int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, 
int32_t msgLen, int64_t ver); bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids); int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char *idstr); -int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet); +int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet, int64_t *createTime); int32_t tqGetStreamExecInfo(SVnode *pVnode, int64_t streamId, int64_t *pDelay, bool *fhFinished); // sma diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index ed8f99ec753..c1123db7a33 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -342,7 +342,10 @@ typedef struct { rocksdb_writeoptions_t *writeoptions; rocksdb_readoptions_t *readoptions; rocksdb_writebatch_t *writebatch; - TdThreadMutex writeBatchMutex; + TdThreadMutex writeBatchMutex; + int32_t sver; + tb_uid_t suid; + tb_uid_t uid; STSchema *pTSchema; } SRocksCache; diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index fc98d6578be..512e0884286 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -163,7 +163,7 @@ int32_t metaDropTables(SMeta* pMeta, SArray* tbUids); int metaTtlFindExpired(SMeta* pMeta, int64_t timePointMs, SArray* tbUids, int32_t ttlDropMaxCount); int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp); int metaUpdateChangeTimeWithLock(SMeta* pMeta, tb_uid_t uid, int64_t changeTimeMs); -SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); +SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, int64_t *createTime); int32_t metaGetTbTSchemaNotNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema); int32_t metaGetTbTSchemaMaybeNull(SMeta* pMeta, tb_uid_t uid, int32_t 
sver, int lock, STSchema** ppTSchema); STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); @@ -222,6 +222,7 @@ int32_t tsdbCacheNewSTableColumn(STsdb* pTsdb, SArray* uids, int16_t cid, int8_t int32_t tsdbCacheDropSTableColumn(STsdb* pTsdb, SArray* uids, int16_t cid, bool hasPrimayKey); int32_t tsdbCacheNewNTableColumn(STsdb* pTsdb, int64_t uid, int16_t cid, int8_t col_type); int32_t tsdbCacheDropNTableColumn(STsdb* pTsdb, int64_t uid, int16_t cid, bool hasPrimayKey); +void tsdbCacheInvalidateSchema(STsdb* pTsdb, tb_uid_t suid, tb_uid_t uid, int32_t sver); int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq2* pMsg); int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq2* pMsg, SSubmitRsp2* pRsp); int32_t tsdbInsertTableData(STsdb* pTsdb, int64_t version, SSubmitTbData* pSubmitTbData, int32_t* affectedRows); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 484c5c0a161..e2ba8d9ccbc 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -371,7 +371,7 @@ int32_t metaTbCursorPrev(SMTbCursor *pTbCur, ETableType jumpTableType) { return 0; } -SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) { +SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock, int64_t *createTime) { void *pData = NULL; int nData = 0; int64_t version; @@ -407,6 +407,9 @@ SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int } } else if (me.type == TSDB_CHILD_TABLE) { uid = me.ctbEntry.suid; + if (createTime != NULL){ + *createTime = me.ctbEntry.btime; + } tDecoderClear(&dc); goto _query; } else { @@ -617,7 +620,7 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) { STSchema *pTSchema = NULL; SSchemaWrapper *pSW = NULL; - pSW = metaGetTableSchema(pMeta, uid, sver, lock); + pSW = metaGetTableSchema(pMeta, uid, sver, lock, NULL); if (!pSW) 
return NULL; pTSchema = tBuildTSchema(pSW->pSchema, pSW->nCols, pSW->version); diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c index 0936d8f0928..b2826ec45ac 100644 --- a/source/dnode/vnode/src/meta/metaSnapshot.c +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -552,7 +552,7 @@ int32_t setForSnapShot(SSnapContext* ctx, int64_t uid) { void taosXSetTablePrimaryKey(SSnapContext* ctx, int64_t uid) { bool ret = false; - SSchemaWrapper* schema = metaGetTableSchema(ctx->pMeta, uid, -1, 1); + SSchemaWrapper* schema = metaGetTableSchema(ctx->pMeta, uid, -1, 1, NULL); if (schema && schema->nCols >= 2 && schema->pSchema[1].flags & COL_IS_KEY) { ret = true; } diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 21d12ef77de..5c3516a962e 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -620,6 +620,8 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { } } if (uids) taosArrayDestroy(uids); + + tsdbCacheInvalidateSchema(pTsdb, pReq->suid, -1, pReq->schemaRow.version); } metaWLock(pMeta); @@ -1945,6 +1947,10 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl break; } + if (!TSDB_CACHE_NO(pMeta->pVnode->config)) { + tsdbCacheInvalidateSchema(pMeta->pVnode->pTsdb, 0, entry.uid, pSchema->version); + } + entry.version = version; // do actual write diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 4fdf299e50d..80c04a32765 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -238,13 +238,18 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui } static void tdRSmaTaskInit(SStreamMeta *pMeta, SRSmaInfoItem *pItem, SStreamTaskId *pId) { - STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; + STaskId id = {.streamId = pId->streamId, .taskId = 
pId->taskId}; + SStreamTask *pTask = NULL; + streamMetaRLock(pMeta); - SStreamTask **ppTask = (SStreamTask **)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask && *ppTask) { - pItem->submitReqVer = (*ppTask)->chkInfo.checkpointVer; - pItem->fetchResultVer = (*ppTask)->info.delaySchedParam; + + int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { + pItem->submitReqVer = pTask->chkInfo.checkpointVer; + pItem->fetchResultVer = pTask->info.delaySchedParam; + streamMetaReleaseTask(pMeta, pTask); } + streamMetaRUnLock(pMeta); } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index b75baea08d1..bd78f62caea 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -713,8 +713,10 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg static void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); } int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessVer) { - STQ* pTq = (STQ*)pTqObj; - int32_t vgId = TD_VID(pTq->pVnode); + STQ* pTq = (STQ*)pTqObj; + int32_t vgId = TD_VID(pTq->pVnode); + SCheckpointInfo* pChkInfo = NULL; + tqDebug("s-task:0x%x start to build task", pTask->id.taskId); int32_t code = streamTaskInit(pTask, pTq->pStreamMeta, &pTq->pVnode->msgCb, nextProcessVer); @@ -765,28 +767,27 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV } streamTaskResetUpstreamStageInfo(pTask); - streamSetupScheduleTrigger(pTask); - SCheckpointInfo* pChkInfo = &pTask->chkInfo; + pChkInfo = &pTask->chkInfo; tqSetRestoreVersionInfo(pTask); char* p = streamTaskGetStatus(pTask).name; const char* pNext = streamTaskGetStatusStr(pTask->status.taskStatus); if (pTask->info.fillHistory) { - tqInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 + tqInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64 " 
child id:%d, level:%d, cur-status:%s, next-status:%s fill-history:%d, related stream task:0x%x " "delaySched:%" PRId64 " ms, inputVer:%" PRId64, - vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + vgId, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory, (int32_t)pTask->streamTaskId.taskId, pTask->info.delaySchedParam, nextProcessVer); } else { - tqInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 + tqInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64 " child id:%d, level:%d, cur-status:%s next-status:%s fill-history:%d, related fill-task:0x%x " "delaySched:%" PRId64 " ms, inputVer:%" PRId64, - vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + vgId, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory, (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam, nextProcessVer); @@ -887,13 +888,14 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { SStreamScanHistoryReq* pReq = (SStreamScanHistoryReq*)pMsg->pCont; SStreamMeta* pMeta = pTq->pStreamMeta; int32_t code = TSDB_CODE_SUCCESS; + SStreamTask* pTask = NULL; + SStreamTask* pStreamTask = NULL; - SStreamTask* pTask = NULL; code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId, &pTask); if (pTask == NULL) { tqError("vgId:%d failed to acquire stream task:0x%x during scan history data, task may have been destroyed", pMeta->vgId, pReq->taskId); - return -1; + return code; } // do recovery step1 @@ -958,11 +960,11 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { ETaskStatus s = p.state; if (s == 
TASK_STATUS__PAUSE) { - tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs total:%.2fs, sched-status:%d", pTask->id.idStr, - el, pTask->execInfo.step1El, status); + tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs total:%.2fs, sched-status:%d", id, el, + pTask->execInfo.step1El, status); } else if (s == TASK_STATUS__STOP || s == TASK_STATUS__DROPPING) { - tqDebug("s-task:%s status:%p not continue scan-history data, total elapsed time:%.2fs quit", pTask->id.idStr, - p.name, pTask->execInfo.step1El); + tqDebug("s-task:%s status:%p not continue scan-history data, total elapsed time:%.2fs quit", id, p.name, + pTask->execInfo.step1El); } } @@ -979,7 +981,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { } // 1. get the related stream task - SStreamTask* pStreamTask = NULL; code = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId, &pStreamTask); if (pStreamTask == NULL) { tqError("failed to find s-task:0x%" PRIx64 ", it may have been destroyed, drop related fill-history task:%s", @@ -990,15 +991,15 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { atomic_store_32(&pTask->status.inScanHistorySentinel, 0); streamMetaReleaseTask(pMeta, pTask); - return code; // todo: handle failure + return code; } if (pStreamTask->info.taskLevel != TASK_LEVEL__SOURCE) { tqError("s-task:%s fill-history task related stream task level:%d, unexpected", id, pStreamTask->info.taskLevel); return TSDB_CODE_STREAM_INTERNAL_ERROR; } - code = streamTaskHandleEventAsync(pStreamTask->status.pSM, TASK_EVENT_HALT, handleStep2Async, pTq); + code = streamTaskHandleEventAsync(pStreamTask->status.pSM, TASK_EVENT_HALT, handleStep2Async, pTq); streamMetaReleaseTask(pMeta, pStreamTask); atomic_store_32(&pTask->status.inScanHistorySentinel, 0); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index f2f85773b50..95955e579ff 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ 
b/source/dnode/vnode/src/tq/tqRead.c @@ -263,7 +263,7 @@ bool tqGetTablePrimaryKey(STqReader* pReader) { return pReader->hasPrimaryKey; } void tqSetTablePrimaryKey(STqReader* pReader, int64_t uid) { bool ret = false; - SSchemaWrapper* schema = metaGetTableSchema(pReader->pVnodeMeta, uid, -1, 1); + SSchemaWrapper* schema = metaGetTableSchema(pReader->pVnodeMeta, uid, -1, 1, NULL); if (schema && schema->nCols >= 2 && schema->pSchema[1].flags & COL_IS_KEY) { ret = true; } @@ -669,7 +669,7 @@ int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char* (pReader->cachedSchemaVer != sversion)) { tDeleteSchemaWrapper(pReader->pSchemaWrapper); - pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1); + pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1, NULL); if (pReader->pSchemaWrapper == NULL) { tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", uid:%" PRId64 "version %d, possibly dropped table", @@ -961,10 +961,8 @@ int32_t tqProcessRowData(STqReader* pReader, SSubmitTbData* pSubmitTbData, SArra return code; } -int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet) { - tqDebug("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk); - SSDataBlock* block = NULL; - +int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet, int64_t *createTime) { + tqTrace("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk); if (pSubmitTbData == NULL) { return terrno; @@ -980,7 +978,7 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas pReader->lastBlkUid = uid; tDeleteSchemaWrapper(pReader->pSchemaWrapper); - pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1); + 
pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1, createTime); if (pReader->pSchemaWrapper == NULL) { tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", version %d, possibly dropped table", pReader->pWalReader->pWal->cfg.vgId, uid, pReader->cachedSchemaVer); @@ -1115,12 +1113,20 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { break; } - SStreamTask* pTask = *(SStreamTask**)pIter; - if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && (pTask->exec.pExecutor != NULL)) { - int32_t code = qUpdateTableListForStreamScanner(pTask->exec.pExecutor, tbUidList, isAdd); - if (code != 0) { - tqError("vgId:%d, s-task:%s update qualified table error for stream task", vgId, pTask->id.idStr); - continue; + int64_t refId = *(int64_t*)pIter; + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, refId); + if (pTask != NULL) { + int32_t taskId = pTask->id.taskId; + + if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && (pTask->exec.pExecutor != NULL)) { + int32_t code = qUpdateTableListForStreamScanner(pTask->exec.pExecutor, tbUidList, isAdd); + if (code != 0) { + tqError("vgId:%d, s-task:0x%x update qualified table error for stream task", vgId, taskId); + } + } + int32_t ret = taosReleaseRef(streamTaskRefPool, refId); + if (ret) { + tqError("vgId:%d release task refId failed, refId:%" PRId64, vgId, refId); } } } diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c index dbc1b16cf57..39eaac39b31 100644 --- a/source/dnode/vnode/src/tq/tqScan.c +++ b/source/dnode/vnode/src/tq/tqScan.c @@ -16,7 +16,8 @@ #include "tq.h" int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision) { - int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + blockGetEncodeSize(pBlock); + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + dataEncodeBufSize; void* buf = 
taosMemoryCalloc(1, dataStrLen); if (buf == NULL) { return terrno; @@ -28,7 +29,7 @@ int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t pRetrieve->compressed = 0; pRetrieve->numOfRows = htobe64((int64_t)pBlock->info.rows); - int32_t actualLen = blockEncode(pBlock, pRetrieve->data, numOfCols); + int32_t actualLen = blockEncode(pBlock, pRetrieve->data, dataEncodeBufSize, numOfCols); if(actualLen < 0){ taosMemoryFree(buf); return terrno; @@ -210,36 +211,21 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat if (pDataBlock != NULL && pDataBlock->info.rows > 0) { if (pRsp->withTbName) { - if (pOffset->type == TMQ_OFFSET__LOG) { - int64_t uid = pExec->pTqReader->lastBlkUid; - if (tqAddTbNameToRsp(pTq, uid, pRsp, 1) < 0) { - tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); - continue; - } - } else { - char* tbName = taosStrdup(qExtractTbnameFromTask(task)); - if (tbName == NULL) { - tqError("vgId:%d, failed to add tbname to rsp msg, null", pTq->pVnode->config.vgId); - return terrno; - } - if (taosArrayPush(pRsp->blockTbName, &tbName) == NULL){ - tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); - continue; - } + char* tbName = taosStrdup(qExtractTbnameFromTask(task)); + if (tbName == NULL) { + tqError("vgId:%d, failed to add tbname to rsp msg, null", pTq->pVnode->config.vgId); + return terrno; + } + if (taosArrayPush(pRsp->blockTbName, &tbName) == NULL){ + tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); + continue; } } if (pRsp->withSchema) { - if (pOffset->type == TMQ_OFFSET__LOG) { - if (tqAddBlockSchemaToRsp(pExec, pRsp) != 0){ - tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId); - continue; - } - } else { - SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task)); - if(taosArrayPush(pRsp->blockSchema, &pSW) == NULL){ - tqError("vgId:%d, failed to add schema to rsp msg", 
pTq->pVnode->config.vgId); - continue; - } + SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task)); + if(taosArrayPush(pRsp->blockSchema, &pSW) == NULL){ + tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId); + continue; } } @@ -249,12 +235,9 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat continue; } pRsp->blockNum++; - if (pOffset->type == TMQ_OFFSET__LOG) { - continue; - } else { - rowCnt += pDataBlock->info.rows; - if (rowCnt <= tmqRowSize) continue; - } + rowCnt += pDataBlock->info.rows; + if (rowCnt <= tmqRowSize) continue; + } // get meta @@ -296,6 +279,54 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat return code; } +static int32_t buildCreateTbInfo(SMqDataRsp* pRsp, SVCreateTbReq* pCreateTbReq){ + int32_t code = 0; + void* createReq = NULL; + if (pRsp->createTableNum == 0) { + pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t)); + if (pRsp->createTableLen == NULL) { + code = terrno; + goto END; + } + pRsp->createTableReq = taosArrayInit(0, sizeof(void*)); + if (pRsp->createTableReq == NULL) { + code = terrno; + goto END; + } + } + + uint32_t len = 0; + tEncodeSize(tEncodeSVCreateTbReq, pCreateTbReq, len, code); + if (TSDB_CODE_SUCCESS != code) { + goto END; + } + createReq = taosMemoryCalloc(1, len); + if (createReq == NULL){ + code = terrno; + goto END; + } + SEncoder encoder = {0}; + tEncoderInit(&encoder, createReq, len); + code = tEncodeSVCreateTbReq(&encoder, pCreateTbReq); + tEncoderClear(&encoder); + if (code < 0) { + goto END; + } + if (taosArrayPush(pRsp->createTableLen, &len) == NULL){ + code = terrno; + goto END; + } + if (taosArrayPush(pRsp->createTableReq, &createReq) == NULL){ + code = terrno; + goto END; + } + pRsp->createTableNum++; + + return 0; +END: + taosMemoryFree(createReq); + return code; +} static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int32_t* totalRows, int8_t sourceExcluded){ 
int32_t code = 0; @@ -315,7 +346,8 @@ static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int } SSubmitTbData* pSubmitTbDataRet = NULL; - code = tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, &pSubmitTbDataRet); + int64_t createTime = INT64_MAX; + code = tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, &pSubmitTbDataRet, &createTime); if (code != 0) { tqError("vgId:%d, failed to retrieve block", pTq->pVnode->config.vgId); goto END; @@ -333,46 +365,13 @@ static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int } } if (pHandle->fetchMeta != WITH_DATA && pSubmitTbDataRet->pCreateTbReq != NULL) { - if (pRsp->createTableNum == 0) { - pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t)); - if (pRsp->createTableLen == NULL) { - code = terrno; + if (pSubmitTbDataRet->ctimeMs - createTime <= 1000) { // judge if table is already created to avoid sending crateTbReq + code = buildCreateTbInfo(pRsp, pSubmitTbDataRet->pCreateTbReq); + if (code != 0){ + tqError("vgId:%d, failed to build create table info", pTq->pVnode->config.vgId); goto END; } - pRsp->createTableReq = taosArrayInit(0, sizeof(void*)); - if (pRsp->createTableReq == NULL) { - code = terrno; - goto END; - } - } - - uint32_t len = 0; - tEncodeSize(tEncodeSVCreateTbReq, pSubmitTbDataRet->pCreateTbReq, len, code); - if (TSDB_CODE_SUCCESS != code) { - goto END; - } - void* createReq = taosMemoryCalloc(1, len); - if (createReq == NULL){ - code = terrno; - goto END; - } - SEncoder encoder = {0}; - tEncoderInit(&encoder, createReq, len); - code = tEncodeSVCreateTbReq(&encoder, pSubmitTbDataRet->pCreateTbReq); - tEncoderClear(&encoder); - if (code < 0) { - taosMemoryFree(createReq); - goto END; - } - if (taosArrayPush(pRsp->createTableLen, &len) == NULL){ - taosMemoryFree(createReq); - goto END; - } - if (taosArrayPush(pRsp->createTableReq, &createReq) == NULL){ - taosMemoryFree(createReq); - goto END; } - pRsp->createTableNum++; } if (pHandle->fetchMeta == ONLY_META && 
pSubmitTbDataRet->pCreateTbReq == NULL) { goto END; diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index 3ec269ec220..29372c5da7d 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -79,7 +79,7 @@ static void doStartScanWal(void* param, void* tmrId) { SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*)param; - SStreamMeta* pMeta = taosAcquireRef(streamMetaId, pParam->metaId); + SStreamMeta* pMeta = taosAcquireRef(streamMetaRefPool, pParam->metaId); if (pMeta == NULL) { tqError("metaRid:%" PRId64 " not valid now, stream meta has been freed", pParam->metaId); taosMemoryFree(pParam); @@ -97,7 +97,7 @@ static void doStartScanWal(void* param, void* tmrId) { tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code)); } - code = taosReleaseRef(streamMetaId, pParam->metaId); + code = taosReleaseRef(streamMetaRefPool, pParam->metaId); if (code) { tqError("vgId:%d failed to release ref for streamMeta, rid:%" PRId64 " code:%s", vgId, pParam->metaId, tstrerror(code)); @@ -270,7 +270,12 @@ bool handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver) { bool taskReadyForDataFromWal(SStreamTask* pTask) { // non-source or fill-history tasks don't need to response the WAL scan action. 
- if ((pTask->info.taskLevel != TASK_LEVEL__SOURCE) || (pTask->status.downstreamReady == 0)) { + SSTaskBasicInfo* pInfo = &pTask->info; + if ((pInfo->taskLevel != TASK_LEVEL__SOURCE) || (pTask->status.downstreamReady == 0)) { + return false; + } + + if (pInfo->taskLevel == TASK_LEVEL__SOURCE && pInfo->trigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { return false; } @@ -282,7 +287,7 @@ bool taskReadyForDataFromWal(SStreamTask* pTask) { } // fill-history task has entered into the last phase, no need to anything - if ((pTask->info.fillHistory == 1) && pTask->status.appendTranstateBlock) { + if ((pInfo->fillHistory == 1) && pTask->status.appendTranstateBlock) { // the maximum version of data in the WAL has reached already, the step2 is done tqDebug("s-task:%s fill-history reach the maximum ver:%" PRId64 ", not scan wal anymore", pTask->id.idStr, pTask->dataRange.range.maxVer); @@ -419,9 +424,9 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) { streamMutexLock(&pTask->lock); - SStreamTaskState pState = streamTaskGetStatus(pTask); - if (pState.state != TASK_STATUS__READY) { - tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pState.name); + SStreamTaskState state = streamTaskGetStatus(pTask); + if (state.state != TASK_STATUS__READY) { + tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, state.name); streamMutexUnlock(&pTask->lock); streamMetaReleaseTask(pStreamMeta, pTask); continue; diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index b4866b8c653..e066938fc09 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -51,7 +51,8 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { pRsp->blockTbName = taosArrayInit(0, sizeof(void*)); pRsp->blockSchema = taosArrayInit(0, sizeof(void*)); - if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || 
pRsp->blockSchema == NULL) { + if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || + pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) { if (pRsp->blockData != NULL) { taosArrayDestroy(pRsp->blockData); pRsp->blockData = NULL; @@ -71,6 +72,7 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { taosArrayDestroy(pRsp->blockSchema); pRsp->blockSchema = NULL; } + return terrno; } @@ -626,6 +628,9 @@ int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void* tmp = taosArrayGet(pDelBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); TSDB_CHECK_NULL(tmp, code, line, END, terrno) colDataSetNULL(tmp, i); + tmp = taosArrayGet(pDelBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX); + TSDB_CHECK_NULL(tmp, code, line, END, terrno) + colDataSetNULL(tmp, i); } if (type == 0) { @@ -683,19 +688,21 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b continue; } - STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; - SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask == NULL) { + STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; + SStreamTask* pTask = NULL; + + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code != 0) { tqError("vgId:%d failed to acquire task:0x%x in retrieving progress", pMeta->vgId, pId->taskId); continue; } - if ((*ppTask)->info.taskLevel != TASK_LEVEL__SOURCE) { + if (pTask->info.taskLevel != TASK_LEVEL__SOURCE) { + streamMetaReleaseTask(pMeta, pTask); continue; } // here we get the required stream source task - SStreamTask* pTask = *ppTask; *fhFinished = !HAS_RELATED_FILLHISTORY_TASK(pTask); int64_t ver = walReaderGetCurrentVer(pTask->exec.pWalReader); @@ -711,6 +718,7 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b SWalReader* pReader = walOpenReader(pTask->exec.pWalReader->pWal, NULL, 0); if (pReader == NULL) { tqError("failed to open wal reader to extract exec 
progress, vgId:%d", pMeta->vgId); + streamMetaReleaseTask(pMeta, pTask); continue; } @@ -736,6 +744,7 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b } walCloseReader(pReader); + streamMetaReleaseTask(pMeta, pTask); } streamMetaRUnLock(pMeta); diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index a00e92997c0..f31dd288475 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -88,6 +88,8 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) { } } + streamSetupScheduleTrigger(pTask); + double el = (taosGetTimestampMs() - st) / 1000.0; tqDebug("s-task:%s vgId:%d expand stream task completed, elapsed time:%.2fsec", pTask->id.idStr, vgId, el); @@ -138,13 +140,15 @@ int32_t tqStreamStartOneTaskAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t stream // this is to process request from transaction, always return true. int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored) { - int32_t vgId = pMeta->vgId; - char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); - int32_t len = pMsg->contLen - sizeof(SMsgHead); - SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS}; - int64_t st = taosGetTimestampMs(); - bool updated = false; - int32_t code = 0; + int32_t vgId = pMeta->vgId; + char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); + int32_t len = pMsg->contLen - sizeof(SMsgHead); + SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS}; + int64_t st = taosGetTimestampMs(); + bool updated = false; + int32_t code = 0; + SStreamTask* pTask = NULL; + SStreamTask* pHTask = NULL; SStreamTaskNodeUpdateMsg req = {0}; @@ -170,9 +174,9 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM streamMetaWLock(pMeta); // the task epset may be updated again and again, when replaying the WAL, the task may be in stop status. 
- STaskId id = {.streamId = req.streamId, .taskId = req.taskId}; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask == NULL || *ppTask == NULL) { + STaskId id = {.streamId = req.streamId, .taskId = req.taskId}; + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code != 0) { tqError("vgId:%d failed to acquire task:0x%x when handling update task epset, it may have been dropped", vgId, req.taskId); rsp.code = TSDB_CODE_SUCCESS; @@ -181,12 +185,13 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM return rsp.code; } - SStreamTask* pTask = *ppTask; - const char* idstr = pTask->id.idStr; + const char* idstr = pTask->id.idStr; if (req.transId <= 0) { tqError("vgId:%d invalid update nodeEp task, transId:%d, discard", vgId, req.taskId); rsp.code = TSDB_CODE_SUCCESS; + + streamMetaReleaseTask(pMeta, pTask); streamMetaWUnLock(pMeta); taosArrayDestroy(req.pNodeList); @@ -197,6 +202,8 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM bool update = streamMetaInitUpdateTaskList(pMeta, req.transId); if (!update) { rsp.code = TSDB_CODE_SUCCESS; + + streamMetaReleaseTask(pMeta, pTask); streamMetaWUnLock(pMeta); taosArrayDestroy(req.pNodeList); @@ -211,7 +218,10 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM tqDebug("s-task:%s (vgId:%d) already update in transId:%d, discard the nodeEp update msg", idstr, vgId, req.transId); rsp.code = TSDB_CODE_SUCCESS; + + streamMetaReleaseTask(pMeta, pTask); streamMetaWUnLock(pMeta); + taosArrayDestroy(req.pNodeList); return rsp.code; } @@ -227,24 +237,23 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM streamTaskStopMonitorCheckRsp(&pTask->taskCheckInfo, pTask->id.idStr); - SStreamTask** ppHTask = NULL; if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { - ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, 
sizeof(pTask->hTaskInfo.id)); - if (ppHTask == NULL || *ppHTask == NULL) { + code = streamMetaAcquireTaskUnsafe(pMeta, &pTask->hTaskInfo.id, &pHTask); + if (code != 0) { tqError( "vgId:%d failed to acquire fill-history task:0x%x when handling update, may have been dropped already, rel " "stream task:0x%x", vgId, (uint32_t)pTask->hTaskInfo.id.taskId, req.taskId); CLEAR_RELATED_FILLHISTORY_TASK(pTask); } else { - tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr); - bool updateEpSet = streamTaskUpdateEpsetInfo(*ppHTask, req.pNodeList); + tqDebug("s-task:%s fill-history task update nodeEp along with stream task", pHTask->id.idStr); + bool updateEpSet = streamTaskUpdateEpsetInfo(pHTask, req.pNodeList); if (updateEpSet) { updated = updateEpSet; } - streamTaskResetStatus(*ppHTask); - streamTaskStopMonitorCheckRsp(&(*ppHTask)->taskCheckInfo, (*ppHTask)->id.idStr); + streamTaskResetStatus(pHTask); + streamTaskStopMonitorCheckRsp(&pHTask->taskCheckInfo, pHTask->id.idStr); } } @@ -256,8 +265,8 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM tqError("s-task:%s vgId:%d failed to save task, code:%s", idstr, vgId, tstrerror(code)); } - if (ppHTask != NULL) { - code = streamMetaSaveTask(pMeta, *ppHTask); + if (pHTask != NULL) { + code = streamMetaSaveTask(pMeta, pHTask); if (code) { tqError("s-task:%s vgId:%d failed to save related history task, code:%s", idstr, vgId, tstrerror(code)); } @@ -271,15 +280,17 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM tqError("s-task:%s vgId:%d failed to stop task, code:%s", idstr, vgId, tstrerror(code)); } - if (ppHTask != NULL) { - code = streamTaskStop(*ppHTask); + if (pHTask != NULL) { + code = streamTaskStop(pHTask); if (code) { tqError("s-task:%s vgId:%d failed to stop related history task, code:%s", idstr, vgId, tstrerror(code)); } } // keep info - streamMetaAddIntoUpdateTaskList(pMeta, pTask, (ppHTask != NULL) ? 
(*ppHTask) : NULL, req.transId, st); + streamMetaAddIntoUpdateTaskList(pMeta, pTask, (pHTask != NULL) ? (pHTask) : NULL, req.transId, st); + streamMetaReleaseTask(pMeta, pTask); + streamMetaReleaseTask(pMeta, pHTask); rsp.code = TSDB_CODE_SUCCESS; @@ -605,6 +616,7 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve int32_t taskId = -1; int64_t streamId = -1; bool added = false; + int32_t size = sizeof(SStreamTask); if (tsDisableStream) { tqInfo("vgId:%d stream disabled, not deploy stream tasks", vgId); @@ -614,7 +626,6 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve tqDebug("vgId:%d receive new stream task deploy msg, start to build stream task", vgId); // 1.deserialize msg and build task - int32_t size = sizeof(SStreamTask); SStreamTask* pTask = taosMemoryCalloc(1, size); if (pTask == NULL) { tqError("vgId:%d failed to create stream task due to out of memory, alloc size:%d", vgId, size); @@ -643,7 +654,6 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve if (code < 0) { tqError("failed to add s-task:0x%x into vgId:%d meta, existed:%d, code:%s", vgId, taskId, numOfTasks, tstrerror(code)); - tFreeStreamTask(pTask); return code; } @@ -673,7 +683,6 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve } } else { tqWarn("vgId:%d failed to add s-task:0x%x, since already exists in meta store, total:%d", vgId, taskId, numOfTasks); - tFreeStreamTask(pTask); } return code; @@ -681,25 +690,25 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen) { SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg; + int32_t code = 0; + int32_t vgId = pMeta->vgId; + STaskId hTaskId = {0}; + SStreamTask* pTask = NULL; - int32_t code = 0; - int32_t vgId = pMeta->vgId; - STaskId hTaskId = {0}; tqDebug("vgId:%d receive msg to drop s-task:0x%x", vgId, 
pReq->taskId); streamMetaWLock(pMeta); - STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if ((ppTask != NULL) && ((*ppTask) != NULL)) { - int32_t unusedRetRef = streamMetaAcquireOneTask(*ppTask); - SStreamTask* pTask = *ppTask; - + STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { hTaskId.streamId = pTask->hTaskInfo.id.streamId; hTaskId.taskId = pTask->hTaskInfo.id.taskId; } + // clear the relationship, and then release the stream tasks, to avoid invalid accessing of already freed + // related stream(history) task streamTaskSetRemoveBackendFiles(pTask); code = streamTaskClearHTaskAttr(pTask, pReq->resetRelHalt); streamMetaReleaseTask(pMeta, pTask); @@ -742,18 +751,19 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen int32_t tqStreamTaskProcessUpdateCheckpointReq(SStreamMeta* pMeta, bool restored, char* msg) { SVUpdateCheckpointInfoReq* pReq = (SVUpdateCheckpointInfoReq*)msg; + int32_t code = 0; + int32_t vgId = pMeta->vgId; + SStreamTask* pTask = NULL; - int32_t code = 0; - int32_t vgId = pMeta->vgId; tqDebug("vgId:%d receive msg to update-checkpoint-info for s-task:0x%x", vgId, pReq->taskId); streamMetaWLock(pMeta); - STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - - if (ppTask != NULL && (*ppTask) != NULL) { - code = streamTaskUpdateTaskCheckpointInfo(*ppTask, restored, pReq); + STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { + code = streamTaskUpdateTaskCheckpointInfo(pTask, restored, pReq); + streamMetaReleaseTask(pMeta, pTask); } else { // failed to get the task. 
int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta); tqError( @@ -763,7 +773,6 @@ int32_t tqStreamTaskProcessUpdateCheckpointReq(SStreamMeta* pMeta, bool restored } streamMetaWUnLock(pMeta); - // always return success when handling the requirement issued by mnode during transaction. return TSDB_CODE_SUCCESS; } @@ -789,11 +798,6 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) { tqInfo("vgId:%d tasks are all updated and stopped, restart all tasks, triggered by transId:%d, ts:%" PRId64, vgId, pMeta->updateInfo.completeTransId, pMeta->updateInfo.completeTs); - while (streamMetaTaskInTimer(pMeta)) { - tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId); - taosMsleep(100); - } - streamMetaWLock(pMeta); streamMetaClear(pMeta); diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 5583e464ed4..2cef541cdba 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -221,7 +221,7 @@ static int32_t tsdbOpenRocksCache(STsdb *pTsdb) { rocksdb_writebatch_t *writebatch = rocksdb_writebatch_create(); - TAOS_CHECK_GOTO(taosThreadMutexInit(&pTsdb->rCache.writeBatchMutex, NULL), &lino, _err6) ; + TAOS_CHECK_GOTO(taosThreadMutexInit(&pTsdb->rCache.writeBatchMutex, NULL), &lino, _err6); pTsdb->rCache.writebatch = writebatch; pTsdb->rCache.my_comparator = cmp; @@ -230,6 +230,9 @@ static int32_t tsdbOpenRocksCache(STsdb *pTsdb) { pTsdb->rCache.readoptions = readoptions; pTsdb->rCache.flushoptions = flushoptions; pTsdb->rCache.db = db; + pTsdb->rCache.sver = -1; + pTsdb->rCache.suid = -1; + pTsdb->rCache.uid = -1; pTsdb->rCache.pTSchema = NULL; TAOS_RETURN(code); @@ -723,34 +726,32 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; { SLastCol *pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol); - if (code == 
TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; - } - if (NULL != pLastCol) { - rocksdb_writebatch_delete(wb, keys_list[0], klen); + if (values_list[0] != NULL) { + code = tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } + if (NULL != pLastCol) { + rocksdb_writebatch_delete(wb, keys_list[0], klen); + } + taosMemoryFreeClear(pLastCol); } - taosMemoryFreeClear(pLastCol); pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; - } - if (NULL != pLastCol) { - rocksdb_writebatch_delete(wb, keys_list[1], klen); + if (values_list[1] != NULL) { + code = tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } + if (NULL != pLastCol) { + rocksdb_writebatch_delete(wb, keys_list[1], klen); + } + taosMemoryFreeClear(pLastCol); } - taosMemoryFreeClear(pLastCol); rocksdb_free(values_list[0]); rocksdb_free(values_list[1]); @@ -1134,19 +1135,17 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t 
uid, SArray (void)taosThreadMutexLock(&pTsdb->lruMutex); for (int i = 0; i < num_keys; ++i) { - SLastUpdateCtx *updCtx = (SLastUpdateCtx *)taosArrayGet(updCtxArray, i); - - int8_t lflag = updCtx->lflag; - SRowKey *pRowKey = &updCtx->tsdbRowKey.key; - SColVal *pColVal = &updCtx->colVal; + SLastUpdateCtx *updCtx = &((SLastUpdateCtx *)TARRAY_DATA(updCtxArray))[i]; + int8_t lflag = updCtx->lflag; + SRowKey *pRowKey = &updCtx->tsdbRowKey.key; + SColVal *pColVal = &updCtx->colVal; if (lflag == LFLAG_LAST && !COL_VAL_IS_VALUE(pColVal)) { continue; } SLastKey *key = &(SLastKey){.lflag = lflag, .uid = uid, .cid = pColVal->cid}; - size_t klen = ROCKS_KEY_LEN; - LRUHandle *h = taosLRUCacheLookup(pCache, key, klen); + LRUHandle *h = taosLRUCacheLookup(pCache, key, ROCKS_KEY_LEN); if (h) { SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h); if (pLastCol->cacheStatus != TSDB_LAST_CACHE_NO_CACHE) { @@ -1218,14 +1217,13 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray SColVal *pColVal = &updCtx->colVal; SLastCol *pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[i] != NULL) { + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } } /* if (code) { @@ -1302,53 +1300,94 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray TAOS_RETURN(code); } +void tsdbCacheInvalidateSchema(STsdb *pTsdb, 
tb_uid_t suid, tb_uid_t uid, int32_t sver) { + SRocksCache *pRCache = &pTsdb->rCache; + if (!pRCache->pTSchema || sver <= pTsdb->rCache.sver) return; + + if (suid > 0 && suid == pRCache->suid) { + pRCache->sver = -1; + pRCache->suid = -1; + } + if (suid == 0 && uid == pRCache->uid) { + pRCache->sver = -1; + pRCache->uid = -1; + } +} + +static int32_t tsdbUpdateSkm(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, int32_t sver) { + SRocksCache *pRCache = &pTsdb->rCache; + if (pRCache->pTSchema && sver == pRCache->sver) { + if (suid > 0 && suid == pRCache->suid) { + return 0; + } + if (suid == 0 && uid == pRCache->uid) { + return 0; + } + } + + pRCache->suid = suid; + pRCache->uid = uid; + pRCache->sver = sver; + tDestroyTSchema(pRCache->pTSchema); + return metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, -1, &pRCache->pTSchema); +} + int32_t tsdbCacheRowFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, int64_t version, int32_t nRow, SRow **aRow) { int32_t code = 0, lino = 0; // 1. prepare last - TSDBROW lRow = {.type = TSDBROW_ROW_FMT, .pTSRow = aRow[nRow - 1], .version = version}; - + TSDBROW lRow = {.type = TSDBROW_ROW_FMT, .pTSRow = aRow[nRow - 1], .version = version}; STSchema *pTSchema = NULL; int32_t sver = TSDBROW_SVERSION(&lRow); SArray *ctxArray = NULL; SSHashObj *iColHash = NULL; - TAOS_CHECK_GOTO(metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, sver, &pTSchema), &lino, _exit); + TAOS_CHECK_GOTO(tsdbUpdateSkm(pTsdb, suid, uid, sver), &lino, _exit); + pTSchema = pTsdb->rCache.pTSchema; TSDBROW tRow = {.type = TSDBROW_ROW_FMT, .version = version}; int32_t nCol = pTSchema->numOfCols; - ctxArray = taosArrayInit(nCol, sizeof(SLastUpdateCtx)); - iColHash = tSimpleHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT)); + ctxArray = taosArrayInit(nCol * 2, sizeof(SLastUpdateCtx)); + if (ctxArray == NULL) { + TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, &lino, _exit); + } // 1. 
prepare by lrow STsdbRowKey tsdbRowKey = {0}; tsdbRowGetKey(&lRow, &tsdbRowKey); STSDBRowIter iter = {0}; - code = tsdbRowIterOpen(&iter, &lRow, pTSchema); - if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s tsdbRowIterOpen failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - TAOS_CHECK_GOTO(code, &lino, _exit); - } + TAOS_CHECK_GOTO(tsdbRowIterOpen(&iter, &lRow, pTSchema), &lino, _exit); + int32_t iCol = 0; for (SColVal *pColVal = tsdbRowIterNext(&iter); pColVal && iCol < nCol; pColVal = tsdbRowIterNext(&iter), iCol++) { SLastUpdateCtx updateCtx = {.lflag = LFLAG_LAST_ROW, .tsdbRowKey = tsdbRowKey, .colVal = *pColVal}; if (!taosArrayPush(ctxArray, &updateCtx)) { + tsdbRowClose(&iter); TAOS_CHECK_GOTO(terrno, &lino, _exit); } - if (!COL_VAL_IS_VALUE(pColVal)) { + if (COL_VAL_IS_VALUE(pColVal)) { + updateCtx.lflag = LFLAG_LAST; + if (!taosArrayPush(ctxArray, &updateCtx)) { + tsdbRowClose(&iter); + TAOS_CHECK_GOTO(terrno, &lino, _exit); + } + } else { + if (!iColHash) { + iColHash = tSimpleHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT)); + if (iColHash == NULL) { + tsdbRowClose(&iter); + TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, &lino, _exit); + } + } + if (tSimpleHashPut(iColHash, &iCol, sizeof(iCol), NULL, 0)) { + tsdbRowClose(&iter); TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, &lino, _exit); } - continue; - } - updateCtx.lflag = LFLAG_LAST; - if (!taosArrayPush(ctxArray, &updateCtx)) { - TAOS_CHECK_GOTO(terrno, &lino, _exit); } } tsdbRowClose(&iter); @@ -1393,7 +1432,10 @@ int32_t tsdbCacheRowFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, int6 } _exit: - taosMemoryFreeClear(pTSchema); + if (code) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); + } + taosArrayDestroy(ctxArray); tSimpleHashCleanup(iColHash); @@ -1692,14 +1734,13 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA continue; } - code = 
tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[i] != NULL) { + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } } SLastCol *pToFree = pLastCol; SIdxKey *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[j]; @@ -1959,14 +2000,13 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; for (int i = 0; i < numKeys; ++i) { SLastCol *pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[i] != NULL) { + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } } SIdxKey *idxKey = taosArrayGet(remainCols, i); SLastKey *pLastKey = &idxKey->key; diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index d508d759229..c7626dcf367 100644 --- 
a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -613,6 +613,16 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32 singleTableLastTs = pColVal->rowKey.ts; } + if (p->colVal.value.type != pColVal->colVal.value.type) { + // check for type/cid mismatch + tsdbError("last cache type mismatch, uid:%" PRIu64 + ", schema-type:%d, slotId:%d, cache-type:%d, cache-col:%d", + uid, p->colVal.value.type, slotIds[k], pColVal->colVal.value.type, pColVal->colVal.cid); + taosArrayClearEx(pRow, tsdbCacheFreeSLastColItem); + code = TSDB_CODE_INVALID_PARA; + goto _end; + } + if (!IS_VAR_DATA_TYPE(pColVal->colVal.value.type)) { p->colVal = pColVal->colVal; } else { diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index dae2b3a5eca..3ebcf50858b 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -409,7 +409,12 @@ int32_t vnodeSyncCommit(SVnode *pVnode) { vnodeAWait(&pVnode->commitTask); _exit: - vError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code)); + if (code) { + vError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code)); + } else { + vInfo("vgId:%d, sync commit end", TD_VID(pVnode)); + } + return code; } diff --git a/source/dnode/vnode/src/vnd/vnodeInitApi.c b/source/dnode/vnode/src/vnd/vnodeInitApi.c index 59a129cac83..d688d1323d3 100644 --- a/source/dnode/vnode/src/vnd/vnodeInitApi.c +++ b/source/dnode/vnode/src/vnd/vnodeInitApi.c @@ -162,10 +162,14 @@ void initStateStoreAPI(SStateStore* pStore) { pStore->streamStateSaveInfo = streamStateSaveInfo; pStore->streamStateGetInfo = streamStateGetInfo; pStore->streamStateSetNumber = streamStateSetNumber; + pStore->streamStateGetPrev = streamStateGetPrev; pStore->streamStateFillPut = streamStateFillPut; pStore->streamStateFillGet = streamStateFillGet; + 
pStore->streamStateFillAddIfNotExist = streamStateFillAddIfNotExist; pStore->streamStateFillDel = streamStateFillDel; + pStore->streamStateFillGetNext = streamStateFillGetNext; + pStore->streamStateFillGetPrev = streamStateFillGetPrev; pStore->streamStateCurNext = streamStateCurNext; pStore->streamStateCurPrev = streamStateCurPrev; @@ -176,9 +180,12 @@ void initStateStoreAPI(SStateStore* pStore) { pStore->streamStateFillSeekKeyPrev = streamStateFillSeekKeyPrev; pStore->streamStateFreeCur = streamStateFreeCur; - pStore->streamStateGetGroupKVByCur = streamStateGetGroupKVByCur; + pStore->streamStateFillGetGroupKVByCur = streamStateFillGetGroupKVByCur; pStore->streamStateGetKVByCur = streamStateGetKVByCur; + pStore->streamStateSetFillInfo = streamStateSetFillInfo; + pStore->streamStateClearExpiredState = streamStateClearExpiredState; + pStore->streamStateSessionAddIfNotExist = streamStateSessionAddIfNotExist; pStore->streamStateSessionPut = streamStateSessionPut; pStore->streamStateSessionGet = streamStateSessionGet; @@ -214,6 +221,11 @@ void initStateStoreAPI(SStateStore* pStore) { pStore->streamStateSessionSeekKeyCurrentPrev = streamStateSessionSeekKeyCurrentPrev; pStore->streamStateSessionSeekKeyCurrentNext = streamStateSessionSeekKeyCurrentNext; + pStore->streamStateGroupPut = streamStateGroupPut; + pStore->streamStateGroupGetCur = streamStateGroupGetCur; + pStore->streamStateGroupCurNext = streamStateGroupCurNext; + pStore->streamStateGroupGetKVByCur = streamStateGroupGetKVByCur; + pStore->streamFileStateDestroy = streamFileStateDestroy; pStore->streamFileStateClear = streamFileStateClear; pStore->needClearDiskBuff = needClearDiskBuff; diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 7c6a2e73130..0929953e1cf 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -702,7 +702,7 @@ int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) { } int32_t 
vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) { - SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 0); + SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 0, NULL); if (pSW) { *num = pSW->nCols; tDeleteSchemaWrapper(pSW); diff --git a/source/libs/azure/src/az.cpp b/source/libs/azure/src/az.cpp index 831694356a8..1380b58bbdd 100644 --- a/source/libs/azure/src/az.cpp +++ b/source/libs/azure/src/az.cpp @@ -22,13 +22,16 @@ #include "taoserror.h" #include "tglobal.h" +int32_t azBegin() { return TSDB_CODE_SUCCESS; } + +void azEnd() {} + #if defined(USE_S3) #include #include #include "td_block_blob_client.hpp" -// Add appropriate using namespace directives using namespace Azure::Storage; using namespace Azure::Storage::Blobs; @@ -40,10 +43,6 @@ extern char tsS3BucketName[TSDB_FQDN_LEN]; extern int8_t tsS3Enabled; extern int8_t tsS3EpNum; -int32_t azBegin() { return TSDB_CODE_SUCCESS; } - -void azEnd() {} - static void checkPrint(const char *fmt, ...) 
{ va_list arg_ptr; va_start(arg_ptr, fmt); @@ -223,7 +222,6 @@ static int32_t azPutObjectFromFileOffsetImpl(const char *file, const char *objec uint8_t blobContent[] = "Hello Azure!"; // Create the block blob client // BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName); - // TDBlockBlobClient blobClient(containerClient.GetBlobClient(blobName)); TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name)); blobClient.UploadFrom(file, offset, size); @@ -416,7 +414,7 @@ int32_t azPutObjectFromFile2(const char *file, const char *object, int8_t withcp return 0; } -int32_t azGetObjectToFile(const char *object_name, const char *fileName) { +static int32_t azGetObjectToFileImpl(const char *object_name, const char *fileName) { int32_t code = TSDB_CODE_SUCCESS; std::string accountName = tsS3AccessKeyId[0]; std::string accountKey = tsS3AccessKeySecret[0]; @@ -450,7 +448,24 @@ int32_t azGetObjectToFile(const char *object_name, const char *fileName) { TAOS_RETURN(code); } -int32_t azGetObjectsByPrefix(const char *prefix, const char *path) { +int32_t azGetObjectToFile(const char *object_name, const char *fileName) { + int32_t code = 0; + + try { + code = azGetObjectToFileImpl(object_name, fileName); + } catch (const std::exception &e) { + azError("%s: Reason Phrase: %s", __func__, e.what()); + + code = TAOS_SYSTEM_ERROR(EIO); + azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + + TAOS_RETURN(code); + } + + TAOS_RETURN(code); +} + +static int32_t azGetObjectsByPrefixImpl(const char *prefix, const char *path) { const std::string delimiter = "/"; std::string accountName = tsS3AccessKeyId[0]; std::string accountKey = tsS3AccessKeySecret[0]; @@ -497,6 +512,23 @@ int32_t azGetObjectsByPrefix(const char *prefix, const char *path) { return 0; } +int32_t azGetObjectsByPrefix(const char *prefix, const char *path) { + int32_t code = 0; + + try { + code = azGetObjectsByPrefixImpl(prefix, path); + } catch (const std::exception 
&e) { + azError("%s: Reason Phrase: %s", __func__, e.what()); + + code = TAOS_SYSTEM_ERROR(EIO); + azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + + TAOS_RETURN(code); + } + + TAOS_RETURN(code); +} + int32_t azDeleteObjects(const char *object_name[], int nobject) { for (int i = 0; i < nobject; ++i) { azDeleteObjectsByPrefix(object_name[i]); @@ -507,10 +539,6 @@ int32_t azDeleteObjects(const char *object_name[], int nobject) { #else -int32_t azBegin() { return TSDB_CODE_SUCCESS; } - -void azEnd() {} - int32_t azCheckCfg() { return TSDB_CODE_SUCCESS; } int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) { diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index b2417a8597f..6272ac7049a 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -35,7 +35,8 @@ extern SConfig* tsCfg; static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRetrieveTableRsp** pRsp) { - size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN; *pRsp = taosMemoryCalloc(1, rspSize); if (NULL == *pRsp) { return terrno; @@ -49,7 +50,7 @@ static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRe (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfCols = htonl(numOfCols); - int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, numOfCols); + int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, numOfCols); if(len < 0) { taosMemoryFree(*pRsp); return terrno; @@ -292,7 +293,7 @@ static int32_t buildRetension(SArray* pRetension, char** ppRetentions) { } const int lMaxLen = 128; - char* p1 = taosMemoryCalloc(1, lMaxLen); + char* p1 = taosMemoryCalloc(1, 
lMaxLen); if (NULL == p1) { return terrno; } @@ -346,20 +347,20 @@ static const char* encryptAlgorithmStr(int8_t encryptAlgorithm) { } int32_t formatDurationOrKeep(char* buffer, int64_t bufSize, int32_t timeInMinutes) { - if (buffer == NULL || bufSize <= 0) { - return 0; - } - int32_t len = 0; - if (timeInMinutes % 1440 == 0) { - int32_t days = timeInMinutes / 1440; - len = tsnprintf(buffer, bufSize, "%dd", days); - } else if (timeInMinutes % 60 == 0) { - int32_t hours = timeInMinutes / 60; - len = tsnprintf(buffer, bufSize, "%dh", hours); - } else { - len = tsnprintf(buffer, bufSize, "%dm", timeInMinutes); - } - return len; + if (buffer == NULL || bufSize <= 0) { + return 0; + } + int32_t len = 0; + if (timeInMinutes % 1440 == 0) { + int32_t days = timeInMinutes / 1440; + len = tsnprintf(buffer, bufSize, "%dd", days); + } else if (timeInMinutes % 60 == 0) { + int32_t hours = timeInMinutes / 60; + len = tsnprintf(buffer, bufSize, "%dh", hours); + } else { + len = tsnprintf(buffer, bufSize, "%dm", timeInMinutes); + } + return len; } static int32_t setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbName, char* dbFName, SDbCfgInfo* pCfg) { @@ -410,27 +411,27 @@ static int32_t setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbName, int32_t lenKeep2 = formatDurationOrKeep(keep2Str, sizeof(keep2Str), pCfg->daysToKeep2); if (IS_SYS_DBNAME(dbName)) { - len += tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_DB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, "CREATE DATABASE `%s`", dbName); - } else { len += tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_DB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, - "CREATE DATABASE `%s` BUFFER %d CACHESIZE %d CACHEMODEL '%s' COMP %d DURATION %s " - "WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d STT_TRIGGER %d KEEP %s,%s,%s PAGES %d PAGESIZE %d " - "PRECISION '%s' REPLICA %d " - "WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d TABLE_PREFIX %d TABLE_SUFFIX %d TSDB_PAGESIZE %d " - "WAL_RETENTION_PERIOD %d WAL_RETENTION_SIZE %" PRId64 - " 
KEEP_TIME_OFFSET %d ENCRYPT_ALGORITHM '%s' S3_CHUNKSIZE %d S3_KEEPLOCAL %dm S3_COMPACT %d", - dbName, pCfg->buffer, pCfg->cacheSize, cacheModelStr(pCfg->cacheLast), pCfg->compression, - durationStr, - pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->sstTrigger, - keep0Str, keep1Str, keep2Str, - pCfg->pages, pCfg->pageSize, prec, - pCfg->replications, pCfg->walLevel, pCfg->numOfVgroups, 1 == pCfg->numOfStables, hashPrefix, - pCfg->hashSuffix, pCfg->tsdbPageSize, pCfg->walRetentionPeriod, pCfg->walRetentionSize, - pCfg->keepTimeOffset, encryptAlgorithmStr(pCfg->encryptAlgorithm), pCfg->s3ChunkSize, - pCfg->s3KeepLocal, pCfg->s3Compact); + "CREATE DATABASE `%s`", dbName); + } else { + len += + tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_DB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, + "CREATE DATABASE `%s` BUFFER %d CACHESIZE %d CACHEMODEL '%s' COMP %d DURATION %s " + "WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d STT_TRIGGER %d KEEP %s,%s,%s PAGES %d PAGESIZE %d " + "PRECISION '%s' REPLICA %d " + "WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d TABLE_PREFIX %d TABLE_SUFFIX %d TSDB_PAGESIZE %d " + "WAL_RETENTION_PERIOD %d WAL_RETENTION_SIZE %" PRId64 + " KEEP_TIME_OFFSET %d ENCRYPT_ALGORITHM '%s' S3_CHUNKPAGES %d S3_KEEPLOCAL %dm S3_COMPACT %d", + dbName, pCfg->buffer, pCfg->cacheSize, cacheModelStr(pCfg->cacheLast), pCfg->compression, durationStr, + pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->sstTrigger, keep0Str, keep1Str, keep2Str, + pCfg->pages, pCfg->pageSize, prec, pCfg->replications, pCfg->walLevel, pCfg->numOfVgroups, + 1 == pCfg->numOfStables, hashPrefix, pCfg->hashSuffix, pCfg->tsdbPageSize, pCfg->walRetentionPeriod, + pCfg->walRetentionSize, pCfg->keepTimeOffset, encryptAlgorithmStr(pCfg->encryptAlgorithm), + pCfg->s3ChunkSize, pCfg->s3KeepLocal, pCfg->s3Compact); if (pRetentions) { - len += tsnprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_DB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, " RETENTIONS %s", pRetentions); + len += tsnprintf(buf2 + 
VARSTR_HEADER_SIZE + len, SHOW_CREATE_DB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, + " RETENTIONS %s", pRetentions); } } @@ -510,30 +511,30 @@ void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) { #define LTYPE_LEN (32 + 60) // 60 byte for compress info char type[LTYPE_LEN]; snprintf(type, LTYPE_LEN, "%s", tDataTypes[pSchema->type].name); - int typeLen = strlen(type); + int typeLen = strlen(type); if (TSDB_DATA_TYPE_VARCHAR == pSchema->type || TSDB_DATA_TYPE_VARBINARY == pSchema->type || TSDB_DATA_TYPE_GEOMETRY == pSchema->type) { typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, "(%d)", (int32_t)(pSchema->bytes - VARSTR_HEADER_SIZE)); } else if (TSDB_DATA_TYPE_NCHAR == pSchema->type) { typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, "(%d)", - (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); + (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } if (useCompress(pCfg->tableType) && pCfg->pSchemaExt) { typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, " ENCODE \'%s\'", - columnEncodeStr(COMPRESS_L1_TYPE_U32(pCfg->pSchemaExt[i].compress))); + columnEncodeStr(COMPRESS_L1_TYPE_U32(pCfg->pSchemaExt[i].compress))); typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, " COMPRESS \'%s\'", - columnCompressStr(COMPRESS_L2_TYPE_U32(pCfg->pSchemaExt[i].compress))); + columnCompressStr(COMPRESS_L2_TYPE_U32(pCfg->pSchemaExt[i].compress))); typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, " LEVEL \'%s\'", - columnLevelStr(COMPRESS_L2_TYPE_LEVEL_U32(pCfg->pSchemaExt[i].compress))); + columnLevelStr(COMPRESS_L2_TYPE_LEVEL_U32(pCfg->pSchemaExt[i].compress))); } if (!(pSchema->flags & COL_IS_KEY)) { - *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s", - ((i > 0) ? 
", " : ""), pSchema->name, type); + *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), + "%s`%s` %s", ((i > 0) ? ", " : ""), pSchema->name, type); } else { char* pk = "PRIMARY KEY"; - *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s %s", - ((i > 0) ? ", " : ""), pSchema->name, type, pk); + *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), + "%s`%s` %s %s", ((i > 0) ? ", " : ""), pSchema->name, type, pk); } } } @@ -545,14 +546,15 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) { snprintf(type, sizeof(type), "%s", tDataTypes[pSchema->type].name); if (TSDB_DATA_TYPE_VARCHAR == pSchema->type || TSDB_DATA_TYPE_VARBINARY == pSchema->type || TSDB_DATA_TYPE_GEOMETRY == pSchema->type) { - snprintf(type + strlen(type), sizeof(type) - strlen(type), "(%d)", (int32_t)(pSchema->bytes - VARSTR_HEADER_SIZE)); + snprintf(type + strlen(type), sizeof(type) - strlen(type), "(%d)", + (int32_t)(pSchema->bytes - VARSTR_HEADER_SIZE)); } else if (TSDB_DATA_TYPE_NCHAR == pSchema->type) { snprintf(type + strlen(type), sizeof(type) - strlen(type), "(%d)", (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } - *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s", - ((i > 0) ? ", " : ""), pSchema->name, type); + *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), + "%s`%s` %s", ((i > 0) ? 
", " : ""), pSchema->name, type); } } @@ -560,7 +562,7 @@ void appendTagNameFields(char* buf, int32_t* len, STableCfg* pCfg) { for (int32_t i = 0; i < pCfg->numOfTags; ++i) { SSchema* pSchema = pCfg->pSchemas + pCfg->numOfColumns + i; *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - "%s`%s`", ((i > 0) ? ", " : ""), pSchema->name); + "%s`%s`", ((i > 0) ? ", " : ""), pSchema->name); } } @@ -582,7 +584,7 @@ int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) { return terrno; } *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - "%s", pJson); + "%s", pJson); taosMemoryFree(pJson); return TSDB_CODE_SUCCESS; @@ -596,12 +598,12 @@ int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) { SSchema* pSchema = pCfg->pSchemas + pCfg->numOfColumns + i; if (i > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - ", "); + ", "); } if (j >= valueNum) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - "NULL"); + "NULL"); continue; } @@ -624,14 +626,15 @@ int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) { code = dataConverToStr(buf + VARSTR_HEADER_SIZE + *len, leftSize, type, pTagVal->pData, pTagVal->nData, &tlen); TAOS_CHECK_ERRNO(code); } else { - code = dataConverToStr(buf + VARSTR_HEADER_SIZE + *len, leftSize, type, &pTagVal->i64, tDataTypes[type].bytes, &tlen); + code = dataConverToStr(buf + VARSTR_HEADER_SIZE + *len, leftSize, type, &pTagVal->i64, tDataTypes[type].bytes, + &tlen); TAOS_CHECK_ERRNO(code); } *len += tlen; j++; } else { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - "NULL"); + "NULL"); } } _exit: @@ -643,38 +646,38 @@ int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) { 
void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* pCfg) { if (pCfg->commentLen > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " COMMENT '%s'", pCfg->pComment); + " COMMENT '%s'", pCfg->pComment); } else if (0 == pCfg->commentLen) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " COMMENT ''"); + " COMMENT ''"); } if (NULL != pDbCfg->pRetensions && pCfg->watermark1 > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " WATERMARK %" PRId64 "a", pCfg->watermark1); + " WATERMARK %" PRId64 "a", pCfg->watermark1); if (pCfg->watermark2 > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - ", %" PRId64 "a", pCfg->watermark2); + ", %" PRId64 "a", pCfg->watermark2); } } if (NULL != pDbCfg->pRetensions && pCfg->delay1 > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " MAX_DELAY %" PRId64 "a", pCfg->delay1); + " MAX_DELAY %" PRId64 "a", pCfg->delay1); if (pCfg->delay2 > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - ", %" PRId64 "a", pCfg->delay2); + ", %" PRId64 "a", pCfg->delay2); } } int32_t funcNum = taosArrayGetSize(pCfg->pFuncs); if (NULL != pDbCfg->pRetensions && funcNum > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " ROLLUP("); + " ROLLUP("); for (int32_t i = 0; i < funcNum; ++i) { char* pFunc = taosArrayGet(pCfg->pFuncs, i); *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - "%s%s", ((i > 0) ? ", " : ""), pFunc); + "%s%s", ((i > 0) ? 
", " : ""), pFunc); } *len += snprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), ")"); @@ -682,7 +685,7 @@ void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* if (pCfg->ttl > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " TTL %d", pCfg->ttl); + " TTL %d", pCfg->ttl); } if (TSDB_SUPER_TABLE == pCfg->tableType || TSDB_NORMAL_TABLE == pCfg->tableType) { @@ -696,18 +699,18 @@ void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* if (nSma < pCfg->numOfColumns && nSma > 0) { bool smaOn = false; *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " SMA("); + " SMA("); for (int32_t i = 0; i < pCfg->numOfColumns; ++i) { if (IS_BSMA_ON(pCfg->pSchemas + i)) { if (smaOn) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, - SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), ",`%s`", - (pCfg->pSchemas + i)->name); + SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), ",`%s`", + (pCfg->pSchemas + i)->name); } else { smaOn = true; *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, - SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "`%s`", - (pCfg->pSchemas + i)->name); + SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "`%s`", + (pCfg->pSchemas + i)->name); } } } @@ -736,20 +739,20 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p if (TSDB_SUPER_TABLE == pCfg->tableType) { len += tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_TB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, - "CREATE STABLE `%s` (", tbName); + "CREATE STABLE `%s` (", tbName); appendColumnFields(buf2, &len, pCfg); len += tsnprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + len), - ") TAGS ("); + ") TAGS ("); appendTagFields(buf2, &len, 
pCfg); len += snprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + len), ")"); appendTableOptions(buf2, &len, pDbCfg, pCfg); } else if (TSDB_CHILD_TABLE == pCfg->tableType) { len += tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_TB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, - "CREATE TABLE `%s` USING `%s` (", tbName, pCfg->stbName); + "CREATE TABLE `%s` USING `%s` (", tbName, pCfg->stbName); appendTagNameFields(buf2, &len, pCfg); len += tsnprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + len), - ") TAGS ("); + ") TAGS ("); code = appendTagValues(buf2, &len, pCfg); TAOS_CHECK_ERRNO(code); len += @@ -757,7 +760,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p appendTableOptions(buf2, &len, pDbCfg, pCfg); } else { len += tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_TB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, - "CREATE TABLE `%s` (", tbName); + "CREATE TABLE `%s` (", tbName); appendColumnFields(buf2, &len, pCfg); len += snprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + len), ")"); @@ -793,7 +796,7 @@ static int32_t setCreateViewResultIntoDataBlock(SSDataBlock* pBlock, SShowCreate } SViewMeta* pMeta = pStmt->pViewMeta; - if(NULL == pMeta) { + if (NULL == pMeta) { qError("exception: view meta is null"); return TSDB_CODE_APP_ERROR; } diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index b82bba250f0..42c214fac73 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -1966,7 +1966,8 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) { pBlock->info.rows = rowNum; - int32_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + int32_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN; 
SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, rspSize); if (NULL == rsp) { @@ -1977,7 +1978,7 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) { rsp->completed = 1; rsp->numOfRows = htobe64((int64_t)rowNum); - int32_t len = blockEncode(pBlock, rsp->data + PAYLOAD_PREFIX_LEN, taosArrayGetSize(pBlock->pDataBlock)); + int32_t len = blockEncode(pBlock, rsp->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, taosArrayGetSize(pBlock->pDataBlock)); if(len < 0) { qError("qExplainGetRspFromCtx: blockEncode failed"); QRY_ERR_JRET(terrno); diff --git a/source/libs/executor/CMakeLists.txt b/source/libs/executor/CMakeLists.txt index 014b5383755..9a49076b6b0 100644 --- a/source/libs/executor/CMakeLists.txt +++ b/source/libs/executor/CMakeLists.txt @@ -7,7 +7,7 @@ if(${TD_DARWIN}) endif(${TD_DARWIN}) if(${BUILD_WITH_ANALYSIS}) - add_definitions(-DUSE_ANAL) + add_definitions(-DUSE_ANALYTICS) endif() target_link_libraries(executor diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 572ff88be90..039c0fa68b1 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -454,6 +454,29 @@ typedef struct SSteamOpBasicInfo { bool updateOperatorInfo; } SSteamOpBasicInfo; +typedef struct SStreamFillSupporter { + int32_t type; // fill type + SInterval interval; + SResultRowData prev; + TSKEY prevOriginKey; + SResultRowData cur; + SResultRowData next; + TSKEY nextOriginKey; + SResultRowData nextNext; + SFillColInfo* pAllColInfo; // fill exprs and not fill exprs + SExprSupp notFillExprSup; + int32_t numOfAllCols; // number of all exprs, including the tags columns + int32_t numOfFillCols; + int32_t numOfNotFillCols; + int32_t rowSize; + SSHashObj* pResMap; + bool hasDelete; + SStorageAPI* pAPI; + STimeWindow winRange; + int32_t pkColBytes; + __compar_fn_t comparePkColFn; +} SStreamFillSupporter; + typedef struct SStreamScanInfo { SSteamOpBasicInfo basic; SExprInfo* 
pPseudoExpr; @@ -477,6 +500,7 @@ typedef struct SStreamScanInfo { STqReader* tqReader; uint64_t groupId; + bool igCheckGroupId; struct SUpdateInfo* pUpdateInfo; EStreamScanMode scanMode; @@ -493,6 +517,7 @@ typedef struct SStreamScanInfo { STimeWindow updateWin; STimeWindowAggSupp twAggSup; SSDataBlock* pUpdateDataRes; + SStreamFillSupporter* pFillSup; // status for tmq SNodeList* pGroupTags; SNode* pTagCond; @@ -511,6 +536,8 @@ typedef struct SStreamScanInfo { SSDataBlock* pCheckpointRes; int8_t pkColType; int32_t pkColLen; + bool useGetResultRange; + STimeWindow lastScanRange; } SStreamScanInfo; typedef struct { @@ -553,6 +580,7 @@ typedef struct SIntervalAggOperatorInfo { EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model] STimeWindowAggSupp twAggSup; SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation. + bool cleanGroupResInfo; struct SOperatorInfo* pOperator; // for limit optimization bool limited; @@ -780,25 +808,6 @@ typedef struct SStreamPartitionOperatorInfo { SSDataBlock* pCreateTbRes; } SStreamPartitionOperatorInfo; -typedef struct SStreamFillSupporter { - int32_t type; // fill type - SInterval interval; - SResultRowData prev; - SResultRowData cur; - SResultRowData next; - SResultRowData nextNext; - SFillColInfo* pAllColInfo; // fill exprs and not fill exprs - SExprSupp notFillExprSup; - int32_t numOfAllCols; // number of all exprs, including the tags columns - int32_t numOfFillCols; - int32_t numOfNotFillCols; - int32_t rowSize; - SSHashObj* pResMap; - bool hasDelete; - SStorageAPI* pAPI; - STimeWindow winRange; -} SStreamFillSupporter; - typedef struct SStreamFillOperatorInfo { SSteamOpBasicInfo basic; SStreamFillSupporter* pFillSup; @@ -812,8 +821,70 @@ typedef struct SStreamFillOperatorInfo { int32_t primaryTsCol; int32_t primarySrcSlotId; SStreamFillInfo* pFillInfo; + SStreamAggSupporter* pStreamAggSup; + SArray* pCloseTs; + SArray* pUpdated; + SGroupResInfo groupResInfo; } 
SStreamFillOperatorInfo; +typedef struct SStreamTimeSliceOperatorInfo { + SSteamOpBasicInfo basic; + STimeWindowAggSupp twAggSup; + SStreamAggSupporter streamAggSup; + SStreamFillSupporter* pFillSup; + SStreamFillInfo* pFillInfo; + SSDataBlock* pRes; + SSDataBlock* pDelRes; + bool recvCkBlock; + SSDataBlock* pCheckpointRes; + int32_t fillType; + SResultRowData leftRow; + SResultRowData valueRow; + SResultRowData rightRow; + int32_t primaryTsIndex; + SExprSupp scalarSup; // scalar calculation + bool ignoreExpiredData; + bool ignoreExpiredDataSaved; + bool destHasPrimaryKey; + SArray* historyPoints; + SArray* pUpdated; // SWinKey + SArray* historyWins; + SSHashObj* pUpdatedMap; + int32_t delIndex; + SArray* pDelWins; // SWinKey + SSHashObj* pDeletedMap; + uint64_t numOfDatapack; + SGroupResInfo groupResInfo; + bool ignoreNull; + bool isHistoryOp; + SArray* pCloseTs; + struct SOperatorInfo* pOperator; +} SStreamTimeSliceOperatorInfo; + +typedef struct SStreamIntervalSliceOperatorInfo { + SSteamOpBasicInfo basic; + SOptrBasicInfo binfo; + STimeWindowAggSupp twAggSup; + SStreamAggSupporter streamAggSup; + SExprSupp scalarSup; + SInterval interval; + bool recvCkBlock; + SSDataBlock* pCheckpointRes; + int32_t primaryTsIndex; + SSHashObj* pUpdatedMap; // SWinKey + SArray* pUpdated; // SWinKey + SSHashObj* pDeletedMap; + SArray* pDelWins; + SSDataBlock* pDelRes; + int32_t delIndex; + bool destHasPrimaryKey; + int64_t endTs; + SGroupResInfo groupResInfo; + struct SOperatorInfo* pOperator; + bool hasFill; + bool hasInterpoFunc; +} SStreamIntervalSliceOperatorInfo; + #define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED) #define OPTR_SET_OPENED(_optr) ((_optr)->status |= OP_OPENED) @@ -831,8 +902,10 @@ void cleanupExprSupp(SExprSupp* pSup); void cleanupResultInfoInStream(SExecTaskInfo* pTaskInfo, void* pState, SExprSupp* pSup, SGroupResInfo* pGroupResInfo); -void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, - 
SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap); +void cleanupResultInfoInHashMap(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, + SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap); +void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SGroupResInfo* pGroupResInfo, + SAggSupporter *pAggSup, bool cleanHashmap); void cleanupResultInfoWithoutHash(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo); @@ -934,54 +1007,57 @@ int32_t copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResu void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExprs, const int32_t* rowEntryOffset); void doClearBufferedBlocks(SStreamScanInfo* pInfo); -void streamOpReleaseState(struct SOperatorInfo* pOperator); -void streamOpReloadState(struct SOperatorInfo* pOperator); -void destroyStreamAggSupporter(SStreamAggSupporter* pSup); -void clearGroupResInfo(SGroupResInfo* pGroupResInfo); -int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResultBlock, SFunctionStateStore* pStore); -int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, int32_t numOfOutput, int64_t gap, - SStreamState* pState, int32_t keySize, int16_t keyType, SStateStore* pStore, - SReadHandle* pHandle, STimeWindowAggSupp* pTwAggSup, const char* taskIdStr, - SStorageAPI* pApi, int32_t tsIndex); -int32_t initDownStream(struct SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, uint16_t type, - int32_t tsColIndex, STimeWindowAggSupp* pTwSup, struct SSteamOpBasicInfo* pBasic); -int32_t getMaxTsWins(const SArray* pAllWins, SArray* pMaxWins); -void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList); -void getSessionHashKey(const SSessionKey* pKey, SSessionKey* pHashKey); -int32_t deleteSessionWinState(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SSHashObj* pMapUpdate, - SSHashObj* 
pMapDelete, SSHashObj* pPkDelete, bool needAdd); -int32_t getAllSessionWindow(SSHashObj* pHashMap, SSHashObj* pStUpdated); -int32_t closeSessionWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SSHashObj* pClosed); -int32_t copyUpdateResult(SSHashObj** ppWinUpdated, SArray* pUpdated, __compar_fn_t compar); -int32_t sessionKeyCompareAsc(const void* pKey1, const void* pKey2); -void removeSessionDeleteResults(SSHashObj* pHashMap, SArray* pWins); -int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindowInfo* pCurWin, SResultRow** pResult, - int32_t startIndex, int32_t winRows, int32_t rows, int32_t numOutput, - struct SOperatorInfo* pOperator, int64_t winDelta); -void setSessionWinOutputInfo(SSHashObj* pStUpdated, SResultWindowInfo* pWinInfo); -int32_t saveSessionOutputBuf(SStreamAggSupporter* pAggSup, SResultWindowInfo* pWinInfo); -int32_t saveResult(SResultWindowInfo winInfo, SSHashObj* pStUpdated); -int32_t saveDeleteRes(SSHashObj* pStDelete, SSessionKey key); -void removeSessionResult(SStreamAggSupporter* pAggSup, SSHashObj* pHashMap, SSHashObj* pResMap, SSessionKey* pKey); -void doBuildDeleteDataBlock(struct SOperatorInfo* pOp, SSHashObj* pStDeleted, SSDataBlock* pBlock, void** Ite); -void doBuildSessionResult(struct SOperatorInfo* pOperator, void* pState, SGroupResInfo* pGroupResInfo, - SSDataBlock* pBlock); -int32_t getSessionWindowInfoByKey(SStreamAggSupporter* pAggSup, SSessionKey* pKey, SResultWindowInfo* pWinInfo); -void getNextSessionWinInfo(SStreamAggSupporter* pAggSup, SSHashObj* pStUpdated, SResultWindowInfo* pCurWin, - SResultWindowInfo* pNextWin); -int32_t compactTimeWindow(SExprSupp* pSup, SStreamAggSupporter* pAggSup, STimeWindowAggSupp* pTwAggSup, - SExecTaskInfo* pTaskInfo, SResultWindowInfo* pCurWin, SResultWindowInfo* pNextWin, - SSHashObj* pStUpdated, SSHashObj* pStDeleted, bool addGap); -void releaseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI); -void resetWinRange(STimeWindow* winRange); -bool 
checkExpiredData(SStateStore* pAPI, SUpdateInfo* pUpdateInfo, STimeWindowAggSupp* pTwSup, uint64_t tableId, - TSKEY ts, void* pPkVal, int32_t len); -int64_t getDeleteMark(SWindowPhysiNode* pWinPhyNode, int64_t interval); -void resetUnCloseSessionWinInfo(SSHashObj* winMap); -void setStreamOperatorCompleted(struct SOperatorInfo* pOperator); -void reloadAggSupFromDownStream(struct SOperatorInfo* downstream, SStreamAggSupporter* pAggSup); -void destroyFlusedPos(void* pRes); +void streamOpReleaseState(struct SOperatorInfo* pOperator); +void streamOpReloadState(struct SOperatorInfo* pOperator); +void destroyStreamAggSupporter(SStreamAggSupporter* pSup); +void clearGroupResInfo(SGroupResInfo* pGroupResInfo); +int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfCols, + SSDataBlock* pResultBlock, SFunctionStateStore* pStore); +int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, int32_t numOfOutput, int64_t gap, + SStreamState* pState, int32_t keySize, int16_t keyType, SStateStore* pStore, + SReadHandle* pHandle, STimeWindowAggSupp* pTwAggSup, const char* taskIdStr, + SStorageAPI* pApi, int32_t tsIndex, int8_t stateType, int32_t ratio); +int32_t initDownStream(struct SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, uint16_t type, + int32_t tsColIndex, STimeWindowAggSupp* pTwSup, struct SSteamOpBasicInfo* pBasic); +int32_t getMaxTsWins(const SArray* pAllWins, SArray* pMaxWins); +void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList); +void getSessionHashKey(const SSessionKey* pKey, SSessionKey* pHashKey); +int32_t deleteSessionWinState(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SSHashObj* pMapUpdate, + SSHashObj* pMapDelete, SSHashObj* pPkDelete, bool needAdd); +int32_t getAllSessionWindow(SSHashObj* pHashMap, SSHashObj* pStUpdated); +int32_t closeSessionWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SSHashObj* pClosed); +int32_t 
copyUpdateResult(SSHashObj** ppWinUpdated, SArray* pUpdated, __compar_fn_t compar); +int32_t sessionKeyCompareAsc(const void* pKey1, const void* pKey2); +void removeSessionDeleteResults(SSHashObj* pHashMap, SArray* pWins); +int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindowInfo* pCurWin, SResultRow** pResult, + int32_t startIndex, int32_t winRows, int32_t rows, int32_t numOutput, + struct SOperatorInfo* pOperator, int64_t winDelta); +void setSessionWinOutputInfo(SSHashObj* pStUpdated, SResultWindowInfo* pWinInfo); +int32_t saveSessionOutputBuf(SStreamAggSupporter* pAggSup, SResultWindowInfo* pWinInfo); +int32_t saveResult(SResultWindowInfo winInfo, SSHashObj* pStUpdated); +int32_t saveDeleteRes(SSHashObj* pStDelete, SSessionKey key); +void removeSessionResult(SStreamAggSupporter* pAggSup, SSHashObj* pHashMap, SSHashObj* pResMap, SSessionKey* pKey); +void doBuildDeleteDataBlock(struct SOperatorInfo* pOp, SSHashObj* pStDeleted, SSDataBlock* pBlock, void** Ite); +void doBuildSessionResult(struct SOperatorInfo* pOperator, void* pState, SGroupResInfo* pGroupResInfo, + SSDataBlock* pBlock); +int32_t getSessionWindowInfoByKey(SStreamAggSupporter* pAggSup, SSessionKey* pKey, SResultWindowInfo* pWinInfo); +void getNextSessionWinInfo(SStreamAggSupporter* pAggSup, SSHashObj* pStUpdated, SResultWindowInfo* pCurWin, + SResultWindowInfo* pNextWin); +int32_t compactTimeWindow(SExprSupp* pSup, SStreamAggSupporter* pAggSup, STimeWindowAggSupp* pTwAggSup, + SExecTaskInfo* pTaskInfo, SResultWindowInfo* pCurWin, SResultWindowInfo* pNextWin, + SSHashObj* pStUpdated, SSHashObj* pStDeleted, bool addGap); +void releaseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI); +void resetWinRange(STimeWindow* winRange); +bool checkExpiredData(SStateStore* pAPI, SUpdateInfo* pUpdateInfo, STimeWindowAggSupp* pTwSup, uint64_t tableId, + TSKEY ts, void* pPkVal, int32_t len); +int64_t getDeleteMark(SWindowPhysiNode* pWinPhyNode, int64_t interval); +void 
resetUnCloseSessionWinInfo(SSHashObj* winMap); +void setStreamOperatorCompleted(struct SOperatorInfo* pOperator); +void reloadAggSupFromDownStream(struct SOperatorInfo* downstream, SStreamAggSupporter* pAggSup); +void destroyFlusedPos(void* pRes); +bool isIrowtsPseudoColumn(SExprInfo* pExprInfo); +bool isIsfilledPseudoColumn(SExprInfo* pExprInfo); +bool isInterpFunc(SExprInfo* pExprInfo); int32_t encodeSSessionKey(void** buf, SSessionKey* key); void* decodeSSessionKey(void* buf, SSessionKey* key); @@ -1015,6 +1091,8 @@ int32_t doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, S int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBlockInfo* pDataBlockInfo, TSKEY* primaryKeys, int32_t prevPosition, int32_t order); int32_t extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, int32_t status); +bool getIgoreNullRes(SExprSupp* pExprSup); +bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t index, bool ignoreNull); #ifdef __cplusplus } diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h index 7dfc7080d65..91aef934520 100644 --- a/source/libs/executor/inc/operator.h +++ b/source/libs/executor/inc/operator.h @@ -85,6 +85,8 @@ int32_t createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode* pExNo int32_t createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, STableListInfo* pTableList, SExecTaskInfo* pTaskInfo, SOperatorInfo** pInfo); +int32_t createTableSeqScanOperatorInfo(void* pReadHandle, SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo); + int32_t createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo, SOperatorInfo** pInfo); int32_t createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode*pTagIndexCond, SExecTaskInfo* 
pTaskInfo, SOperatorInfo** pInfo); @@ -165,6 +167,8 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p int32_t createDynQueryCtrlOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SDynQueryCtrlPhysiNode* pPhyciNode, SExecTaskInfo* pTaskInfo, SOperatorInfo** pInfo); +int32_t createStreamTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, SOperatorInfo** ppOptInfo); + // clang-format on SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t cleanup, diff --git a/source/libs/executor/inc/streamexecutorInt.h b/source/libs/executor/inc/streamexecutorInt.h index ab00dceb20f..27686b0081a 100644 --- a/source/libs/executor/inc/streamexecutorInt.h +++ b/source/libs/executor/inc/streamexecutorInt.h @@ -22,11 +22,87 @@ extern "C" { #include "executorInt.h" #include "tutil.h" +#define FILL_POS_INVALID 0 +#define FILL_POS_START 1 +#define FILL_POS_MID 2 +#define FILL_POS_END 3 + +#define HAS_NON_ROW_DATA(pRowData) (pRowData->key == INT64_MIN) +#define HAS_ROW_DATA(pRowData) (pRowData && pRowData->key != INT64_MIN) + +#define IS_INVALID_WIN_KEY(ts) ((ts) == INT64_MIN) +#define IS_VALID_WIN_KEY(ts) ((ts) != INT64_MIN) +#define SET_WIN_KEY_INVALID(ts) ((ts) = INT64_MIN) + +#define IS_NORMAL_INTERVAL_OP(op) \ + ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL || \ + (op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) + +#define IS_CONTINUE_INTERVAL_OP(op) ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL) + +#define IS_FILL_CONST_VALUE(type) ((type == TSDB_FILL_NULL || type == TSDB_FILL_NULL_F || type == TSDB_FILL_SET_VALUE || type == TSDB_FILL_SET_VALUE_F)) + +typedef struct SSliceRowData { + TSKEY key; + char pRowVal[]; +} SSliceRowData; + +typedef struct SSlicePoint { + SWinKey key; + SSliceRowData* pLeftRow; + SSliceRowData* pRightRow; + SRowBuffPos* pResPos; +} 
SSlicePoint; + void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type); bool needSaveStreamOperatorInfo(SSteamOpBasicInfo* pBasicInfo); void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo); +void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo); + +int64_t getDeleteMarkFromOption(SStreamNodeOption* pOption); +void removeDeleteResults(SSHashObj* pUpdatedMap, SArray* pDelWins); +int32_t copyIntervalDeleteKey(SSHashObj* pMap, SArray* pWins); +bool hasSrcPrimaryKeyCol(SSteamOpBasicInfo* pInfo); +int32_t getNexWindowPos(SInterval* pInterval, SDataBlockInfo* pBlockInfo, TSKEY* tsCols, int32_t startPos, TSKEY eKey, + STimeWindow* pNextWin); +int32_t saveWinResult(SWinKey* pKey, SRowBuffPos* pPos, SSHashObj* pUpdatedMap); +void doBuildDeleteResultImpl(SStateStore* pAPI, SStreamState* pState, SArray* pWins, int32_t* index, + SSDataBlock* pBlock); + +SStreamFillInfo* initStreamFillInfo(SStreamFillSupporter* pFillSup, SSDataBlock* pRes); +SResultCellData* getResultCell(SResultRowData* pRaw, int32_t index); + +void destroyStreamFillSupporter(SStreamFillSupporter* pFillSup); +bool hasCurWindow(SStreamFillSupporter* pFillSup); +bool hasPrevWindow(SStreamFillSupporter* pFillSup); +bool hasNextWindow(SStreamFillSupporter* pFillSup); +void copyNotFillExpData(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo); +int32_t setRowCell(SColumnInfoData* pCol, int32_t rowId, const SResultCellData* pCell); +bool hasRemainCalc(SStreamFillInfo* pFillInfo); +void destroySPoint(void* ptr); +void destroyStreamFillInfo(SStreamFillInfo* pFillInfo); +int32_t checkResult(SStreamFillSupporter* pFillSup, TSKEY ts, uint64_t groupId, bool* pRes); +void resetStreamFillSup(SStreamFillSupporter* pFillSup); +void setPointBuff(SSlicePoint* pPoint, SStreamFillSupporter* pFillSup); + +int32_t saveTimeSliceWinResult(SWinKey* pKey, SSHashObj* pUpdatedMap); + +int winPosCmprImpl(const void* pKey1, const void* pKey2); + +void reuseOutputBuf(void* pState, SRowBuffPos* 
pPos, SStateStore* pAPI); +SResultCellData* getSliceResultCell(SResultCellData* pRowVal, int32_t index); +int32_t getDownstreamRes(struct SOperatorInfo* downstream, SSDataBlock** ppRes, SColumnInfo** ppPkCol); +void destroyFlusedppPos(void* ppRes); +void doBuildStreamIntervalResult(struct SOperatorInfo* pOperator, void* pState, SSDataBlock* pBlock, + SGroupResInfo* pGroupResInfo); +void transBlockToSliceResultRow(const SSDataBlock* pBlock, int32_t rowId, TSKEY ts, SSliceRowData* pRowVal, + int32_t rowSize, void* pPkData, SColumnInfoData* pPkCol); +int32_t getQualifiedRowNumDesc(SExprSupp* pExprSup, SSDataBlock* pBlock, TSKEY* tsCols, int32_t rowId, bool ignoreNull); -void reuseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI); +int32_t createStreamIntervalSliceOperatorInfo(struct SOperatorInfo* downstream, SPhysiNode* pPhyNode, + SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, + struct SOperatorInfo** ppOptInfo); +int32_t buildAllResultKey(SStreamAggSupporter* pAggSup, TSKEY ts, SArray* pUpdated); #ifdef __cplusplus } diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h index b06aa7d1c84..6072063bbfe 100644 --- a/source/libs/executor/inc/tfill.h +++ b/source/libs/executor/inc/tfill.h @@ -35,6 +35,7 @@ typedef struct SFillColInfo { SExprInfo* pExpr; bool notFillCol; // denote if this column needs fill operation SVariant fillVal; + bool fillNull; } SFillColInfo; typedef struct SFillLinearInfo { @@ -106,7 +107,9 @@ typedef struct SStreamFillInfo { TSKEY end; // endKey for fill TSKEY current; // current Key for fill TSKEY preRowKey; + TSKEY prePointKey; TSKEY nextRowKey; + TSKEY nextPointKey; SResultRowData* pResRow; SStreamFillLinearInfo* pLinearInfo; bool needFill; @@ -115,6 +118,8 @@ typedef struct SStreamFillInfo { SArray* delRanges; int32_t delIndex; uint64_t curGroupId; + bool hasNext; + SResultRowData* pNonFillRow; } SStreamFillInfo; int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t 
maxNumOfRows); @@ -125,12 +130,14 @@ void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struc void taosFillUpdateStartTimestampInfo(SFillInfo* pFillInfo, int64_t ts); bool taosFillNotStarted(const SFillInfo* pFillInfo); SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr, - int32_t numOfNotFillCols, const struct SNodeListNode* val); + int32_t numOfNotFillCols, SExprInfo* pFillNullExpr, int32_t numOfFillNullExprs, + const struct SNodeListNode* val); bool taosFillHasMoreResults(struct SFillInfo* pFillInfo); -int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity, - SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t slotId, - int32_t order, const char* id, SExecTaskInfo* pTaskInfo, SFillInfo** ppFillInfo); +int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t fillNullCols, + int32_t capacity, SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, + int32_t slotId, int32_t order, const char* id, SExecTaskInfo* pTaskInfo, + SFillInfo** ppFillInfo); void* taosDestroyFillInfo(struct SFillInfo* pFillInfo); int32_t taosFillResultDataBlock(struct SFillInfo* pFillInfo, SSDataBlock* p, int32_t capacity); diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index 91b435fbec3..829ca6da509 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -49,6 +49,7 @@ typedef struct SAggOperatorInfo { SSDataBlock* pNewGroupBlock; bool hasCountFunc; SOperatorInfo* pOperator; + bool cleanGroupResInfo; } SAggOperatorInfo; static void destroyAggOperatorInfo(void* param); @@ -121,6 +122,7 @@ int32_t createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pA pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder; pInfo->hasCountFunc = pAggNode->hasCountLikeFunc; pInfo->pOperator = 
pOperator; + pInfo->cleanGroupResInfo = false; setOperatorInfo(pOperator, "TableAggregate", QUERY_NODE_PHYSICAL_PLAN_HASH_AGG, !pAggNode->node.forceCreateNonBlockingOptr, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -159,8 +161,8 @@ void destroyAggOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); if (pInfo->pOperator) { - cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + pInfo->cleanGroupResInfo); pInfo->pOperator = NULL; } cleanupAggSup(&pInfo->aggSup); @@ -191,6 +193,7 @@ static bool nextGroupedResult(SOperatorInfo* pOperator) { int32_t order = pAggInfo->binfo.inputTsOrder; SSDataBlock* pBlock = pAggInfo->pNewGroupBlock; + pAggInfo->cleanGroupResInfo = false; if (pBlock) { pAggInfo->pNewGroupBlock = NULL; tSimpleHashClear(pAggInfo->aggSup.pResultRowHashTable); @@ -263,6 +266,7 @@ static bool nextGroupedResult(SOperatorInfo* pOperator) { code = initGroupedResultInfo(&pAggInfo->groupResInfo, pAggInfo->aggSup.pResultRowHashTable, 0); QUERY_CHECK_CODE(code, lino, _end); + pAggInfo->cleanGroupResInfo = true; _end: if (code != TSDB_CODE_SUCCESS) { @@ -627,7 +631,7 @@ void cleanupResultInfoInStream(SExecTaskInfo* pTaskInfo, void* pState, SExprSupp } } -void cleanupResultInfoWithoutHash(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, +void cleanupResultInfoInGroupResInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo) { int32_t numOfExprs = pSup->numOfExprs; int32_t* rowEntryOffset = pSup->rowEntryInfoOffset; @@ -663,7 +667,7 @@ void cleanupResultInfoWithoutHash(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDi } } -void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, +void cleanupResultInfoInHashMap(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, 
SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap) { int32_t numOfExprs = pSup->numOfExprs; int32_t* rowEntryOffset = pSup->rowEntryInfoOffset; @@ -701,6 +705,14 @@ void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* } } +void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SGroupResInfo* pGroupResInfo, + SAggSupporter *pAggSup, bool cleanGroupResInfo) { + if (cleanGroupResInfo) { + cleanupResultInfoInGroupResInfo(pTaskInfo, pSup, pAggSup->pResultBuf, pGroupResInfo); + } else { + cleanupResultInfoInHashMap(pTaskInfo, pSup, pAggSup->pResultBuf, pGroupResInfo, pAggSup->pResultRowHashTable); + } +} void cleanupAggSup(SAggSupporter* pAggSup) { taosMemoryFreeClear(pAggSup->keyBuf); tSimpleHashCleanup(pAggSup->pResultRowHashTable); diff --git a/source/libs/executor/src/anomalywindowoperator.c b/source/libs/executor/src/anomalywindowoperator.c index 7f3430b8374..94cc5d91295 100644 --- a/source/libs/executor/src/anomalywindowoperator.c +++ b/source/libs/executor/src/anomalywindowoperator.c @@ -19,14 +19,14 @@ #include "functionMgt.h" #include "operator.h" #include "querytask.h" -#include "tanal.h" +#include "tanalytics.h" #include "tcommon.h" #include "tcompare.h" #include "tdatablock.h" #include "tjson.h" #include "ttime.h" -#ifdef USE_ANAL +#ifdef USE_ANALYTICS typedef struct { SArray* blocks; // SSDataBlock* @@ -55,7 +55,7 @@ typedef struct { static void anomalyDestroyOperatorInfo(void* param); static int32_t anomalyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRes); -static void anomalyAggregateBlocks(SOperatorInfo* pOperator); +static int32_t anomalyAggregateBlocks(SOperatorInfo* pOperator); static int32_t anomalyCacheBlock(SAnomalyWindowOperatorInfo* pInfo, SSDataBlock* pBlock); int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* physiNode, SExecTaskInfo* pTaskInfo, @@ -78,6 +78,7 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p code = 
TSDB_CODE_ANAL_ALGO_NOT_FOUND; goto _error; } + if (taosAnalGetAlgoUrl(pInfo->algoName, ANAL_ALGO_TYPE_ANOMALY_DETECT, pInfo->algoUrl, sizeof(pInfo->algoUrl)) != 0) { qError("failed to get anomaly_window algorithm url from %s", pInfo->algoName); code = TSDB_CODE_ANAL_ALGO_NOT_LOAD; @@ -86,7 +87,7 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p pOperator->exprSupp.hasWindowOrGroup = true; pInfo->tsSlotId = ((SColumnNode*)pAnomalyNode->window.pTspk)->slotId; - strncpy(pInfo->anomalyOpt, pAnomalyNode->anomalyOpt, sizeof(pInfo->anomalyOpt)); + tstrncpy(pInfo->anomalyOpt, pAnomalyNode->anomalyOpt, sizeof(pInfo->anomalyOpt)); if (pAnomalyNode->window.pExprs != NULL) { int32_t numOfScalarExpr = 0; @@ -198,7 +199,9 @@ static int32_t anomalyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRe QUERY_CHECK_CODE(code, lino, _end); } else { qDebug("group:%" PRId64 ", read finish for new group coming, blocks:%d", pSupp->groupId, numOfBlocks); - anomalyAggregateBlocks(pOperator); + code = anomalyAggregateBlocks(pOperator); + QUERY_CHECK_CODE(code, lino, _end); + pSupp->groupId = pBlock->info.id.groupId; numOfBlocks = 1; pSupp->cachedRows = pBlock->info.rows; @@ -217,7 +220,7 @@ static int32_t anomalyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRe if (numOfBlocks > 0) { qDebug("group:%" PRId64 ", read finish, blocks:%d", pInfo->anomalySup.groupId, numOfBlocks); - anomalyAggregateBlocks(pOperator); + code = anomalyAggregateBlocks(pOperator); } int64_t cost = taosGetTimestampUs() - st; @@ -229,6 +232,7 @@ static int32_t anomalyAggregateNext(SOperatorInfo* pOperator, SSDataBlock** ppRe pTaskInfo->code = code; T_LONG_JMP(pTaskInfo->env, code); } + (*ppRes) = (pBInfo->pRes->info.rows == 0) ? 
NULL : pBInfo->pRes; return code; } @@ -338,8 +342,8 @@ static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) { SAnalBuf analBuf = {.bufType = ANAL_BUF_TYPE_JSON}; char dataBuf[64] = {0}; int32_t code = 0; + int64_t ts = 0; - int64_t ts = 0; // int64_t ts = taosGetTimestampMs(); snprintf(analBuf.fileName, sizeof(analBuf.fileName), "%s/tdengine-anomaly-%" PRId64 "-%" PRId64, tsTempDir, ts, pSupp->groupId); @@ -431,6 +435,7 @@ static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) { if (code != 0) { qError("failed to analysis window since %s", tstrerror(code)); } + taosAnalBufDestroy(&analBuf); if (pJson != NULL) tjsonDelete(pJson); return code; @@ -473,7 +478,7 @@ static int32_t anomalyBuildResult(SOperatorInfo* pOperator) { return code; } -static void anomalyAggregateBlocks(SOperatorInfo* pOperator) { +static int32_t anomalyAggregateBlocks(SOperatorInfo* pOperator) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SAnomalyWindowOperatorInfo* pInfo = pOperator->info; @@ -623,6 +628,8 @@ static void anomalyAggregateBlocks(SOperatorInfo* pOperator) { pSupp->curWin.ekey = 0; pSupp->curWin.skey = 0; pSupp->curWinIndex = 0; + + return code; } #else diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index 4a52dd38375..ed0d07b23c3 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -45,6 +45,7 @@ typedef struct SDataDispatchHandle { SDataBlockDescNode* pSchema; STaosQueue* pDataBlocks; SDataDispatchBuf nextOutput; + int32_t outPutColCounts; int32_t status; bool queryEnd; uint64_t useconds; @@ -55,6 +56,65 @@ typedef struct SDataDispatchHandle { TdThreadMutex mutex; } SDataDispatchHandle; +static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* pInput) { + if(tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) { + return TSDB_CODE_SUCCESS; + } + if (pInput == NULL || pInput->pData == NULL || pInput->pData->info.rows <= 0) { + 
qError("invalid input data"); + return TSDB_CODE_QRY_INVALID_INPUT; + } + SDataBlockDescNode* pSchema = pHandle->pSchema; + if (pSchema == NULL || pSchema->totalRowSize != pInput->pData->info.rowSize) { + qError("invalid schema"); + return TSDB_CODE_QRY_INVALID_INPUT; + } + + if (pHandle->outPutColCounts > taosArrayGetSize(pInput->pData->pDataBlock)) { + qError("invalid column number, schema:%d, input:%zu", pHandle->outPutColCounts, taosArrayGetSize(pInput->pData->pDataBlock)); + return TSDB_CODE_QRY_INVALID_INPUT; + } + + SNode* pNode; + int32_t colNum = 0; + FOREACH(pNode, pHandle->pSchema->pSlots) { + SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; + if (pSlotDesc->output) { + SColumnInfoData* pColInfoData = taosArrayGet(pInput->pData->pDataBlock, colNum); + if (pColInfoData == NULL) { + return -1; + } + if (pColInfoData->info.bytes < 0) { + qError("invalid column bytes, schema:%d, input:%d", pSlotDesc->dataType.bytes, pColInfoData->info.bytes); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + if (!IS_VAR_DATA_TYPE(pColInfoData->info.type) && + TYPE_BYTES[pColInfoData->info.type] != pColInfoData->info.bytes) { + qError("invalid column bytes, schema:%d, input:%d", TYPE_BYTES[pColInfoData->info.type], + pColInfoData->info.bytes); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + if (pColInfoData->info.type != pSlotDesc->dataType.type) { + qError("invalid column type, schema:%d, input:%d", pSlotDesc->dataType.type, pColInfoData->info.type); + return TSDB_CODE_QRY_INVALID_INPUT; + } + if (pColInfoData->info.bytes != pSlotDesc->dataType.bytes) { + qError("invalid column bytes, schema:%d, input:%d", pSlotDesc->dataType.bytes, pColInfoData->info.bytes); + return TSDB_CODE_QRY_INVALID_INPUT; + } + + if (IS_INVALID_TYPE(pColInfoData->info.type)) { + qError("invalid column type, type:%d", pColInfoData->info.type); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + ++colNum; + } + } + + + return TSDB_CODE_SUCCESS; +} + // clang-format off // data format: // 
+----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+ @@ -68,6 +128,12 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* int32_t numOfCols = 0; SNode* pNode; + int32_t code = inputSafetyCheck(pHandle, pInput); + if (code) { + qError("failed to check input data, code:%d", code); + return code; + } + FOREACH(pNode, pHandle->pSchema->pSlots) { SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; if (pSlotDesc->output) { @@ -85,17 +151,18 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pBuf->useSize = sizeof(SDataCacheEntry); { + // allocate additional 8 bytes to avoid invalid write if compress failed to reduce the size + size_t dataEncodeBufSize = pBuf->allocSize + 8; if ((pBuf->allocSize > tsCompressMsgSize) && (tsCompressMsgSize > 0) && pHandle->pManager->cfg.compress) { if (pHandle->pCompressBuf == NULL) { - // allocate additional 8 bytes to avoid invalid write if compress failed to reduce the size - pHandle->pCompressBuf = taosMemoryMalloc(pBuf->allocSize + 8); + pHandle->pCompressBuf = taosMemoryMalloc(dataEncodeBufSize); if (NULL == pHandle->pCompressBuf) { QRY_RET(terrno); } - pHandle->bufSize = pBuf->allocSize + 8; + pHandle->bufSize = dataEncodeBufSize; } else { - if (pHandle->bufSize < pBuf->allocSize + 8) { - pHandle->bufSize = pBuf->allocSize + 8; + if (pHandle->bufSize < dataEncodeBufSize) { + pHandle->bufSize = dataEncodeBufSize; void* p = taosMemoryRealloc(pHandle->pCompressBuf, pHandle->bufSize); if (p != NULL) { pHandle->pCompressBuf = p; @@ -106,7 +173,7 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* } } - int32_t dataLen = blockEncode(pInput->pData, pHandle->pCompressBuf, numOfCols); + int32_t dataLen = blockEncode(pInput->pData, pHandle->pCompressBuf, dataEncodeBufSize, numOfCols); 
if(dataLen < 0) { qError("failed to encode data block, code: %d", dataLen); return terrno; @@ -124,7 +191,7 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* TAOS_MEMCPY(pEntry->data, pHandle->pCompressBuf, dataLen); } } else { - pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, numOfCols); + pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, pBuf->allocSize, numOfCols); if(pEntry->dataLen < 0) { qError("failed to encode data block, code: %d", pEntry->dataLen); return terrno; @@ -315,7 +382,6 @@ static int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) { return TSDB_CODE_SUCCESS; } - static int32_t getSinkFlags(struct SDataSinkHandle* pHandle, uint64_t* pFlags) { SDataDispatchHandle* pDispatcher = (SDataDispatchHandle*)pHandle; @@ -323,9 +389,60 @@ static int32_t getSinkFlags(struct SDataSinkHandle* pHandle, uint64_t* pFlags) { return TSDB_CODE_SUCCESS; } +static int32_t blockDescNodeCheck(SDataBlockDescNode* pInputDataBlockDesc) { + if(tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) { + return TSDB_CODE_SUCCESS; + } + + if (pInputDataBlockDesc == NULL) { + qError("invalid schema"); + return TSDB_CODE_QRY_INVALID_INPUT; + } + + SNode* pNode; + int32_t realOutputRowSize = 0; + FOREACH(pNode, pInputDataBlockDesc->pSlots) { + SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; + if (pSlotDesc->output) { + realOutputRowSize += pSlotDesc->dataType.bytes; + } else { + // Slots must be sorted, and slots with 'output' set to true must come first + break; + } + } + if (realOutputRowSize != pInputDataBlockDesc->outputRowSize) { + qError("invalid schema, realOutputRowSize:%d, outputRowSize:%d", realOutputRowSize, pInputDataBlockDesc->outputRowSize); + return TSDB_CODE_QRY_INVALID_INPUT; + } + return TSDB_CODE_SUCCESS; +} + +int32_t getOutputColCounts(SDataBlockDescNode* pInputDataBlockDesc) { + if (pInputDataBlockDesc == NULL) { + qError("invalid schema"); + return 0; + } + SNode* pNode; + int32_t 
numOfCols = 0; + FOREACH(pNode, pInputDataBlockDesc->pSlots) { + SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; + if (pSlotDesc->output) { + ++numOfCols; + } else { + // Slots must be sorted, and slots with 'output' set to true must come first + break; + } + } + return numOfCols; +} int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle) { int32_t code; + code = blockDescNodeCheck(pDataSink->pInputDataBlockDesc); + if (code) { + qError("failed to check input data block desc, code:%d", code); + return code; + } SDataDispatchHandle* dispatcher = taosMemoryCalloc(1, sizeof(SDataDispatchHandle)); if (NULL == dispatcher) { @@ -343,6 +460,7 @@ int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pD dispatcher->pManager = pManager; pManager = NULL; dispatcher->pSchema = pDataSink->pInputDataBlockDesc; + dispatcher->outPutColCounts = getOutputColCounts(dispatcher->pSchema); dispatcher->status = DS_BUF_EMPTY; dispatcher->queryEnd = false; code = taosOpenQueue(&dispatcher->pDataBlocks); diff --git a/source/libs/executor/src/dynqueryctrloperator.c b/source/libs/executor/src/dynqueryctrloperator.c index eb49057d890..62f199387ef 100644 --- a/source/libs/executor/src/dynqueryctrloperator.c +++ b/source/libs/executor/src/dynqueryctrloperator.c @@ -528,7 +528,12 @@ static void seqJoinLaunchNewRetrieveImpl(SOperatorInfo* pOperator, SSDataBlock** qDebug("%s dynamic post task begin", GET_TASKID(pOperator->pTaskInfo)); code = pOperator->pDownstream[1]->fpSet.getNextExtFn(pOperator->pDownstream[1], pParam, ppRes); if (*ppRes && (code == 0)) { - blockDataCheck(*ppRes, false); + code = blockDataCheck(*ppRes); + if (code) { + qError("Invalid block data, blockDataCheck failed, error:%s", tstrerror(code)); + pOperator->pTaskInfo->code = code; + T_LONG_JMP(pOperator->pTaskInfo->env, pOperator->pTaskInfo->code); + } pPost->isStarted = true; pStbJoin->execInfo.postBlkNum++; pStbJoin->execInfo.postBlkRows += 
(*ppRes)->info.rows; diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index 20502ffa39b..1bef4040842 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -320,7 +320,7 @@ static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo, const if (!pInfo->pTaskId) { return terrno; } - strncpy(pInfo->pTaskId, id, len); + tstrncpy(pInfo->pTaskId, id, len); for (int32_t i = 0; i < numOfSources; ++i) { SSourceDataInfo dataInfo = {0}; dataInfo.status = EX_SOURCE_DATA_NOT_READY; diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 1ea2e72ca3c..cd26eb1be6b 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -390,6 +390,7 @@ SSDataBlock* createDataBlockFromDescNode(SDataBlockDescNode* pNode) { createColumnInfoData(pDescNode->dataType.type, pDescNode->dataType.bytes, pDescNode->slotId); idata.info.scale = pDescNode->dataType.scale; idata.info.precision = pDescNode->dataType.precision; + idata.info.noData = pDescNode->reserve; code = blockDataAppendColInfo(pBlock, &idata); if (code != TSDB_CODE_SUCCESS) { @@ -449,8 +450,8 @@ int32_t prepareDataBlockBuf(SSDataBlock* pDataBlock, SColMatchInfo* pMatchInfo) EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) { STransTagExprCtx* pCtx = pContext; - SMetaReader* mr = pCtx->pReader; - bool isTagCol = false, isTbname = false; + SMetaReader* mr = pCtx->pReader; + bool isTagCol = false, isTbname = false; if (nodeType(*pNode) == QUERY_NODE_COLUMN) { SColumnNode* pCol = (SColumnNode*)*pNode; if (pCol->colType == COLUMN_TYPE_TBNAME) @@ -572,7 +573,7 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, static EDealRes getColumn(SNode** pNode, void* pContext) { tagFilterAssist* pData = (tagFilterAssist*)pContext; - SColumnNode* pSColumnNode = NULL; + SColumnNode* pSColumnNode = NULL; if (QUERY_NODE_COLUMN 
== nodeType((*pNode))) { pSColumnNode = *(SColumnNode**)pNode; } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) { @@ -595,7 +596,7 @@ static EDealRes getColumn(SNode** pNode, void* pContext) { return DEAL_RES_CONTINUE; } - void* data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId)); + void* data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId)); if (!data) { int32_t tempRes = taosHashPut(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode))); @@ -1636,7 +1637,7 @@ int32_t getGroupIdFromTagsVal(void* pVnode, uint64_t uid, SNodeList* pGroupNode, } SNodeList* groupNew = NULL; - int32_t code = nodesCloneList(pGroupNode, &groupNew); + int32_t code = nodesCloneList(pGroupNode, &groupNew); if (TSDB_CODE_SUCCESS != code) { pAPI->metaReaderFn.clearReader(&mr); return code; @@ -1919,7 +1920,7 @@ int32_t createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId) { if (!pFuncNode->pParameterList && (memcmp(pExprNode->_function.functionName, name, len) == 0) && pExprNode->_function.functionName[len] == 0) { pFuncNode->pParameterList = NULL; - int32_t code = nodesMakeList(&pFuncNode->pParameterList); + int32_t code = nodesMakeList(&pFuncNode->pParameterList); SValueNode* res = NULL; if (TSDB_CODE_SUCCESS == code) { code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&res); @@ -2925,6 +2926,10 @@ char* getStreamOpName(uint16_t opType) { return "stream event"; case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT: return "stream count"; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC: + return "stream interp"; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL: + return "interval continue"; } return ""; } @@ -2952,7 +2957,9 @@ void printSpecDataBlock(SSDataBlock* pBlock, const char* flag, const char* opStr qDebug("%s===stream===%s %s: Block is Null", taskIdStr, flag, opStr); return; } else if (pBlock->info.rows == 0) { - qDebug("%s===stream===%s %s: Block is Empty. 
block type %d", taskIdStr, flag, opStr, pBlock->info.type); + qDebug("%s===stream===%s %s: Block is Empty. block type %d.skey:%" PRId64 ",ekey:%" PRId64 ",version%" PRId64, + taskIdStr, flag, opStr, pBlock->info.type, pBlock->info.window.skey, pBlock->info.window.ekey, + pBlock->info.version); return; } if (qDebugFlag & DEBUG_DEBUG) { diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index d63664ec3ac..82331956427 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -131,7 +131,7 @@ static void clearStreamBlock(SOperatorInfo* pOperator) { } } -void resetTaskInfo(qTaskInfo_t tinfo) { +void qResetTaskInfoCode(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; pTaskInfo->code = 0; clearStreamBlock(pTaskInfo->pRoot); @@ -546,8 +546,9 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI return code; } -int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, - int32_t* tversion, int32_t idx, bool* tbGet) { +int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, int32_t dbNameBuffLen, char* tableName, + int32_t tbaleNameBuffLen, int32_t* sversion, int32_t* tversion, int32_t idx, + bool* tbGet) { *tbGet = false; if (tinfo == NULL || dbName == NULL || tableName == NULL) { @@ -568,12 +569,12 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table *sversion = pSchemaInfo->sw->version; *tversion = pSchemaInfo->tversion; if (pSchemaInfo->dbname) { - strcpy(dbName, pSchemaInfo->dbname); + tstrncpy(dbName, pSchemaInfo->dbname, dbNameBuffLen); } else { dbName[0] = 0; } if (pSchemaInfo->tablename) { - strcpy(tableName, pSchemaInfo->tablename); + tstrncpy(tableName, pSchemaInfo->tablename, tbaleNameBuffLen); } else { tableName[0] = 0; } @@ -705,12 +706,12 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo if (pTaskInfo->pOpParam && 
!pTaskInfo->paramSet) { pTaskInfo->paramSet = true; code = pTaskInfo->pRoot->fpSet.getNextExtFn(pTaskInfo->pRoot, pTaskInfo->pOpParam, &pRes); - blockDataCheck(pRes, false); } else { code = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot, &pRes); - blockDataCheck(pRes, false); } + QUERY_CHECK_CODE(code, lino, _end); + code = blockDataCheck(pRes); QUERY_CHECK_CODE(code, lino, _end); if (pRes == NULL) { @@ -755,7 +756,8 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo } code = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot, &pRes); - blockDataCheck(pRes, false); + QUERY_CHECK_CODE(code, lino, _end); + code = blockDataCheck(pRes); QUERY_CHECK_CODE(code, lino, _end); } @@ -854,7 +856,11 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) { qError("%s failed at line %d, code:%s %s", __func__, __LINE__, tstrerror(code), GET_TASKID(pTaskInfo)); } - blockDataCheck(*pRes, false); + code = blockDataCheck(*pRes); + if (code) { + pTaskInfo->code = code; + qError("%s failed at line %d, code:%s %s", __func__, __LINE__, tstrerror(code), GET_TASKID(pTaskInfo)); + } uint64_t el = (taosGetTimestampUs() - st); @@ -1098,6 +1104,23 @@ int32_t qStreamRecoverFinish(qTaskInfo_t tinfo) { return code; } +static int32_t getOpratorIntervalInfo(SOperatorInfo* pOperator, int64_t* pWaterMark, SInterval* pInterval, STimeWindow* pLastWindow) { + if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + return getOpratorIntervalInfo(pOperator->pDownstream[0], pWaterMark, pInterval, pLastWindow); + } + SStreamScanInfo* pScanOp = (SStreamScanInfo*) pOperator->info; + *pWaterMark = pScanOp->twAggSup.waterMark; + *pInterval = pScanOp->interval; + *pLastWindow = pScanOp->lastScanRange; + return TSDB_CODE_SUCCESS; +} + +int32_t qGetStreamIntervalExecInfo(qTaskInfo_t tinfo, int64_t* pWaterMark, SInterval* pInterval, STimeWindow* pLastWindow) { + SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; + SOperatorInfo* pOperator 
= pTaskInfo->pRoot; + return getOpratorIntervalInfo(pOperator, pWaterMark, pInterval, pLastWindow); +} + int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; SOperatorInfo* pOperator = pTaskInfo->pRoot; @@ -1162,6 +1185,19 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) { qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark); + pSup->calTriggerSaved = pSup->calTrigger; + pSup->deleteMarkSaved = pSup->deleteMark; + pSup->calTrigger = STREAM_TRIGGER_AT_ONCE; + pSup->deleteMark = INT64_MAX; + pInfo->ignoreExpiredDataSaved = pInfo->ignoreExpiredData; + pInfo->ignoreExpiredData = false; + qInfo("save stream task:%s, param for state: %d", GET_TASKID(pTaskInfo), pInfo->ignoreExpiredData); + } else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC) { + SStreamTimeSliceOperatorInfo* pInfo = pOperator->info; + STimeWindowAggSupp* pSup = &pInfo->twAggSup; + + qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark); + pSup->calTriggerSaved = pSup->calTrigger; pSup->deleteMarkSaved = pSup->deleteMark; pSup->calTrigger = STREAM_TRIGGER_AT_ONCE; @@ -1500,6 +1536,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond); tstrncpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName, TSDB_TABLE_NAME_LEN); +// pTaskInfo->streamInfo.suid = mtInfo.suid == 0 ? 
mtInfo.uid : mtInfo.suid; tDeleteSchemaWrapper(pTaskInfo->streamInfo.schema); pTaskInfo->streamInfo.schema = mtInfo.schema; diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c index 64a07c46537..1b823bf69d9 100644 --- a/source/libs/executor/src/executorInt.c +++ b/source/libs/executor/src/executorInt.c @@ -616,11 +616,12 @@ int32_t doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* p } } } - code = TSDB_CODE_SUCCESS; - + code = blockDataCheck(pBlock); + QUERY_CHECK_CODE(code, lino, _err); _err: - blockDataCheck(pBlock, true); - + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } colDataDestroy(p); taosMemoryFree(p); return code; @@ -701,7 +702,7 @@ int32_t copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResu QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo); for (int32_t k = 0; k < pRow->numOfRows; ++k) { - code = colDataSetVal(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes); + code = colDataSetValOrCover(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes); QUERY_CHECK_CODE(code, lino, _end); } } diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index d530382f7cf..1595c90419a 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -53,6 +53,7 @@ typedef struct SFillOperatorInfo { SExprInfo* pExprInfo; int32_t numOfExpr; SExprSupp noFillExprSupp; + SExprSupp fillNullExprSupp; } SFillOperatorInfo; static void destroyFillOperatorInfo(void* param); @@ -140,6 +141,15 @@ void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int code = projectApplyFunctions(pNoFillSupp->pExprInfo, pInfo->pRes, pBlock, pNoFillSupp->pCtx, pNoFillSupp->numOfExprs, NULL); QUERY_CHECK_CODE(code, lino, _end); + + if 
(pInfo->fillNullExprSupp.pExprInfo) { + pInfo->pRes->info.rows = 0; + code = setInputDataBlock(&pInfo->fillNullExprSupp, pBlock, order, scanFlag, false); + QUERY_CHECK_CODE(code, lino, _end); + code = projectApplyFunctions(pInfo->fillNullExprSupp.pExprInfo, pInfo->pRes, pBlock, pInfo->fillNullExprSupp.pCtx, + pInfo->fillNullExprSupp.numOfExprs, NULL); + } + pInfo->pRes->info.id.groupId = pBlock->info.id.groupId; _end: @@ -327,6 +337,7 @@ void destroyFillOperatorInfo(void* param) { pInfo->pFinalRes = NULL; cleanupExprSupp(&pInfo->noFillExprSupp); + cleanupExprSupp(&pInfo->fillNullExprSupp); taosMemoryFreeClear(pInfo->p); taosArrayDestroy(pInfo->matchInfo.pList); @@ -334,10 +345,11 @@ void destroyFillOperatorInfo(void* param) { } static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t numOfCols, SExprInfo* pNotFillExpr, - int32_t numOfNotFillCols, SNodeListNode* pValNode, STimeWindow win, int32_t capacity, - const char* id, SInterval* pInterval, int32_t fillType, int32_t order, - SExecTaskInfo* pTaskInfo) { - SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pValNode); + int32_t numOfNotFillCols, SExprInfo* pFillNullExpr, int32_t numOfFillNullExprs, + SNodeListNode* pValNode, STimeWindow win, int32_t capacity, const char* id, + SInterval* pInterval, int32_t fillType, int32_t order, SExecTaskInfo* pTaskInfo) { + SFillColInfo* pColInfo = + createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pFillNullExpr, numOfFillNullExprs, pValNode); if (!pColInfo) { qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); return terrno; @@ -348,8 +360,8 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t // STimeWindow w = {0}; // getInitialStartTimeWindow(pInterval, startKey, &w, order == TSDB_ORDER_ASC); pInfo->pFillInfo = NULL; - int32_t code = taosCreateFillInfo(startKey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo, - 
pInfo->primaryTsCol, order, id, pTaskInfo, &pInfo->pFillInfo); + int32_t code = taosCreateFillInfo(startKey, numOfCols, numOfNotFillCols, numOfFillNullExprs, capacity, pInterval, + fillType, pColInfo, pInfo->primaryTsCol, order, id, pTaskInfo, &pInfo->pFillInfo); if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); return code; @@ -455,6 +467,13 @@ int32_t createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFi initExprSupp(pNoFillSupp, pNoFillSupp->pExprInfo, pNoFillSupp->numOfExprs, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); + code = createExprInfo(pPhyFillNode->pFillNullExprs, NULL, &pInfo->fillNullExprSupp.pExprInfo, + &pInfo->fillNullExprSupp.numOfExprs); + QUERY_CHECK_CODE(code, lino, _error); + code = initExprSupp(&pInfo->fillNullExprSupp, pInfo->fillNullExprSupp.pExprInfo, pInfo->fillNullExprSupp.numOfExprs, + &pTaskInfo->storageAPI.functionStore); + QUERY_CHECK_CODE(code, lino, _error); + SInterval* pInterval = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType ? 
&((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval @@ -482,7 +501,9 @@ int32_t createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFi code = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID, &pInfo->matchInfo); + QUERY_CHECK_CODE(code, lino, _error); code = initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pNoFillSupp->pExprInfo, pNoFillSupp->numOfExprs, + pInfo->fillNullExprSupp.pExprInfo, pInfo->fillNullExprSupp.numOfExprs, (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity, pTaskInfo->id.str, pInterval, type, order, pTaskInfo); if (code != TSDB_CODE_SUCCESS) { diff --git a/source/libs/executor/src/forecastoperator.c b/source/libs/executor/src/forecastoperator.c index 0afa933ee83..20dc9e28ba7 100644 --- a/source/libs/executor/src/forecastoperator.c +++ b/source/libs/executor/src/forecastoperator.c @@ -19,14 +19,14 @@ #include "operator.h" #include "querytask.h" #include "storageapi.h" -#include "tanal.h" +#include "tanalytics.h" #include "tcommon.h" #include "tcompare.h" #include "tdatablock.h" #include "tfill.h" #include "ttime.h" -#ifdef USE_ANAL +#ifdef USE_ANALYTICS typedef struct { char algoName[TSDB_ANAL_ALGO_NAME_LEN]; diff --git a/source/libs/executor/src/groupcacheoperator.c b/source/libs/executor/src/groupcacheoperator.c index 10b372319b3..d47ab366b61 100644 --- a/source/libs/executor/src/groupcacheoperator.c +++ b/source/libs/executor/src/groupcacheoperator.c @@ -765,7 +765,7 @@ static FORCE_INLINE int32_t getBlkFromDownstreamOperator(struct SOperatorInfo* p } } - blockDataCheck(pBlock, false); + code = blockDataCheck(pBlock); *ppRes = pBlock; return code; diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 9cf2a3ea176..fec35c33711 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c 
@@ -88,8 +88,8 @@ static void destroyGroupOperatorInfo(void* param) { cleanupExprSupp(&pInfo->scalarSup); if (pInfo->pOperator != NULL) { - cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + false); pInfo->pOperator = NULL; } @@ -1263,7 +1263,10 @@ static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) { QUERY_CHECK_CONDITION((hasRemainPartion(pInfo)), code, lino, _end, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR); SPartitionDataInfo* pParInfo = (SPartitionDataInfo*)pInfo->parIte; blockDataCleanup(pDest); - int32_t rows = taosArrayGetSize(pParInfo->rowIds); + int32_t rows = taosArrayGetSize(pParInfo->rowIds); + code = blockDataEnsureCapacity(pDest, rows); + QUERY_CHECK_CODE(code, lino, _end); + SSDataBlock* pSrc = pInfo->pInputDataBlock; for (int32_t i = 0; i < rows; i++) { int32_t rowIndex = *(int32_t*)taosArrayGet(pParInfo->rowIds, i); @@ -1505,6 +1508,7 @@ static int32_t doStreamHashPartitionNext(SOperatorInfo* pOperator, SSDataBlock** case STREAM_CREATE_CHILD_TABLE: case STREAM_RETRIEVE: case STREAM_CHECKPOINT: + case STREAM_GET_RESULT: case STREAM_GET_ALL: { (*ppRes) = pBlock; return code; diff --git a/source/libs/executor/src/mergeoperator.c b/source/libs/executor/src/mergeoperator.c index 7fd6b91e522..0dfe89e10e4 100644 --- a/source/libs/executor/src/mergeoperator.c +++ b/source/libs/executor/src/mergeoperator.c @@ -65,11 +65,14 @@ static int32_t sortMergeloadNextDataBlock(void* param, SSDataBlock** ppBlock); int32_t sortMergeloadNextDataBlock(void* param, SSDataBlock** ppBlock) { SOperatorInfo* pOperator = (SOperatorInfo*)param; - int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock); - blockDataCheck(*ppBlock, false); + int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock); if (code) { qError("failed to 
get next data block from upstream, %s code:%s", __func__, tstrerror(code)); } + code = blockDataCheck(*ppBlock); + if (code) { + qError("failed to check data block got from upstream, %s code:%s", __func__, tstrerror(code)); + } return code; } @@ -526,7 +529,8 @@ int32_t doMultiwayMerge(SOperatorInfo* pOperator, SSDataBlock** pResBlock) { if ((*pResBlock) != NULL) { pOperator->resultInfo.totalRows += (*pResBlock)->info.rows; - blockDataCheck(*pResBlock, false); + code = blockDataCheck(*pResBlock); + QUERY_CHECK_CODE(code, lino, _end); } else { setOperatorCompleted(pOperator); } diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 7914f9f3202..3b10dce63fe 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -631,6 +631,8 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand code = createStreamCountAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, pHandle, &pOptr); } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT == type) { code = createCountwindowOperatorInfo(ops[0], pPhyNode, pTaskInfo, &pOptr); + } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC == type) { + code = createStreamTimeSliceOperatorInfo(ops[0], pPhyNode, pTaskInfo, pHandle, &pOptr); } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ANOMALY == type) { code = createAnomalywindowOperatorInfo(ops[0], pPhyNode, pTaskInfo, &pOptr); } else { @@ -870,15 +872,25 @@ int32_t setOperatorParams(struct SOperatorInfo* pOperator, SOperatorParam* pInpu SSDataBlock* getNextBlockFromDownstream(struct SOperatorInfo* pOperator, int32_t idx) { SSDataBlock* p = NULL; - int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, true, &p); - blockDataCheck(p, false); - return (code == 0)? 
p:NULL; + int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, true, &p); + if (code == TSDB_CODE_SUCCESS) { + code = blockDataCheck(p); + if (code != TSDB_CODE_SUCCESS) { + qError("blockDataCheck failed, code:%s", tstrerror(code)); + } + } + return (code == 0) ? p : NULL; } SSDataBlock* getNextBlockFromDownstreamRemain(struct SOperatorInfo* pOperator, int32_t idx) { SSDataBlock* p = NULL; - int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, false, &p); - blockDataCheck(p, false); + int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, false, &p); + if (code == TSDB_CODE_SUCCESS) { + code = blockDataCheck(p); + if (code != TSDB_CODE_SUCCESS) { + qError("blockDataCheck failed, code:%s", tstrerror(code)); + } + } return (code == 0)? p:NULL; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index bae9926f637..5b5d5c5d11a 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -25,6 +25,7 @@ #include "tdatablock.h" #include "tmsg.h" +#include "ttime.h" #include "operator.h" #include "query.h" @@ -1461,6 +1462,18 @@ static void destroyTableScanOperatorInfo(void* param) { taosMemoryFreeClear(param); } +static void resetClolumnReserve(SSDataBlock* pBlock, int32_t dataRequireFlag) { + if (pBlock && dataRequireFlag == FUNC_DATA_REQUIRED_NOT_LOAD) { + int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, i); + if (pCol) { + pCol->info.noData = true; + } + } + } +} + int32_t createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo) { @@ -1511,6 +1524,7 @@ int32_t createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHa pInfo->base.readerAPI = pTaskInfo->storageAPI.tsdReader; 
initResultSizeInfo(&pOperator->resultInfo, 4096); pInfo->pResBlock = createDataBlockFromDescNode(pDescNode); + resetClolumnReserve(pInfo->pResBlock, pInfo->base.dataBlockLoadFlag); QUERY_CHECK_NULL(pInfo->pResBlock, code, lino, _error, terrno); code = prepareDataBlockBuf(pInfo->pResBlock, &pInfo->base.matchInfo); @@ -1636,10 +1650,18 @@ static bool isCountWindow(SStreamScanInfo* pInfo) { return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT; } +static bool isTimeSlice(SStreamScanInfo* pInfo) { + return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC; +} + static void setGroupId(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t groupColIndex, int32_t rowIndex) { SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex); uint64_t* groupCol = (uint64_t*)pColInfo->pData; - pInfo->groupId = groupCol[rowIndex]; + if (colDataIsNull_s(pColInfo, rowIndex)) { + pInfo->igCheckGroupId = true; + } else { + pInfo->groupId = groupCol[rowIndex]; + } } void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin, uint64_t ver) { @@ -1913,6 +1935,12 @@ static int32_t doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32_t ts continue; } + if (pInfo->igCheckGroupId == true) { + pResult->info.calWin = pInfo->updateWin; + (*ppRes) = pResult; + goto _end; + } + if (pInfo->partitionSup.needCalc) { SSDataBlock* tmpBlock = NULL; code = createOneDataBlock(pResult, true, &tmpBlock); @@ -1987,10 +2015,10 @@ int32_t appendOneRowToSpecialBlockImpl(SSDataBlock* pBlock, TSKEY* pStartTs, TSK code = colDataSetVal(pEndTsCol, pBlock->info.rows, (const char*)pEndTs, false); QUERY_CHECK_CODE(code, lino, _end); - code = colDataSetVal(pUidCol, pBlock->info.rows, (const char*)pUid, false); + code = colDataSetVal(pUidCol, pBlock->info.rows, (const char*)pUid, pUid == NULL); QUERY_CHECK_CODE(code, lino, _end); - code = colDataSetVal(pGpCol, pBlock->info.rows, (const char*)pGp, false); + code = colDataSetVal(pGpCol, 
pBlock->info.rows, (const char*)pGp, pGp == NULL); QUERY_CHECK_CODE(code, lino, _end); code = colDataSetVal(pCalStartCol, pBlock->info.rows, (const char*)pCalStartTs, false); @@ -2068,6 +2096,7 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr if (pSrcBlock->info.rows == 0) { return TSDB_CODE_SUCCESS; } + SSHashObj* pScanRange = tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT)); SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo; SColumnInfoData* pStartTsCol = taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX); TSKEY* startData = (TSKEY*)pStartTsCol->pData; @@ -2102,6 +2131,7 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX); SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); + SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX); for (int32_t i = 0; i < pSrcBlock->info.rows; i++) { uint64_t groupId = pSrcGp[i]; if (groupId == 0) { @@ -2128,6 +2158,14 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr qError("generate session scan range failed. 
rang start:%" PRIx64 ", end:%" PRIx64, startData[i], endData[i]); continue; } + + SSessionKey checkKey = {.groupId = groupId, .win.skey = startWin.win.skey, .win.ekey = endWin.win.ekey}; + if (tSimpleHashGet(pScanRange, &checkKey, sizeof(SSessionKey)) != NULL) { + continue; + } + code = tSimpleHashPut(pScanRange, &checkKey, sizeof(SSessionKey), NULL, 0); + QUERY_CHECK_CODE(code, lino, _end); + code = colDataSetVal(pDestStartCol, i, (const char*)&startWin.win.skey, false); QUERY_CHECK_CODE(code, lino, _end); @@ -2139,11 +2177,12 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr QUERY_CHECK_CODE(code, lino, _end); colDataSetNULL(pDestCalStartTsCol, i); - colDataSetNULL(pDestCalEndTsCol, i); + colDataSetNULL(pDestTableNameInxCol, i); pDestBlock->info.rows++; } _end: + tSimpleHashCleanup(pScanRange); if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); } @@ -2192,6 +2231,7 @@ static int32_t generateCountScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcB SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX); SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); + SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX); for (int32_t i = 0; i < pSrcBlock->info.rows; i++) { uint64_t groupId = pSrcGp[i]; if (groupId == 0) { @@ -2220,6 +2260,169 @@ static int32_t generateCountScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcB code = colDataSetVal(pDestCalEndTsCol, i, (const char*)&range.win.ekey, false); QUERY_CHECK_CODE(code, lino, _end); + colDataSetNULL(pDestTableNameInxCol, i); + + pDestBlock->info.rows++; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return 
code; +} + +static int32_t setDelRangeEndKey(SStreamAggSupporter* pAggSup, SStreamFillSupporter* pFillSup, SWinKey* pEndKey, STimeWindow* pScanRange, bool* pRes) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SSlicePoint nextPoint = {.key.groupId = pEndKey->groupId}; + int32_t vLen = 0; + int32_t winCode = TSDB_CODE_SUCCESS; + code = pAggSup->stateStore.streamStateFillGetNext(pAggSup->pState, pEndKey, &nextPoint.key, (void**)&nextPoint.pResPos, &vLen, &winCode); + QUERY_CHECK_CODE(code, lino, _end); + if (winCode == TSDB_CODE_SUCCESS) { + setPointBuff(&nextPoint, pFillSup); + if (HAS_ROW_DATA(nextPoint.pLeftRow) && pEndKey->ts < nextPoint.pLeftRow->key) { + pScanRange->ekey = nextPoint.pLeftRow->key; + *pRes = true; + } else if (pEndKey->ts < nextPoint.pRightRow->key) { + pScanRange->ekey = nextPoint.pRightRow->key; + *pRes = true; + } else { + *pEndKey = nextPoint.key; + pScanRange->ekey = TMAX(nextPoint.pRightRow->key, nextPoint.key.ts); + *pRes = false; + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t getTimeSliceWinRange(SStreamAggSupporter* pAggSup, SStreamFillSupporter* pFillSup, SInterval* pInterval, TSKEY start, TSKEY end, + int64_t groupId, STimeWindow* pScanRange) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int32_t winCode = TSDB_CODE_SUCCESS; + SResultRowInfo dumyInfo = {0}; + dumyInfo.cur.pageId = -1; + STimeWindow sWin = getActiveTimeWindow(NULL, &dumyInfo, start, pInterval, TSDB_ORDER_ASC); + SWinKey startKey = {.groupId = groupId, .ts = sWin.skey}; + + sWin = getActiveTimeWindow(NULL, &dumyInfo, end, pInterval, TSDB_ORDER_ASC); + SWinKey endKey = {.groupId = groupId, .ts = sWin.ekey}; + + SSlicePoint prevPoint = {.key.groupId = groupId}; + SSlicePoint nextPoint = {.key.groupId = groupId}; + int32_t vLen = 0; + code = pAggSup->stateStore.streamStateFillGetPrev(pAggSup->pState, &startKey, &prevPoint.key, 
(void**)&prevPoint.pResPos, &vLen, &winCode); + QUERY_CHECK_CODE(code, lino, _end); + if (winCode == TSDB_CODE_SUCCESS) { + setPointBuff(&prevPoint, pFillSup); + if (HAS_ROW_DATA(prevPoint.pRightRow)) { + pScanRange->skey = prevPoint.pRightRow->key; + } else { + pScanRange->skey = prevPoint.pLeftRow->key; + } + } else { + pScanRange->skey = startKey.ts; + } + + bool res = false; + SWinKey curKey = endKey; + code = setDelRangeEndKey(pAggSup, pFillSup, &curKey, pScanRange, &res); + QUERY_CHECK_CODE(code, lino, _end); + if (res == false) { + code = setDelRangeEndKey(pAggSup, pFillSup, &curKey, pScanRange, &res); + QUERY_CHECK_CODE(code, lino, _end); + } + if (res == false) { + pScanRange->ekey = TMAX(endKey.ts, pScanRange->ekey); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t generateTimeSliceScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock, + EStreamType mode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + blockDataCleanup(pDestBlock); + if (pSrcBlock->info.rows == 0) { + return TSDB_CODE_SUCCESS; + } + SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo; + SColumnInfoData* pStartTsCol = taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX); + TSKEY* startData = (TSKEY*)pStartTsCol->pData; + SColumnInfoData* pEndTsCol = taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX); + TSKEY* endData = (TSKEY*)pEndTsCol->pData; + SColumnInfoData* pUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX); + uint64_t* uidCol = (uint64_t*)pUidCol->pData; + SColumnInfoData* pGpCol = taosArrayGet(pSrcBlock->pDataBlock, GROUPID_COLUMN_INDEX); + uint64_t* pSrcGp = (uint64_t*)pGpCol->pData; + SColumnInfoData* pSrcPkCol = NULL; + if (taosArrayGetSize(pSrcBlock->pDataBlock) > PRIMARY_KEY_COLUMN_INDEX) { + pSrcPkCol = taosArrayGet(pSrcBlock->pDataBlock, PRIMARY_KEY_COLUMN_INDEX); + } + int64_t ver = 
pSrcBlock->info.version - 1; + + if (pInfo->partitionSup.needCalc && + (startData[0] != endData[0] || (hasPrimaryKeyCol(pInfo) && mode == STREAM_DELETE_DATA))) { + code = getPreVersionDataBlock(uidCol[0], startData[0], endData[0], ver, GET_TASKID(pTaskInfo), pInfo, pSrcBlock); + QUERY_CHECK_CODE(code, lino, _end); + startData = (TSKEY*)pStartTsCol->pData; + endData = (TSKEY*)pEndTsCol->pData; + uidCol = (uint64_t*)pUidCol->pData; + pSrcGp = (uint64_t*)pGpCol->pData; + } + + code = blockDataEnsureCapacity(pDestBlock, pSrcBlock->info.rows); + QUERY_CHECK_CODE(code, lino, _end); + + SColumnInfoData* pDestStartCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX); + SColumnInfoData* pDestEndCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX); + SColumnInfoData* pDestUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX); + SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX); + SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); + SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); + for (int32_t i = 0; i < pSrcBlock->info.rows; i++) { + uint64_t groupId = pSrcGp[i]; + if (groupId == 0) { + void* pVal = NULL; + if (hasPrimaryKeyCol(pInfo) && pSrcPkCol) { + pVal = colDataGetData(pSrcPkCol, i); + } + groupId = getGroupIdByData(pInfo, uidCol[i], startData[i], ver, pVal); + } + + STimeWindow scanRange = {0}; + code = getTimeSliceWinRange(pInfo->windowSup.pStreamAggSup, pInfo->pFillSup, &pInfo->interval, startData[i], endData[i], groupId, + &scanRange); + QUERY_CHECK_CODE(code, lino, _end); + + code = colDataSetVal(pDestStartCol, i, (const char*)&scanRange.skey, false); + QUERY_CHECK_CODE(code, lino, _end); + + code = colDataSetVal(pDestEndCol, i, (const char*)&scanRange.ekey, false); + QUERY_CHECK_CODE(code, lino, _end); + + colDataSetNULL(pDestUidCol, i); + code = colDataSetVal(pDestGpCol, i, 
(const char*)&groupId, false); + QUERY_CHECK_CODE(code, lino, _end); + + code = colDataSetVal(pDestCalStartTsCol, i, (const char*)&scanRange.skey, false); + QUERY_CHECK_CODE(code, lino, _end); + + code = colDataSetVal(pDestCalEndTsCol, i, (const char*)&scanRange.ekey, false); + QUERY_CHECK_CODE(code, lino, _end); + pDestBlock->info.rows++; } @@ -2238,6 +2441,7 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS if (pSrcBlock->info.rows == 0) { return TSDB_CODE_SUCCESS; } + SSHashObj* pScanRange = tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT)); SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo; SColumnInfoData* pSrcStartTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX); SColumnInfoData* pSrcEndTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX); @@ -2274,6 +2478,7 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS SColumnInfoData* pGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX); SColumnInfoData* pCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); SColumnInfoData* pCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); + SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX); for (int32_t i = 0; i < pSrcBlock->info.rows;) { uint64_t srcUid = srcUidData[i]; uint64_t groupId = srcGp[i]; @@ -2297,6 +2502,13 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS code = colDataSetVal(pDeUidCol, pDestBlock->info.rows, (const char*)(&srcUid), false); QUERY_CHECK_CODE(code, lino, _end); + SSessionKey checkKey = {.groupId = groupId, .win = win}; + if (tSimpleHashGet(pScanRange, &checkKey, sizeof(SSessionKey)) != NULL) { + continue; + } + code = tSimpleHashPut(pScanRange, &checkKey, sizeof(SSessionKey), NULL, 0); + QUERY_CHECK_CODE(code, lino, _end); + code = 
colDataSetVal(pStartTsCol, pDestBlock->info.rows, (const char*)(&win.skey), false); QUERY_CHECK_CODE(code, lino, _end); @@ -2306,10 +2518,13 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS code = colDataSetVal(pGpCol, pDestBlock->info.rows, (const char*)(&groupId), false); QUERY_CHECK_CODE(code, lino, _end); + colDataSetNULL(pDestTableNameInxCol, pDestBlock->info.rows); + pDestBlock->info.rows++; } _end: + tSimpleHashCleanup(pScanRange); if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); } @@ -2484,6 +2699,9 @@ static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, } else if (isCountWindow(pInfo)) { code = generateCountScanRange(pInfo, pSrcBlock, pDestBlock, type); QUERY_CHECK_CODE(code, lino, _end); + } else if (isTimeSlice(pInfo)) { + code = generateTimeSliceScanRange(pInfo, pSrcBlock, pDestBlock, type); + QUERY_CHECK_CODE(code, lino, _end); } else { code = generateDeleteResultBlock(pInfo, pSrcBlock, pDestBlock); QUERY_CHECK_CODE(code, lino, _end); @@ -3061,6 +3279,7 @@ static int32_t filterDelBlockByUid(SSDataBlock* pDst, const SSDataBlock* pSrc, S colDataSetNULL(taosArrayGet(pDst->pDataBlock, GROUPID_COLUMN_INDEX), j); colDataSetNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX), j); colDataSetNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX), j); + colDataSetNULL(taosArrayGet(pDst->pDataBlock, TABLE_NAME_COLUMN_INDEX), j); j++; } } @@ -3134,31 +3353,78 @@ static int32_t doCheckUpdate(SStreamScanInfo* pInfo, TSKEY endKey, SSDataBlock* } int32_t streamScanOperatorEncode(SStreamScanInfo* pInfo, void** pBuff, int32_t* pLen) { - int32_t code = TSDB_CODE_SUCCESS; - int32_t lino = 0; - int32_t len = 0; - code = pInfo->stateStore.updateInfoSerialize(NULL, 0, pInfo->pUpdateInfo, &len); - QUERY_CHECK_CODE(code, lino, _end); + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int32_t len = 0; + SEncoder* pEnCoder = 
NULL; + SEncoder* pScanEnCoder = NULL; len += encodeSTimeWindowAggSupp(NULL, &pInfo->twAggSup); + SEncoder encoder = {0}; + pEnCoder = &encoder; + tEncoderInit(pEnCoder, NULL, 0); + if (tStartEncode(pEnCoder) != 0) { + code = TSDB_CODE_STREAM_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } + code = pInfo->stateStore.updateInfoSerialize(pEnCoder, pInfo->pUpdateInfo); + QUERY_CHECK_CODE(code, lino, _end); + + if (tEncodeI64(pEnCoder, pInfo->lastScanRange.skey) < 0) { + code = TSDB_CODE_STREAM_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } + if (tEncodeI64(pEnCoder, pInfo->lastScanRange.ekey) < 0) { + code = TSDB_CODE_STREAM_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } + + tEndEncode(pEnCoder); + len += encoder.pos; + tEncoderClear(pEnCoder); + pEnCoder = NULL; + *pBuff = taosMemoryCalloc(1, len); if (!(*pBuff)) { code = terrno; QUERY_CHECK_CODE(code, lino, _end); } void* buf = *pBuff; - (void)encodeSTimeWindowAggSupp(&buf, &pInfo->twAggSup); + int32_t stwLen = encodeSTimeWindowAggSupp(&buf, &pInfo->twAggSup); - int32_t tmp = 0; - code = pInfo->stateStore.updateInfoSerialize(buf, len, pInfo->pUpdateInfo, &tmp); + SEncoder scanEncoder = {0}; + pScanEnCoder = &scanEncoder; + tEncoderInit(pScanEnCoder, buf, len - stwLen); + if (tStartEncode(pScanEnCoder) != 0) { + code = TSDB_CODE_FAILED; + QUERY_CHECK_CODE(code, lino, _end); + } + code = pInfo->stateStore.updateInfoSerialize(pScanEnCoder, pInfo->pUpdateInfo); QUERY_CHECK_CODE(code, lino, _end); + if (tEncodeI64(pScanEnCoder, pInfo->lastScanRange.skey) < 0) { + code = TSDB_CODE_STREAM_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } + if (tEncodeI64(pScanEnCoder, pInfo->lastScanRange.ekey) < 0) { + code = TSDB_CODE_STREAM_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } + *pLen = len; _end: if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); } + if (pEnCoder != NULL) { + tEndEncode(pEnCoder); + 
tEncoderClear(pEnCoder); + } + if (pScanEnCoder != NULL) { + tEndEncode(pScanEnCoder); + tEncoderClear(pScanEnCoder); + } return code; } @@ -3187,35 +3453,86 @@ void streamScanOperatorSaveCheckpoint(SStreamScanInfo* pInfo) { // other properties are recovered from the execution plan void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SDecoder* pDeCoder = NULL; if (!pBuff || len == 0) { - return; + lino = __LINE__; + goto _end; } void* buf = pBuff; buf = decodeSTimeWindowAggSupp(buf, &pInfo->twAggSup); int32_t tlen = len - encodeSTimeWindowAggSupp(NULL, &pInfo->twAggSup); if (tlen == 0) { - return; + lino = __LINE__; + goto _end; } void* pUpInfo = taosMemoryCalloc(1, sizeof(SUpdateInfo)); if (!pUpInfo) { - return; + lino = __LINE__; + goto _end; + } + SDecoder decoder = {0}; + pDeCoder = &decoder; + tDecoderInit(pDeCoder, buf, tlen); + if (tStartDecode(pDeCoder) < 0) { + lino = __LINE__; + goto _end; } - int32_t code = pInfo->stateStore.updateInfoDeserialize(buf, tlen, pUpInfo); + + code = pInfo->stateStore.updateInfoDeserialize(pDeCoder, pUpInfo); if (code == TSDB_CODE_SUCCESS) { pInfo->stateStore.updateInfoDestroy(pInfo->pUpdateInfo); pInfo->pUpdateInfo = pUpInfo; } else { taosMemoryFree(pUpInfo); + lino = __LINE__; + goto _end; + } + + if (tDecodeIsEnd(pDeCoder)) { + lino = __LINE__; + goto _end; + } + + SET_WIN_KEY_INVALID(pInfo->lastScanRange.skey); + SET_WIN_KEY_INVALID(pInfo->lastScanRange.ekey); + + if (tDecodeI64(pDeCoder, &pInfo->lastScanRange.skey) < 0) { + lino = __LINE__; + goto _end; + } + + if (tDecodeI64(pDeCoder, &pInfo->lastScanRange.ekey) < 0) { + lino = __LINE__; + goto _end; + } + +_end: + if (pDeCoder != NULL) { + tEndDecode(pDeCoder); + tDecoderClear(pDeCoder); } + qInfo("%s end at line %d", __func__, lino); } + static bool hasScanRange(SStreamScanInfo* pInfo) { SStreamAggSupporter* pSup = pInfo->windowSup.pStreamAggSup; return pSup && 
pSup->pScanBlock->info.rows > 0 && (isStateWindow(pInfo) || isCountWindow(pInfo)); } static bool isStreamWindow(SStreamScanInfo* pInfo) { - return isIntervalWindow(pInfo) || isSessionWindow(pInfo) || isStateWindow(pInfo) || isCountWindow(pInfo); + return isIntervalWindow(pInfo) || isSessionWindow(pInfo) || isStateWindow(pInfo) || isCountWindow(pInfo) || + isTimeSlice(pInfo); +} + +static int32_t copyGetResultBlock(SSDataBlock* dest, TSKEY start, TSKEY end) { + int32_t code = blockDataEnsureCapacity(dest, 1); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + return appendDataToSpecialBlock(dest, &start, &end, NULL, NULL, NULL); } static int32_t doStreamScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { @@ -3443,13 +3760,27 @@ static int32_t doStreamScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { } } } break; + case STREAM_GET_RESULT: { + pInfo->blockType = STREAM_INPUT__DATA_SUBMIT; + pInfo->updateResIndex = 0; + pInfo->lastScanRange = pBlock->info.window; + TSKEY endKey = taosTimeGetIntervalEnd(pBlock->info.window.skey, &pInfo->interval); + if (pInfo->useGetResultRange == true) { + endKey = pBlock->info.window.ekey; + } + code = copyGetResultBlock(pInfo->pUpdateRes, pBlock->info.window.skey, endKey); + QUERY_CHECK_CODE(code, lino, _end); + pInfo->pUpdateInfo->maxDataVersion = -1; + prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex, NULL); + pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE; + } break; case STREAM_CHECKPOINT: { qError("stream check point error. 
msg type: STREAM_INPUT__DATA_BLOCK"); } break; default: break; } - printDataBlock(pBlock, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "block recv", GET_TASKID(pTaskInfo)); setStreamOperatorState(&pInfo->basic, pBlock->info.type); (*ppRes) = pBlock; return code; @@ -3657,6 +3988,9 @@ static int32_t doStreamScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { // printDataBlock(pInfo->pCheckpointRes, "stream scan ck", GET_TASKID(pTaskInfo)); (*ppRes) = pInfo->pCheckpointRes; return code; + } else { + qError("stream scan error, invalid block type %d, %s", pInfo->blockType, id); + code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; } _end: @@ -3940,6 +4274,8 @@ void streamScanReleaseState(SOperatorInfo* pOperator) { int32_t lino = 0; SStreamScanInfo* pInfo = pOperator->info; void* pBuff = NULL; + SEncoder* pEnCoder = NULL; + SEncoder* pScanEnCoder = NULL; if (!pInfo->pState) { return; } @@ -3947,32 +4283,63 @@ void streamScanReleaseState(SOperatorInfo* pOperator) { qDebug("stask:%s streamScanReleaseState cancel", GET_TASKID(pOperator->pTaskInfo)); return; } - int32_t len = 0; - code = pInfo->stateStore.updateInfoSerialize(NULL, 0, pInfo->pUpdateInfo, &len); + int32_t len = 0; + SEncoder encoder = {0}; + pEnCoder = &encoder; + tEncoderInit(pEnCoder, NULL, 0); + if (tStartEncode(pEnCoder) != 0) { + code = TSDB_CODE_FAILED; + QUERY_CHECK_CODE(code, lino, _end); + } + code = pInfo->stateStore.updateInfoSerialize(pEnCoder, pInfo->pUpdateInfo); QUERY_CHECK_CODE(code, lino, _end); + tEndEncode(pEnCoder); + len += encoder.pos; + tEncoderClear(pEnCoder); + pEnCoder = NULL; + pBuff = taosMemoryCalloc(1, len); if (!pBuff) { code = terrno; QUERY_CHECK_CODE(code, lino, _end); } - int32_t tmp = 0; - code = pInfo->stateStore.updateInfoSerialize(pBuff, len, pInfo->pUpdateInfo, &tmp); + SEncoder scanEncoder = {0}; + pScanEnCoder = &scanEncoder; + tEncoderInit(pScanEnCoder, pBuff, len); + if 
(tStartEncode(pScanEnCoder) != 0) { + code = TSDB_CODE_FAILED; + QUERY_CHECK_CODE(code, lino, _end); + } + code = pInfo->stateStore.updateInfoSerialize(pScanEnCoder, pInfo->pUpdateInfo); QUERY_CHECK_CODE(code, lino, _end); + tEndEncode(pScanEnCoder); + tEncoderClear(pScanEnCoder); + pScanEnCoder = NULL; + pInfo->stateStore.streamStateSaveInfo(pInfo->pState, STREAM_SCAN_OP_STATE_NAME, strlen(STREAM_SCAN_OP_STATE_NAME), pBuff, len); _end: if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); } + if (pEnCoder != NULL) { + tEndEncode(pEnCoder); + tEncoderClear(pEnCoder); + } + if (pScanEnCoder != NULL) { + tEndEncode(pScanEnCoder); + tEncoderClear(pScanEnCoder); + } taosMemoryFree(pBuff); } void streamScanReloadState(SOperatorInfo* pOperator) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; + SDecoder* pDeCoder = NULL; SStreamScanInfo* pInfo = pOperator->info; if (!pInfo->pState) { return; @@ -3993,7 +4360,14 @@ void streamScanReloadState(SOperatorInfo* pOperator) { QUERY_CHECK_CODE(code, lino, _end); } - int32_t winCode = pInfo->stateStore.updateInfoDeserialize(pBuff, len, pUpInfo); + SDecoder decoder = {0}; + pDeCoder = &decoder; + tDecoderInit(pDeCoder, pBuff, len); + if (tStartDecode(pDeCoder) < 0) { + lino = __LINE__; + goto _end; + } + int32_t winCode = pInfo->stateStore.updateInfoDeserialize(pDeCoder, pUpInfo); taosMemoryFree(pBuff); if (winCode == TSDB_CODE_SUCCESS && pInfo->pUpdateInfo) { if (pInfo->pUpdateInfo->minTS < 0) { @@ -4030,6 +4404,10 @@ void streamScanReloadState(SOperatorInfo* pOperator) { } _end: + if (pDeCoder != NULL) { + tEndDecode(pDeCoder); + tDecoderClear(pDeCoder); + } if (code != TSDB_CODE_SUCCESS) { qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); } @@ -4220,6 +4598,7 @@ int32_t createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; pInfo->windowSup = (SWindowSupporter){.pStreamAggSup = NULL, 
.gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN}; pInfo->groupId = 0; + pInfo->igCheckGroupId = false; pInfo->pStreamScanOp = pOperator; pInfo->deleteDataIndex = 0; code = createSpecialDataBlock(STREAM_DELETE_DATA, &pInfo->pDeleteDataRes); @@ -4245,10 +4624,14 @@ int32_t createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pInfo->pState = pTaskInfo->streamInfo.pState; pInfo->stateStore = pTaskInfo->storageAPI.stateStore; pInfo->readerFn = pTaskInfo->storageAPI.tqReaderFn; + pInfo->pFillSup = NULL; + pInfo->useGetResultRange = false; code = createSpecialDataBlock(STREAM_CHECKPOINT, &pInfo->pCheckpointRes); QUERY_CHECK_CODE(code, lino, _error); + SET_WIN_KEY_INVALID(pInfo->lastScanRange.skey); + SET_WIN_KEY_INVALID(pInfo->lastScanRange.ekey); // for stream if (pTaskInfo->streamInfo.pState) { void* buff = NULL; @@ -6345,7 +6728,7 @@ int32_t fillTableCountScanDataBlock(STableCountScanSupp* pSupp, char* dbName, ch QUERY_CHECK_NULL(colInfoData, code, lino, _end, terrno); if (strlen(stbName) != 0) { char varStbName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - strncpy(varDataVal(varStbName), stbName, TSDB_TABLE_NAME_LEN); + tstrncpy(varDataVal(varStbName), stbName, TSDB_TABLE_NAME_LEN); varDataSetLen(varStbName, strlen(stbName)); code = colDataSetVal(colInfoData, 0, varStbName, false); QUERY_CHECK_CODE(code, lino, _end); diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 1c241dffec2..a6ca20c5ee7 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -334,10 +334,14 @@ static int32_t getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t loadNextDataBlock(void* param, SSDataBlock** ppBlock) { SOperatorInfo* pOperator = (SOperatorInfo*)param; - int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock); - blockDataCheck(*ppBlock, false); + int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock); if (code) { qError("failed to 
get next data block from upstream, %s code:%s", __func__, tstrerror(code)); + } else { + code = blockDataCheck(*ppBlock); + if (code) { + qError("failed to check block data, %s code:%s", __func__, tstrerror(code)); + } } return code; } @@ -630,7 +634,8 @@ int32_t fetchNextGroupSortDataBlock(void* param, SSDataBlock** ppBlock) { QUERY_CHECK_CODE(code, lino, _end); if (block != NULL) { - blockDataCheck(block, false); + code = blockDataCheck(block); + QUERY_CHECK_CODE(code, lino, _end); if (block->info.id.groupId == grpSortOpInfo->currGroupId) { grpSortOpInfo->childOpStatus = CHILD_OP_SAME_GROUP; *ppBlock = block; diff --git a/source/libs/executor/src/streamcountwindowoperator.c b/source/libs/executor/src/streamcountwindowoperator.c index 33b3e7748cc..b8c3ec90f98 100644 --- a/source/libs/executor/src/streamcountwindowoperator.c +++ b/source/libs/executor/src/streamcountwindowoperator.c @@ -818,8 +818,8 @@ void streamCountReloadState(SOperatorInfo* pOperator) { } } -int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, - SExecTaskInfo* pTaskInfo, SReadHandle* pHandle, SOperatorInfo** pOptrInfo) { +int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, + SReadHandle* pHandle, SOperatorInfo** pOptrInfo) { QRY_PARAM_CHECK(pOptrInfo); SCountWinodwPhysiNode* pCountNode = (SCountWinodwPhysiNode*)pPhyNode; @@ -869,7 +869,8 @@ int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->primaryTsIndex = ((SColumnNode*)pCountNode->window.pTspk)->slotId; code = initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, 0, pTaskInfo->streamInfo.pState, sizeof(COUNT_TYPE), 0, &pTaskInfo->storageAPI.stateStore, pHandle, &pInfo->twAggSup, - GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI, pInfo->primaryTsIndex); + GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI, pInfo->primaryTsIndex, + STREAM_STATE_BUFF_SORT, 1); QUERY_CHECK_CODE(code, lino, _error); 
pInfo->streamAggSup.windowCount = pCountNode->windowCount; @@ -903,7 +904,7 @@ int32_t createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->recvGetAll = false; pInfo->pPkDeleted = tSimpleHashInit(64, hashFn); QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno); - pInfo->destHasPrimaryKey = pCountNode->window.destHasPrimayKey; + pInfo->destHasPrimaryKey = pCountNode->window.destHasPrimaryKey; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT; setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT, true, diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index 8a706f6d4e3..29b3f473baf 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -525,8 +525,8 @@ int32_t doStreamEventDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOpera QUERY_CHECK_CODE(code, lino, _end); buf = decodeSResultWindowInfo(buf, &winfo, pInfo->streamAggSup.resultRowSize); - code = - tSimpleHashPut(pInfo->streamAggSup.pResultRows, &winfo.sessionWin, sizeof(SSessionKey), &winfo, sizeof(SResultWindowInfo)); + code = tSimpleHashPut(pInfo->streamAggSup.pResultRows, &winfo.sessionWin, sizeof(SSessionKey), &winfo, + sizeof(SResultWindowInfo)); QUERY_CHECK_CODE(code, lino, _end); } @@ -917,7 +917,8 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->primaryTsIndex = tsSlotId; code = initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, 0, pTaskInfo->streamInfo.pState, sizeof(bool) + sizeof(bool), 0, &pTaskInfo->storageAPI.stateStore, pHandle, - &pInfo->twAggSup, GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI, pInfo->primaryTsIndex); + &pInfo->twAggSup, GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI, pInfo->primaryTsIndex, + STREAM_STATE_BUFF_SORT, 1); QUERY_CHECK_CODE(code, lino, _error); _hash_fn_t hashFn = 
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); @@ -955,7 +956,7 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->recvGetAll = false; pInfo->pPkDeleted = tSimpleHashInit(64, hashFn); QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno); - pInfo->destHasPrimaryKey = pEventNode->window.destHasPrimayKey; + pInfo->destHasPrimaryKey = pEventNode->window.destHasPrimaryKey; pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED, diff --git a/source/libs/executor/src/streamexecutorInt.c b/source/libs/executor/src/streamexecutorInt.c index 875ae003508..b94798934c5 100644 --- a/source/libs/executor/src/streamexecutorInt.c +++ b/source/libs/executor/src/streamexecutorInt.c @@ -28,3 +28,8 @@ bool needSaveStreamOperatorInfo(SSteamOpBasicInfo* pBasicInfo) { void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo) { pBasicInfo->updateOperatorInfo = false; } + +void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { + pBasicInfo->primaryPkIndex = -1; + pBasicInfo->updateOperatorInfo = false; +} diff --git a/source/libs/executor/src/streamfilloperator.c b/source/libs/executor/src/streamfilloperator.c index 826220581a4..ccf1f7c9e50 100644 --- a/source/libs/executor/src/streamfilloperator.c +++ b/source/libs/executor/src/streamfilloperator.c @@ -21,6 +21,7 @@ #include "ttypes.h" #include "executorInt.h" +#include "streamexecutorInt.h" #include "tcommon.h" #include "thash.h" #include "ttime.h" @@ -77,12 +78,17 @@ void* destroyFillColumnInfo(SFillColInfo* pFillCol, int32_t start, int32_t end) destroyExprInfo(pFillCol[i].pExpr, 1); taosVariantDestroy(&pFillCol[i].fillVal); } - taosMemoryFreeClear(pFillCol[start].pExpr); + if (start < end) { + taosMemoryFreeClear(pFillCol[start].pExpr); + } taosMemoryFree(pFillCol); return NULL; } void destroyStreamFillSupporter(SStreamFillSupporter* pFillSup) { + if (pFillSup == NULL) { + return; + } 
pFillSup->pAllColInfo = destroyFillColumnInfo(pFillSup->pAllColInfo, pFillSup->numOfFillCols, pFillSup->numOfAllCols); tSimpleHashCleanup(pFillSup->pResMap); pFillSup->pResMap = NULL; @@ -109,10 +115,15 @@ void destroyStreamFillLinearInfo(SStreamFillLinearInfo* pFillLinear) { } void destroyStreamFillInfo(SStreamFillInfo* pFillInfo) { + if (pFillInfo == NULL) { + return; + } if (pFillInfo->type == TSDB_FILL_SET_VALUE || pFillInfo->type == TSDB_FILL_SET_VALUE_F || pFillInfo->type == TSDB_FILL_NULL || pFillInfo->type == TSDB_FILL_NULL_F) { taosMemoryFreeClear(pFillInfo->pResRow->pRowVal); taosMemoryFreeClear(pFillInfo->pResRow); + taosMemoryFreeClear(pFillInfo->pNonFillRow->pRowVal); + taosMemoryFreeClear(pFillInfo->pNonFillRow); } destroyStreamFillLinearInfo(pFillInfo->pLinearInfo); pFillInfo->pLinearInfo = NULL; @@ -133,6 +144,10 @@ static void destroyStreamFillOperatorInfo(void* param) { pInfo->pDelRes = NULL; taosArrayDestroy(pInfo->matchInfo.pList); pInfo->matchInfo.pList = NULL; + taosArrayDestroy(pInfo->pUpdated); + clearGroupResInfo(&pInfo->groupResInfo); + taosArrayDestroy(pInfo->pCloseTs); + taosMemoryFree(pInfo); } @@ -141,7 +156,7 @@ static void resetFillWindow(SResultRowData* pRowData) { taosMemoryFreeClear(pRowData->pRowVal); } -void resetPrevAndNextWindow(SStreamFillSupporter* pFillSup, void* pState, SStorageAPI* pAPI) { +static void resetPrevAndNextWindow(SStreamFillSupporter* pFillSup) { if (pFillSup->cur.pRowVal != pFillSup->prev.pRowVal && pFillSup->cur.pRowVal != pFillSup->next.pRowVal) { resetFillWindow(&pFillSup->cur); } else { @@ -156,13 +171,13 @@ void resetPrevAndNextWindow(SStreamFillSupporter* pFillSup, void* pState, SStora void getWindowFromDiscBuf(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId, SStreamFillSupporter* pFillSup) { SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI; void* pState = pOperator->pTaskInfo->streamInfo.pState; - resetPrevAndNextWindow(pFillSup, pState, pAPI); + resetPrevAndNextWindow(pFillSup); SWinKey key 
= {.ts = ts, .groupId = groupId}; void* curVal = NULL; int32_t curVLen = 0; bool hasCurKey = true; - int32_t code = pAPI->stateStore.streamStateFillGet(pState, &key, (void**)&curVal, &curVLen); + int32_t code = pAPI->stateStore.streamStateFillGet(pState, &key, (void**)&curVal, &curVLen, NULL); if (code == TSDB_CODE_SUCCESS) { pFillSup->cur.key = key.ts; pFillSup->cur.pRowVal = curVal; @@ -177,7 +192,7 @@ void getWindowFromDiscBuf(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId, SWinKey preKey = {.ts = INT64_MIN, .groupId = groupId}; void* preVal = NULL; int32_t preVLen = 0; - code = pAPI->stateStore.streamStateGetGroupKVByCur(pCur, &preKey, (const void**)&preVal, &preVLen); + code = pAPI->stateStore.streamStateFillGetGroupKVByCur(pCur, &preKey, (const void**)&preVal, &preVLen); if (code == TSDB_CODE_SUCCESS) { pFillSup->prev.key = preKey.ts; @@ -196,7 +211,7 @@ void getWindowFromDiscBuf(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId, SWinKey nextKey = {.ts = INT64_MIN, .groupId = groupId}; void* nextVal = NULL; int32_t nextVLen = 0; - code = pAPI->stateStore.streamStateGetGroupKVByCur(pCur, &nextKey, (const void**)&nextVal, &nextVLen); + code = pAPI->stateStore.streamStateFillGetGroupKVByCur(pCur, &nextKey, (const void**)&nextVal, &nextVLen); if (code == TSDB_CODE_SUCCESS) { pFillSup->next.key = nextKey.ts; pFillSup->next.pRowVal = nextVal; @@ -205,7 +220,7 @@ void getWindowFromDiscBuf(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId, SWinKey nextNextKey = {.groupId = groupId}; void* nextNextVal = NULL; int32_t nextNextVLen = 0; - code = pAPI->stateStore.streamStateGetGroupKVByCur(pCur, &nextNextKey, (const void**)&nextNextVal, &nextNextVLen); + code = pAPI->stateStore.streamStateFillGetGroupKVByCur(pCur, &nextNextKey, (const void**)&nextNextVal, &nextNextVLen); if (code == TSDB_CODE_SUCCESS) { pFillSup->nextNext.key = nextNextKey.ts; pFillSup->nextNext.pRowVal = nextNextVal; @@ -215,12 +230,10 @@ void getWindowFromDiscBuf(SOperatorInfo* pOperator, 
TSKEY ts, uint64_t groupId, pAPI->stateStore.streamStateFreeCur(pCur); } -static bool hasPrevWindow(SStreamFillSupporter* pFillSup) { return pFillSup->prev.key != INT64_MIN; } -static bool hasNextWindow(SStreamFillSupporter* pFillSup) { return pFillSup->next.key != INT64_MIN; } -static bool hasNextNextWindow(SStreamFillSupporter* pFillSup) { - return pFillSup->nextNext.key != INT64_MIN; - return false; -} +bool hasCurWindow(SStreamFillSupporter* pFillSup) { return pFillSup->cur.key != INT64_MIN; } +bool hasPrevWindow(SStreamFillSupporter* pFillSup) { return pFillSup->prev.key != INT64_MIN; } +bool hasNextWindow(SStreamFillSupporter* pFillSup) { return pFillSup->next.key != INT64_MIN; } +static bool hasNextNextWindow(SStreamFillSupporter* pFillSup) { return pFillSup->nextNext.key != INT64_MIN; } static void transBlockToResultRow(const SSDataBlock* pBlock, int32_t rowId, TSKEY ts, SResultRowData* pRowVal) { int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); @@ -244,29 +257,6 @@ static void transBlockToResultRow(const SSDataBlock* pBlock, int32_t rowId, TSKE pRowVal->key = ts; } -static void calcDeltaData(SSDataBlock* pBlock, int32_t rowId, SResultRowData* pRowVal, SArray* pDelta, - SFillColInfo* pFillCol, int32_t numOfCol, int32_t winCount, int32_t order) { - for (int32_t i = 0; i < numOfCol; i++) { - if (!pFillCol[i].notFillCol) { - int32_t slotId = GET_DEST_SLOT_ID(pFillCol + i); - SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); - char* var = colDataGetData(pCol, rowId); - double start = 0; - GET_TYPED_DATA(start, double, pCol->info.type, var); - SResultCellData* pCell = getResultCell(pRowVal, slotId); - double end = 0; - GET_TYPED_DATA(end, double, pCell->type, pCell->pData); - double delta = 0; - if (order == TSDB_ORDER_ASC) { - delta = (end - start) / winCount; - } else { - delta = (start - end) / winCount; - } - taosArraySet(pDelta, slotId, &delta); - } - } -} - static void calcRowDeltaData(SResultRowData* pEndRow, SArray* pEndPoins, 
SFillColInfo* pFillCol, int32_t numOfCol) { for (int32_t i = 0; i < numOfCol; i++) { if (!pFillCol[i].notFillCol) { @@ -461,7 +451,7 @@ void setFillValueInfo(SSDataBlock* pBlock, TSKEY ts, int32_t rowId, SStreamFillS } } -static int32_t checkResult(SStreamFillSupporter* pFillSup, TSKEY ts, uint64_t groupId, bool* pRes) { +int32_t checkResult(SStreamFillSupporter* pFillSup, TSKEY ts, uint64_t groupId, bool* pRes) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SWinKey key = {.groupId = groupId, .ts = ts}; @@ -523,7 +513,7 @@ static int32_t buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pF return code; } -static bool hasRemainCalc(SStreamFillInfo* pFillInfo) { +bool hasRemainCalc(SStreamFillInfo* pFillInfo) { if (pFillInfo->current != INT64_MIN && pFillInfo->current <= pFillInfo->end) { return true; } @@ -736,7 +726,6 @@ static void doStreamFillImpl(SOperatorInfo* pOperator) { } while (pInfo->srcRowIndex < pBlock->info.rows) { - TSKEY ts = tsCol[pInfo->srcRowIndex]; code = keepBlockRowInDiscBuf(pOperator, pFillInfo, pBlock, tsCol, pInfo->srcRowIndex, groupId, pFillSup->rowSize); QUERY_CHECK_CODE(code, lino, _end); doFillResults(pOperator, pFillSup, pFillInfo, pBlock, tsCol, pInfo->srcRowIndex - 1, pRes); @@ -874,12 +863,12 @@ static void getWindowInfoByKey(SStorageAPI* pAPI, void* pState, TSKEY ts, int64_ SWinKey key = {.ts = ts, .groupId = groupId}; void* val = NULL; int32_t len = 0; - int32_t code = pAPI->stateStore.streamStateFillGet(pState, &key, (void**)&val, &len); + int32_t code = pAPI->stateStore.streamStateFillGet(pState, &key, (void**)&val, &len, NULL); if (code != TSDB_CODE_SUCCESS) { qDebug("get window info by key failed, Data may be deleted, try next window. 
ts:%" PRId64 ", groupId:%" PRId64, ts, groupId); SStreamStateCur* pCur = pAPI->stateStore.streamStateFillSeekKeyNext(pState, &key); - code = pAPI->stateStore.streamStateGetGroupKVByCur(pCur, &key, (const void**)&val, &len); + code = pAPI->stateStore.streamStateFillGetGroupKVByCur(pCur, &key, (const void**)&val, &len); pAPI->stateStore.streamStateFreeCur(pCur); qDebug("get window info by key ts:%" PRId64 ", groupId:%" PRId64 ", res%d", ts, groupId, code); } @@ -957,7 +946,7 @@ static int32_t doDeleteFillResult(SOperatorInfo* pOperator) { SWinKey delKey = {.groupId = delGroupId, .ts = delTs}; if (delTs == nextKey.ts) { pAPI->stateStore.streamStateCurNext(pOperator->pTaskInfo->streamInfo.pState, pCur); - winCode = pAPI->stateStore.streamStateGetGroupKVByCur(pCur, &nextKey, NULL, NULL); + winCode = pAPI->stateStore.streamStateFillGetGroupKVByCur(pCur, &nextKey, NULL, NULL); // ts will be deleted later if (delTs != ts) { pAPI->stateStore.streamStateFillDel(pOperator->pTaskInfo->streamInfo.pState, &delKey); @@ -986,9 +975,12 @@ static int32_t doDeleteFillResult(SOperatorInfo* pOperator) { return code; } -static void resetStreamFillInfo(SStreamFillOperatorInfo* pInfo) { - tSimpleHashClear(pInfo->pFillSup->pResMap); - pInfo->pFillSup->hasDelete = false; +void resetStreamFillSup(SStreamFillSupporter* pFillSup) { + tSimpleHashClear(pFillSup->pResMap); + pFillSup->hasDelete = false; +} +void resetStreamFillInfo(SStreamFillOperatorInfo* pInfo) { + resetStreamFillSup(pInfo->pFillSup); taosArrayClear(pInfo->pFillInfo->delRanges); pInfo->pFillInfo->delIndex = 0; } @@ -1165,6 +1157,273 @@ static int32_t doStreamFillNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { return code; } +void doBuildForceFillResultImpl(SOperatorInfo* pOperator, SStreamFillSupporter* pFillSup, + SStreamFillInfo* pFillInfo, SSDataBlock* pBlock, SGroupResInfo* pGroupResInfo) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI; + void* pState = 
pOperator->pTaskInfo->streamInfo.pState; + bool res = false; + int32_t numOfRows = getNumOfTotalRes(pGroupResInfo); + for (; pGroupResInfo->index < numOfRows; pGroupResInfo->index++) { + SWinKey* pKey = (SWinKey*)taosArrayGet(pGroupResInfo->pRows, pGroupResInfo->index); + if (pBlock->info.id.groupId == 0) { + pBlock->info.id.groupId = pKey->groupId; + } else if (pBlock->info.id.groupId != pKey->groupId) { + break; + } + void* val = NULL; + int32_t len = 0; + int32_t winCode = pAPI->stateStore.streamStateFillGet(pOperator->pTaskInfo->streamInfo.pState, pKey, (void**)&val, &len, NULL); + qDebug("===stream=== build force fill res. key:%" PRId64 ",groupId:%" PRId64".res:%d", pKey->ts, pKey->groupId, winCode); + if (winCode == TSDB_CODE_SUCCESS) { + pFillSup->cur.key = pKey->ts; + pFillSup->cur.pRowVal = val; + code = buildFillResult(&pFillSup->cur, pFillSup, pKey->ts, pBlock, &res); + QUERY_CHECK_CODE(code, lino, _end); + resetFillWindow(&pFillSup->cur); + } else { + SStreamStateCur* pCur = pAPI->stateStore.streamStateFillSeekKeyPrev(pState, pKey); + SWinKey preKey = {.ts = INT64_MIN, .groupId = pKey->groupId}; + void* preVal = NULL; + int32_t preVLen = 0; + winCode = pAPI->stateStore.streamStateFillGetGroupKVByCur(pCur, &preKey, (const void**)&preVal, &preVLen); + if (winCode == TSDB_CODE_SUCCESS) { + pFillSup->cur.key = pKey->ts; + pFillSup->cur.pRowVal = preVal; + if (pFillInfo->type == TSDB_FILL_PREV) { + code = buildFillResult(&pFillSup->cur, pFillSup, pKey->ts, pBlock, &res); + QUERY_CHECK_CODE(code, lino, _end); + } else { + copyNotFillExpData(pFillSup, pFillInfo); + pFillInfo->pResRow->key = pKey->ts; + code = buildFillResult(pFillInfo->pResRow, pFillSup, pKey->ts, pBlock, &res); + QUERY_CHECK_CODE(code, lino, _end); + } + resetFillWindow(&pFillSup->cur); + } + pAPI->stateStore.streamStateFreeCur(pCur); + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } +} + +void 
doBuildForceFillResult(SOperatorInfo* pOperator, SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo, + SSDataBlock* pBlock, SGroupResInfo* pGroupResInfo) { + blockDataCleanup(pBlock); + if (!hasRemainResults(pGroupResInfo)) { + return; + } + + // clear the existed group id + pBlock->info.id.groupId = 0; + doBuildForceFillResultImpl(pOperator, pFillSup, pFillInfo, pBlock, pGroupResInfo); +} + +static int32_t buildForceFillResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamFillOperatorInfo* pInfo = pOperator->info; + uint16_t opType = pOperator->operatorType; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + + doBuildForceFillResult(pOperator, pInfo->pFillSup, pInfo->pFillInfo, pInfo->pRes, &pInfo->groupResInfo); + if (pInfo->pRes->info.rows != 0) { + printDataBlock(pInfo->pRes, getStreamOpName(opType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->pRes; + goto _end; + } + + (*ppRes) = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +// force window close impl +static int32_t doStreamForceFillImpl(SOperatorInfo* pOperator) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamFillOperatorInfo* pInfo = pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStreamFillSupporter* pFillSup = pInfo->pFillSup; + SStreamFillInfo* pFillInfo = pInfo->pFillInfo; + SSDataBlock* pBlock = pInfo->pSrcBlock; + uint64_t groupId = pBlock->info.id.groupId; + SStreamAggSupporter* pAggSup = pInfo->pStreamAggSup; + SColumnInfoData* pTsCol = taosArrayGet(pInfo->pSrcBlock->pDataBlock, pInfo->primaryTsCol); + TSKEY* tsCol = (TSKEY*)pTsCol->pData; + for (int32_t i = 0; i < pBlock->info.rows; i++){ + code = keepBlockRowInDiscBuf(pOperator, pFillInfo, pBlock, tsCol, i, groupId, pFillSup->rowSize); + QUERY_CHECK_CODE(code, lino, _end); + + int32_t size = taosArrayGetSize(pInfo->pCloseTs); 
+ if (size > 0) { + TSKEY* pTs = (TSKEY*) taosArrayGet(pInfo->pCloseTs, 0); + TSKEY resTs = tsCol[i]; + while (resTs < (*pTs)) { + SWinKey key = {.groupId = groupId, .ts = resTs}; + void* pPushRes = taosArrayPush(pInfo->pUpdated, &key); + QUERY_CHECK_NULL(pPushRes, code, lino, _end, terrno); + + if (IS_FILL_CONST_VALUE(pFillSup->type)) { + break; + } + resTs = taosTimeAdd(resTs, pFillSup->interval.sliding, pFillSup->interval.slidingUnit, + pFillSup->interval.precision); + } + } + } + code = pAggSup->stateStore.streamStateGroupPut(pAggSup->pState, groupId, NULL, 0); + QUERY_CHECK_CODE(code, lino, _end); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s. task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + } + return code; +} + +int32_t buildAllResultKey(SStreamAggSupporter* pAggSup, TSKEY ts, SArray* pUpdated) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int64_t groupId = 0; + SStreamStateCur* pCur = pAggSup->stateStore.streamStateGroupGetCur(pAggSup->pState); + while (1) { + int32_t winCode = pAggSup->stateStore.streamStateGroupGetKVByCur(pCur, &groupId, NULL, NULL); + if (winCode != TSDB_CODE_SUCCESS) { + break; + } + SWinKey key = {.ts = ts, .groupId = groupId}; + void* pPushRes = taosArrayPush(pUpdated, &key); + QUERY_CHECK_NULL(pPushRes, code, lino, _end, terrno); + + pAggSup->stateStore.streamStateGroupCurNext(pCur); + } + pAggSup->stateStore.streamStateFreeCur(pCur); + pCur = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + pAggSup->stateStore.streamStateFreeCur(pCur); + pCur = NULL; + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static void removeDuplicateResult(SArray* pTsArrray, __compar_fn_t fn) { + taosArraySort(pTsArrray, fn); + taosArrayRemoveDuplicate(pTsArrray, fn, NULL); +} + +// force window close +static int32_t doStreamForceFillNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t 
lino = 0; + SStreamFillOperatorInfo* pInfo = pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + + if (pOperator->status == OP_EXEC_DONE) { + (*ppRes) = NULL; + return code; + } + + if (pOperator->status == OP_RES_TO_RETURN) { + SSDataBlock* resBlock = NULL; + code = buildForceFillResult(pOperator, &resBlock); + QUERY_CHECK_CODE(code, lino, _end); + + if (resBlock != NULL) { + (*ppRes) = resBlock; + goto _end; + } + pInfo->pStreamAggSup->stateStore.streamStateClearExpiredState(pInfo->pStreamAggSup->pState); + setStreamOperatorCompleted(pOperator); + (*ppRes) = NULL; + goto _end; + } + + SSDataBlock* fillResult = NULL; + SOperatorInfo* downstream = pOperator->pDownstream[0]; + while (1) { + SSDataBlock* pBlock = getNextBlockFromDownstream(pOperator, 0); + if (pBlock == NULL) { + pOperator->status = OP_RES_TO_RETURN; + qDebug("===stream===return data:%s.", getStreamOpName(pOperator->operatorType)); + break; + } + printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "recv", GET_TASKID(pTaskInfo)); + setStreamOperatorState(&pInfo->basic, pBlock->info.type); + + switch (pBlock->info.type) { + case STREAM_NORMAL: + case STREAM_INVALID: { + code = doApplyStreamScalarCalculation(pOperator, pBlock, pInfo->pSrcBlock); + QUERY_CHECK_CODE(code, lino, _end); + + memcpy(pInfo->pSrcBlock->info.parTbName, pBlock->info.parTbName, TSDB_TABLE_NAME_LEN); + pInfo->srcRowIndex = -1; + } break; + case STREAM_CHECKPOINT: + case STREAM_CREATE_CHILD_TABLE: { + (*ppRes) = pBlock; + goto _end; + } break; + case STREAM_GET_RESULT: { + void* pPushRes = taosArrayPush(pInfo->pCloseTs, &pBlock->info.window.skey); + QUERY_CHECK_NULL(pPushRes, code, lino, _end, terrno); + continue; + } + default: + code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } + + code = doStreamForceFillImpl(pOperator); + QUERY_CHECK_CODE(code, lino, _end); + } + + for (int32_t i = 0; i < taosArrayGetSize(pInfo->pCloseTs); i++) { + TSKEY ts = *(TSKEY*) 
taosArrayGet(pInfo->pCloseTs, i); + code = buildAllResultKey(pInfo->pStreamAggSup, ts, pInfo->pUpdated); + QUERY_CHECK_CODE(code, lino, _end); + } + taosArrayClear(pInfo->pCloseTs); + removeDuplicateResult(pInfo->pUpdated, winKeyCmprImpl); + + initMultiResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated); + pInfo->groupResInfo.freeItem = false; + + pInfo->pUpdated = taosArrayInit(1024, sizeof(SWinKey)); + QUERY_CHECK_NULL(pInfo->pUpdated, code, lino, _end, terrno); + + code = blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); + QUERY_CHECK_CODE(code, lino, _end); + + code = buildForceFillResult(pOperator, ppRes); + QUERY_CHECK_CODE(code, lino, _end); + + if ((*ppRes) == NULL) { + pInfo->pStreamAggSup->stateStore.streamStateClearExpiredState(pInfo->pStreamAggSup->pState); + setStreamOperatorCompleted(pOperator); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s. task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + pTaskInfo->code = code; + } + return code; +} + static int32_t initResultBuf(SSDataBlock* pInputRes, SStreamFillSupporter* pFillSup) { int32_t numOfCols = taosArrayGetSize(pInputRes->pDataBlock); pFillSup->rowSize = sizeof(SResultCellData) * numOfCols; @@ -1201,7 +1460,7 @@ static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNod QUERY_CHECK_CODE(code, lino, _end); pFillSup->pAllColInfo = createFillColInfo(pFillExprInfo, pFillSup->numOfFillCols, noFillExprInfo, numOfNotFillCols, - (const SNodeListNode*)(pPhyFillNode->pValues)); + NULL, 0, (const SNodeListNode*)(pPhyFillNode->pValues)); if (pFillSup->pAllColInfo == NULL) { code = terrno; lino = __LINE__; @@ -1277,42 +1536,40 @@ SStreamFillInfo* initStreamFillInfo(SStreamFillSupporter* pFillSup, SSDataBlock* for (int32_t i = 0; i < pFillSup->numOfAllCols; i++) { SColumnInfoData* pColData = taosArrayGet(pRes->pDataBlock, i); - SPoint value = {0}; - value.val = taosMemoryCalloc(1, pColData->info.bytes); - if 
(!value.val) { - code = terrno; - QUERY_CHECK_CODE(code, lino, _end); + if (pColData == NULL) { + SPoint dummy = {0}; + dummy.val = taosMemoryCalloc(1, 1); + void* tmpRes = taosArrayPush(pFillInfo->pLinearInfo->pEndPoints, &dummy); + QUERY_CHECK_NULL(tmpRes, code, lino, _end, terrno); + + dummy.val = taosMemoryCalloc(1, 1); + tmpRes = taosArrayPush(pFillInfo->pLinearInfo->pNextEndPoints, &dummy); + QUERY_CHECK_NULL(tmpRes, code, lino, _end, terrno); + + continue; } + SPoint value = {0}; + value.val = taosMemoryCalloc(1, pColData->info.bytes); + QUERY_CHECK_NULL(value.val, code, lino, _end, terrno); void* tmpRes = taosArrayPush(pFillInfo->pLinearInfo->pEndPoints, &value); - if (!tmpRes) { - code = terrno; - QUERY_CHECK_CODE(code, lino, _end); - } + QUERY_CHECK_NULL(tmpRes, code, lino, _end, terrno); value.val = taosMemoryCalloc(1, pColData->info.bytes); - if (!value.val) { - code = terrno; - QUERY_CHECK_CODE(code, lino, _end); - } + QUERY_CHECK_NULL(value.val, code, lino, _end, terrno); tmpRes = taosArrayPush(pFillInfo->pLinearInfo->pNextEndPoints, &value); - if (!tmpRes) { - code = terrno; - QUERY_CHECK_CODE(code, lino, _end); - } + QUERY_CHECK_NULL(tmpRes, code, lino, _end, terrno); } } pFillInfo->pLinearInfo->winIndex = 0; + pFillInfo->pNonFillRow = NULL; pFillInfo->pResRow = NULL; if (pFillSup->type == TSDB_FILL_SET_VALUE || pFillSup->type == TSDB_FILL_SET_VALUE_F || pFillSup->type == TSDB_FILL_NULL || pFillSup->type == TSDB_FILL_NULL_F) { pFillInfo->pResRow = taosMemoryCalloc(1, sizeof(SResultRowData)); - if (!pFillInfo->pResRow) { - code = terrno; - QUERY_CHECK_CODE(code, lino, _end); - } + QUERY_CHECK_NULL(pFillInfo->pResRow, code, lino, _end, terrno); pFillInfo->pResRow->key = INT64_MIN; pFillInfo->pResRow->pRowVal = taosMemoryCalloc(1, pFillSup->rowSize); @@ -1324,9 +1581,20 @@ SStreamFillInfo* initStreamFillInfo(SStreamFillSupporter* pFillSup, SSDataBlock* for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) { SColumnInfoData* pColData = 
taosArrayGet(pRes->pDataBlock, i); SResultCellData* pCell = getResultCell(pFillInfo->pResRow, i); + if (pColData == NULL) { + pCell->bytes = 1; + pCell->type = 4; + continue; + } pCell->bytes = pColData->info.bytes; pCell->type = pColData->info.type; } + + pFillInfo->pNonFillRow = taosMemoryCalloc(1, sizeof(SResultRowData)); + QUERY_CHECK_NULL(pFillInfo->pNonFillRow, code, lino, _end, terrno); + pFillInfo->pNonFillRow->key = INT64_MIN; + pFillInfo->pNonFillRow->pRowVal = taosMemoryCalloc(1, pFillSup->rowSize); + memcpy(pFillInfo->pNonFillRow->pRowVal, pFillInfo->pResRow->pRowVal, pFillSup->rowSize); } pFillInfo->type = pFillSup->type; @@ -1338,6 +1606,7 @@ SStreamFillInfo* initStreamFillInfo(SStreamFillSupporter* pFillSup, SSDataBlock* pFillInfo->delIndex = 0; pFillInfo->curGroupId = 0; + pFillInfo->hasNext = false; return pFillInfo; _end: @@ -1348,8 +1617,68 @@ SStreamFillInfo* initStreamFillInfo(SStreamFillSupporter* pFillSup, SSDataBlock* return NULL; } +static void setValueForFillInfo(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo) { + if (pFillInfo->type == TSDB_FILL_SET_VALUE || pFillInfo->type == TSDB_FILL_SET_VALUE_F) { + for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) { + SFillColInfo* pFillCol = pFillSup->pAllColInfo + i; + int32_t slotId = GET_DEST_SLOT_ID(pFillCol); + SResultCellData* pCell = getResultCell(pFillInfo->pResRow, slotId); + SVariant* pVar = &(pFillCol->fillVal); + if (pCell->type == TSDB_DATA_TYPE_FLOAT) { + float v = 0; + GET_TYPED_DATA(v, float, pVar->nType, &pVar->i); + SET_TYPED_DATA(pCell->pData, pCell->type, v); + } else if (IS_FLOAT_TYPE(pCell->type)) { + double v = 0; + GET_TYPED_DATA(v, double, pVar->nType, &pVar->i); + SET_TYPED_DATA(pCell->pData, pCell->type, v); + } else if (IS_INTEGER_TYPE(pCell->type)) { + int64_t v = 0; + GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i); + SET_TYPED_DATA(pCell->pData, pCell->type, v); + } else { + pCell->isNull = true; + } + } + } else if (pFillInfo->type == TSDB_FILL_NULL 
|| pFillInfo->type == TSDB_FILL_NULL_F) { + for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) { + SFillColInfo* pFillCol = pFillSup->pAllColInfo + i; + int32_t slotId = GET_DEST_SLOT_ID(pFillCol); + SResultCellData* pCell = getResultCell(pFillInfo->pResRow, slotId); + pCell->isNull = true; + } + } +} + +int32_t getDownStreamInfo(SOperatorInfo* downstream, int8_t* triggerType, SInterval* pInterval, SStreamAggSupporter** ppAggSup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + if (IS_NORMAL_INTERVAL_OP(downstream)) { + SStreamIntervalOperatorInfo* pInfo = downstream->info; + *triggerType = pInfo->twAggSup.calTrigger; + *pInterval = pInfo->interval; + (*ppAggSup) = NULL; + } else if (IS_CONTINUE_INTERVAL_OP(downstream)) { + SStreamIntervalSliceOperatorInfo* pInfo = downstream->info; + *triggerType = pInfo->twAggSup.calTrigger; + *pInterval = pInfo->interval; + pInfo->hasFill = true; + (*ppAggSup) = &pInfo->streamAggSup; + pInfo->streamAggSup.stateStore.streamStateSetFillInfo(pInfo->streamAggSup.pState); + } else { + code = TSDB_CODE_STREAM_INTERNAL_ERROR; + } + QUERY_CHECK_CODE(code, lino, _end); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysiNode* pPhyFillNode, - SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo) { + SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo) { QRY_PARAM_CHECK(pOptrInfo); int32_t code = TSDB_CODE_SUCCESS; @@ -1361,7 +1690,6 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi QUERY_CHECK_CODE(code, lino, _error); } - SInterval* pInterval = &((SStreamIntervalOperatorInfo*)downstream->info)->interval; int32_t numOfFillCols = 0; SExprInfo* pFillExprInfo = NULL; @@ -1374,7 +1702,12 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi pInfo->pSrcBlock = 
createDataBlockFromDescNode(pPhyFillNode->node.pOutputDataBlockDesc); QUERY_CHECK_NULL(pInfo->pSrcBlock, code, lino, _error, terrno); - pInfo->pFillSup = initStreamFillSup(pPhyFillNode, pInterval, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI, + int8_t triggerType = 0; + SInterval interval = {0}; + code = getDownStreamInfo(downstream, &triggerType, &interval, &pInfo->pStreamAggSup); + QUERY_CHECK_CODE(code, lino, _error); + + pInfo->pFillSup = initStreamFillSup(pPhyFillNode, &interval, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI, pInfo->pSrcBlock); if (!pInfo->pFillSup) { code = TSDB_CODE_FAILED; @@ -1396,36 +1729,7 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi goto _error; } - if (pInfo->pFillInfo->type == TSDB_FILL_SET_VALUE || pInfo->pFillInfo->type == TSDB_FILL_SET_VALUE_F) { - for (int32_t i = 0; i < pInfo->pFillSup->numOfAllCols; ++i) { - SFillColInfo* pFillCol = pInfo->pFillSup->pAllColInfo + i; - int32_t slotId = GET_DEST_SLOT_ID(pFillCol); - SResultCellData* pCell = getResultCell(pInfo->pFillInfo->pResRow, slotId); - SVariant* pVar = &(pFillCol->fillVal); - if (pCell->type == TSDB_DATA_TYPE_FLOAT) { - float v = 0; - GET_TYPED_DATA(v, float, pVar->nType, &pVar->i); - SET_TYPED_DATA(pCell->pData, pCell->type, v); - } else if (IS_FLOAT_TYPE(pCell->type)) { - double v = 0; - GET_TYPED_DATA(v, double, pVar->nType, &pVar->i); - SET_TYPED_DATA(pCell->pData, pCell->type, v); - } else if (IS_INTEGER_TYPE(pCell->type)) { - int64_t v = 0; - GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i); - SET_TYPED_DATA(pCell->pData, pCell->type, v); - } else { - pCell->isNull = true; - } - } - } else if (pInfo->pFillInfo->type == TSDB_FILL_NULL || pInfo->pFillInfo->type == TSDB_FILL_NULL_F) { - for (int32_t i = 0; i < pInfo->pFillSup->numOfAllCols; ++i) { - SFillColInfo* pFillCol = pInfo->pFillSup->pAllColInfo + i; - int32_t slotId = GET_DEST_SLOT_ID(pFillCol); - SResultCellData* pCell = 
getResultCell(pInfo->pFillInfo->pResRow, slotId); - pCell->isNull = true; - } - } + setValueForFillInfo(pInfo->pFillSup, pInfo->pFillInfo); code = createSpecialDataBlock(STREAM_DELETE_RESULT, &pInfo->pDelRes); QUERY_CHECK_CODE(code, lino, _error); @@ -1433,6 +1737,12 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi code = blockDataEnsureCapacity(pInfo->pDelRes, pOperator->resultInfo.capacity); QUERY_CHECK_CODE(code, lino, _error); + pInfo->pUpdated = taosArrayInit(1024, sizeof(SWinKey)); + QUERY_CHECK_NULL(pInfo->pUpdated, code, lino, _error, terrno); + + pInfo->pCloseTs = taosArrayInit(1024, sizeof(TSKEY)); + QUERY_CHECK_NULL(pInfo->pCloseTs, code, lino, _error, terrno); + pInfo->primaryTsCol = ((STargetNode*)pPhyFillNode->pWStartTs)->slotId; pInfo->primarySrcSlotId = ((SColumnNode*)((STargetNode*)pPhyFillNode->pWStartTs)->pExpr)->slotId; @@ -1447,8 +1757,14 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi pInfo->srcRowIndex = -1; setOperatorInfo(pOperator, "StreamFillOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL, false, OP_NOT_OPENED, pInfo, pTaskInfo); - pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamFillNext, NULL, destroyStreamFillOperatorInfo, - optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); + + if (triggerType == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamForceFillNext, NULL, destroyStreamFillOperatorInfo, + optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); + } else { + pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamFillNext, NULL, destroyStreamFillOperatorInfo, + optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); + } setOperatorStreamStateFn(pOperator, streamOpReleaseState, streamOpReloadState); code = appendDownstream(pOperator, &downstream, 1); diff --git a/source/libs/executor/src/streamintervalsliceoperator.c b/source/libs/executor/src/streamintervalsliceoperator.c new 
file mode 100644 index 00000000000..bc35b58a99d --- /dev/null +++ b/source/libs/executor/src/streamintervalsliceoperator.c @@ -0,0 +1,643 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +#include "executorInt.h" +#include "functionMgt.h" +#include "operator.h" +#include "querytask.h" +#include "storageapi.h" +#include "streamexecutorInt.h" +#include "tcommon.h" +#include "tcompare.h" +#include "tdatablock.h" +#include "ttime.h" + +#define STREAM_INTERVAL_SLICE_OP_CHECKPOINT_NAME "StreamIntervalSliceOperator_Checkpoint" + +typedef struct SInervalSlicePoint { + SSessionKey winKey; + bool *pFinished; + SSliceRowData* pLastRow; + SRowBuffPos* pResPos; +} SInervalSlicePoint; + +typedef enum SIntervalSliceType { + INTERVAL_SLICE_START = 1, + INTERVAL_SLICE_END = 2, +} SIntervalSliceType; + +void streamIntervalSliceReleaseState(SOperatorInfo* pOperator) { +} + +void streamIntervalSliceReloadState(SOperatorInfo* pOperator) { +} + +void destroyStreamIntervalSliceOperatorInfo(void* param) { + SStreamIntervalSliceOperatorInfo* pInfo = (SStreamIntervalSliceOperatorInfo*)param; + if (param == NULL) { + return; + } + cleanupBasicInfo(&pInfo->binfo); + if (pInfo->pOperator) { + cleanupResultInfoInStream(pInfo->pOperator->pTaskInfo, pInfo->streamAggSup.pState, &pInfo->pOperator->exprSupp, + &pInfo->groupResInfo); + pInfo->pOperator = NULL; + } + + clearGroupResInfo(&pInfo->groupResInfo); + taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); + 
pInfo->pUpdated = NULL; + + if (pInfo->pUpdatedMap != NULL) { + tSimpleHashSetFreeFp(pInfo->pUpdatedMap, destroyFlusedppPos); + tSimpleHashCleanup(pInfo->pUpdatedMap); + pInfo->pUpdatedMap = NULL; + } + destroyStreamAggSupporter(&pInfo->streamAggSup); + + colDataDestroy(&pInfo->twAggSup.timeWindowData); + cleanupExprSupp(&pInfo->scalarSup); + + tSimpleHashCleanup(pInfo->pDeletedMap); + taosArrayDestroy(pInfo->pDelWins); + blockDataDestroy(pInfo->pDelRes); + + blockDataDestroy(pInfo->pCheckpointRes); + + taosMemoryFreeClear(param); +} + +static int32_t buildIntervalSliceResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamIntervalSliceOperatorInfo* pInfo = pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + uint16_t opType = pOperator->operatorType; + SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; + + doBuildDeleteResultImpl(&pInfo->streamAggSup.stateStore, pInfo->streamAggSup.pState, pInfo->pDelWins, &pInfo->delIndex, + pInfo->pDelRes); + if (pInfo->pDelRes->info.rows != 0) { + // process the rest of the data + printDataBlock(pInfo->pDelRes, getStreamOpName(opType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->pDelRes; + return code; + } + + doBuildStreamIntervalResult(pOperator, pInfo->streamAggSup.pState, pInfo->binfo.pRes, &pInfo->groupResInfo); + if (pInfo->binfo.pRes->info.rows != 0) { + printDataBlock(pInfo->binfo.pRes, getStreamOpName(opType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->binfo.pRes; + goto _end; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +// static void doStreamIntervalSliceSaveCheckpoint(SOperatorInfo* pOperator) { +// } + +void initIntervalSlicePoint(SStreamAggSupporter* pAggSup, STimeWindow* pTWin, int64_t groupId, SInervalSlicePoint* pPoint) { + pPoint->winKey.groupId = groupId; + pPoint->winKey.win = *pTWin; + pPoint->pFinished = 
POINTER_SHIFT(pPoint->pResPos->pRowBuff, pAggSup->resultRowSize - pAggSup->stateKeySize); + pPoint->pLastRow = POINTER_SHIFT(pPoint->pFinished, sizeof(bool)); +} + +static int32_t getIntervalSliceCurStateBuf(SStreamAggSupporter* pAggSup, SInterval* pInterval, bool needPrev, STimeWindow* pTWin, int64_t groupId, + SInervalSlicePoint* pCurPoint, SInervalSlicePoint* pPrevPoint, int32_t* pWinCode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SWinKey curKey = {.ts = pTWin->skey, .groupId = groupId}; + int32_t curVLen = 0; + code = pAggSup->stateStore.streamStateAddIfNotExist(pAggSup->pState, &curKey, (void**)&pCurPoint->pResPos, + &curVLen, pWinCode); + QUERY_CHECK_CODE(code, lino, _end); + + qDebug("===stream=== set stream twa cur point buf.ts:%" PRId64 ", groupId:%" PRIu64 ", res:%d", + curKey.ts, curKey.groupId, *pWinCode); + + initIntervalSlicePoint(pAggSup, pTWin, groupId, pCurPoint); + + if (needPrev) { + SWinKey prevKey = {.groupId = groupId}; + SET_WIN_KEY_INVALID(prevKey.ts); + int32_t prevVLen = 0; + int32_t prevWinCode = TSDB_CODE_SUCCESS; + code = pAggSup->stateStore.streamStateGetPrev(pAggSup->pState, &curKey, &prevKey, (void**)&pPrevPoint->pResPos, + &prevVLen, &prevWinCode); + QUERY_CHECK_CODE(code, lino, _end); + + if (prevWinCode == TSDB_CODE_SUCCESS) { + STimeWindow prevSTW = {.skey = prevKey.ts}; + prevSTW.ekey = taosTimeGetIntervalEnd(prevSTW.skey, pInterval); + initIntervalSlicePoint(pAggSup, &prevSTW, groupId, pPrevPoint); + qDebug("===stream=== set stream twa prev point buf.ts:%" PRId64 ", groupId:%" PRIu64 ", res:%d", pPrevPoint->winKey.win.skey, + pPrevPoint->winKey.groupId, prevWinCode); + } else { + SET_WIN_KEY_INVALID(pPrevPoint->winKey.win.skey); + SET_WIN_KEY_INVALID(pPrevPoint->winKey.win.ekey); + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +void doStreamSliceInterpolation(SSliceRowData* pPrevWinVal, TSKEY winKey, TSKEY 
curTs, SSDataBlock* pDataBlock, + int32_t curRowIndex, SExprSupp* pSup, SIntervalSliceType type) { + SqlFunctionCtx* pCtx = pSup->pCtx; + for (int32_t k = 0; k < pSup->numOfExprs; ++k) { + if (!fmIsIntervalInterpoFunc(pCtx[k].functionId)) { + pCtx[k].start.key = INT64_MIN; + continue; + } + + SFunctParam* pParam = &pCtx[k].param[0]; + SColumnInfoData* pColInfo = taosArrayGet(pDataBlock->pDataBlock, pParam->pCol->slotId); + + double prevVal = 0, curVal = 0, winVal = 0; + SResultCellData* pCell = getSliceResultCell((SResultCellData*)pPrevWinVal->pRowVal, pParam->pCol->slotId); + GET_TYPED_DATA(prevVal, double, pCell->type, pCell->pData); + GET_TYPED_DATA(curVal, double, pColInfo->info.type, colDataGetData(pColInfo, curRowIndex)); + + SPoint point1 = (SPoint){.key = pPrevWinVal->key, .val = &prevVal}; + SPoint point2 = (SPoint){.key = curTs, .val = &curVal}; + SPoint point = (SPoint){.key = winKey, .val = &winVal}; + + if (!fmIsElapsedFunc(pCtx[k].functionId)) { + taosGetLinearInterpolationVal(&point, TSDB_DATA_TYPE_DOUBLE, &point1, &point2, TSDB_DATA_TYPE_DOUBLE); + } + + if (type == INTERVAL_SLICE_START) { + pCtx[k].start.key = point.key; + pCtx[k].start.val = winVal; + } else { + pCtx[k].end.key = point.key; + pCtx[k].end.val = winVal; + } + } +} + +void doSetElapsedEndKey(TSKEY winKey, SExprSupp* pSup) { + SqlFunctionCtx* pCtx = pSup->pCtx; + for (int32_t k = 0; k < pSup->numOfExprs; ++k) { + if (fmIsElapsedFunc(pCtx[k].functionId)) { + pCtx[k].end.key = winKey; + pCtx[k].end.val = 0; + } + } +} + +static void resetIntervalSliceFunctionKey(SqlFunctionCtx* pCtx, int32_t numOfOutput) { + for (int32_t k = 0; k < numOfOutput; ++k) { + pCtx[k].start.key = INT64_MIN; + pCtx[k].end.key = INT64_MIN; + } +} + +int32_t setIntervalSliceOutputBuf(SInervalSlicePoint* pPoint, SqlFunctionCtx* pCtx, int32_t numOfOutput, + int32_t* rowEntryInfoOffset) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SResultRow* res = pPoint->pResPos->pRowBuff; + + // set time window for 
current result + res->win = pPoint->winKey.win; + code = setResultRowInitCtx(res, pCtx, numOfOutput, rowEntryInfoOffset); + QUERY_CHECK_CODE(code, lino, _end); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static void setInterpoWindowFinished(SInervalSlicePoint* pPoint) { + (*pPoint->pFinished) = true; +} + +static bool isInterpoWindowFinished(SInervalSlicePoint* pPoint) { + return *pPoint->pFinished; +} + +static int32_t doStreamIntervalSliceAggImpl(SOperatorInfo* pOperator, SSDataBlock* pBlock, SSHashObj* pUpdatedMap, + SSHashObj* pDeletedMap) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamIntervalSliceOperatorInfo* pInfo = (SStreamIntervalSliceOperatorInfo*)pOperator->info; + SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo); + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SExprSupp* pSup = &pOperator->exprSupp; + int32_t numOfOutput = pSup->numOfExprs; + TSKEY* tsCols = NULL; + int64_t groupId = pBlock->info.id.groupId; + SResultRow* pResult = NULL; + int32_t forwardRows = 0; + + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); + tsCols = (int64_t*)pColDataInfo->pData; + + int32_t startPos = 0; + TSKEY curTs = getStartTsKey(&pBlock->info.window, tsCols); + SInervalSlicePoint curPoint = {0}; + SInervalSlicePoint prevPoint = {0}; + STimeWindow curWin = + getActiveTimeWindow(NULL, pResultRowInfo, curTs, &pInfo->interval, TSDB_ORDER_ASC); + while (1) { + if (curTs > pInfo->endTs) { + break; + } + + int32_t winCode = TSDB_CODE_SUCCESS; + code = getIntervalSliceCurStateBuf(&pInfo->streamAggSup, &pInfo->interval, pInfo->hasInterpoFunc, &curWin, groupId, &curPoint, &prevPoint, &winCode); + QUERY_CHECK_CODE(code, lino, _end); + + if (pInfo->hasInterpoFunc && IS_VALID_WIN_KEY(prevPoint.winKey.win.skey) && isInterpoWindowFinished(&prevPoint) == false) { + code = 
setIntervalSliceOutputBuf(&prevPoint, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset); + QUERY_CHECK_CODE(code, lino, _end); + + resetIntervalSliceFunctionKey(pSup->pCtx, numOfOutput); + doSetElapsedEndKey(prevPoint.winKey.win.ekey, &pOperator->exprSupp); + doStreamSliceInterpolation(prevPoint.pLastRow, prevPoint.winKey.win.ekey, curTs, pBlock, startPos, &pOperator->exprSupp, INTERVAL_SLICE_END); + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &prevPoint.winKey.win, 1); + code = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, + 0, pBlock->info.rows, numOfOutput); + QUERY_CHECK_CODE(code, lino, _end); + SWinKey prevKey = {.ts = prevPoint.winKey.win.skey, .groupId = prevPoint.winKey.groupId}; + code = saveWinResult(&prevKey, prevPoint.pResPos, pInfo->pUpdatedMap); + QUERY_CHECK_CODE(code, lino, _end); + setInterpoWindowFinished(&prevPoint); + } + + code = setIntervalSliceOutputBuf(&curPoint, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset); + QUERY_CHECK_CODE(code, lino, _end); + + resetIntervalSliceFunctionKey(pSup->pCtx, numOfOutput); + if (pInfo->hasInterpoFunc && IS_VALID_WIN_KEY(prevPoint.winKey.win.skey) && curPoint.winKey.win.skey != curTs) { + doStreamSliceInterpolation(prevPoint.pLastRow, curPoint.winKey.win.skey, curTs, pBlock, startPos, &pOperator->exprSupp, INTERVAL_SLICE_START); + } + forwardRows = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, curWin.ekey, binarySearchForKey, NULL, + TSDB_ORDER_ASC); + int32_t prevEndPos = (forwardRows - 1) + startPos; + if (pInfo->hasInterpoFunc && winCode != TSDB_CODE_SUCCESS) { + int32_t endRowId = getQualifiedRowNumDesc(pSup, pBlock, tsCols, prevEndPos, false); + TSKEY endRowTs = tsCols[endRowId]; + transBlockToSliceResultRow(pBlock, endRowId, endRowTs, curPoint.pLastRow, 0, NULL, NULL); + } + SWinKey curKey = {.ts = curPoint.winKey.win.skey, .groupId = curPoint.winKey.groupId}; + if (pInfo->destHasPrimaryKey && winCode == TSDB_CODE_SUCCESS) { 
+ code = tSimpleHashPut(pDeletedMap, &curKey, sizeof(SWinKey), NULL, 0); + QUERY_CHECK_CODE(code, lino, _end); + } + + code = saveWinResult(&curKey, curPoint.pResPos, pInfo->pUpdatedMap); + QUERY_CHECK_CODE(code, lino, _end); + + updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &curPoint.winKey.win, 1); + code = applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, + forwardRows, pBlock->info.rows, numOfOutput); + QUERY_CHECK_CODE(code, lino, _end); + + if (curPoint.pLastRow->key == curPoint.winKey.win.ekey) { + setInterpoWindowFinished(&curPoint); + } + + startPos = getNextQualifiedWindow(&pInfo->interval, &curWin, &pBlock->info, tsCols, prevEndPos, TSDB_ORDER_ASC); + if (startPos < 0) { + break; + } + curTs = tsCols[startPos]; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s. task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + } + return code; +} + +static int32_t doStreamIntervalSliceNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamIntervalSliceOperatorInfo* pInfo = pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; + + qDebug("stask:%s %s status: %d", GET_TASKID(pTaskInfo), getStreamOpName(pOperator->operatorType), pOperator->status); + + if (pOperator->status == OP_EXEC_DONE) { + (*ppRes) = NULL; + goto _end; + } + + if (pOperator->status == OP_RES_TO_RETURN) { + SSDataBlock* resBlock = NULL; + code = buildIntervalSliceResult(pOperator, &resBlock); + QUERY_CHECK_CODE(code, lino, _end); + if (resBlock != NULL) { + (*ppRes) = resBlock; + return code; + } + + if (pInfo->hasFill == false) { + pAggSup->stateStore.streamStateClearExpiredState(pAggSup->pState); + } + setStreamOperatorCompleted(pOperator); + (*ppRes) = NULL; + return code; + } + + SOperatorInfo* downstream = pOperator->pDownstream[0]; + int32_t numOfDatapack 
= 0; + + while (1) { + SSDataBlock* pBlock = NULL; + code = downstream->fpSet.getNextFn(downstream, &pBlock); + QUERY_CHECK_CODE(code, lino, _end); + + if (pBlock == NULL) { + pOperator->status = OP_RES_TO_RETURN; + break; + } + + switch (pBlock->info.type) { + case STREAM_NORMAL: + case STREAM_INVALID: { + SExprSupp* pExprSup = &pInfo->scalarSup; + if (pExprSup->pExprInfo != NULL) { + code = projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL); + QUERY_CHECK_CODE(code, lino, _end); + } + } break; + case STREAM_CHECKPOINT: { + pInfo->recvCkBlock = true; + pAggSup->stateStore.streamStateCommit(pAggSup->pState); + // doStreamIntervalSliceSaveCheckpoint(pOperator); + pInfo->recvCkBlock = true; + code = copyDataBlock(pInfo->pCheckpointRes, pBlock); + QUERY_CHECK_CODE(code, lino, _end); + continue; + } break; + case STREAM_CREATE_CHILD_TABLE: { + (*ppRes) = pBlock; + goto _end; + } break; + case STREAM_GET_RESULT: { + pInfo->endTs = taosTimeGetIntervalEnd(pBlock->info.window.skey, &pInfo->interval); + if (pInfo->hasFill) { + (*ppRes) = pBlock; + goto _end; + } else { + continue; + } + } + default: + code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } + + code = setInputDataBlock(&pOperator->exprSupp, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true); + QUERY_CHECK_CODE(code, lino, _end); + code = doStreamIntervalSliceAggImpl(pOperator, pBlock, pInfo->pUpdatedMap, NULL); + QUERY_CHECK_CODE(code, lino, _end); + + } + + if (!pInfo->destHasPrimaryKey) { + removeDeleteResults(pInfo->pUpdatedMap, pInfo->pDelWins); + } + + if (pInfo->destHasPrimaryKey) { + code = copyIntervalDeleteKey(pInfo->pDeletedMap, pInfo->pDelWins); + QUERY_CHECK_CODE(code, lino, _end); + } + + code = copyUpdateResult(&pInfo->pUpdatedMap, pInfo->pUpdated, winPosCmprImpl); + QUERY_CHECK_CODE(code, lino, _end); + + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pUpdatedMap = tSimpleHashInit(1024, 
hashFn); + QUERY_CHECK_NULL(pInfo->pUpdatedMap, code, lino, _end, terrno); + + initMultiResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated); + pInfo->pUpdated = taosArrayInit(1024, POINTER_BYTES); + QUERY_CHECK_NULL(pInfo->pUpdated, code, lino, _end, terrno); + + code = blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); + QUERY_CHECK_CODE(code, lino, _end); + + (*ppRes) = NULL; + code = buildIntervalSliceResult(pOperator, ppRes); + QUERY_CHECK_CODE(code, lino, _end); + + if ((*ppRes) == NULL) { + if (pInfo->hasFill == false) { + pAggSup->stateStore.streamStateClearExpiredState(pAggSup->pState); + } + setStreamOperatorCompleted(pOperator); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t initIntervalSliceDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, uint16_t type, + int32_t tsColIndex, STimeWindowAggSupp* pTwSup, struct SSteamOpBasicInfo* pBasic, + SInterval* pInterval, bool hasInterpoFunc) { + SExecTaskInfo* pTaskInfo = downstream->pTaskInfo; + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION) { + SStreamPartitionOperatorInfo* pPartionInfo = downstream->info; + pPartionInfo->tsColIndex = tsColIndex; + pBasic->primaryPkIndex = pPartionInfo->basic.primaryPkIndex; + } + + if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + code = + initIntervalSliceDownStream(downstream->pDownstream[0], pAggSup, type, tsColIndex, pTwSup, pBasic, pInterval, hasInterpoFunc); + return code; + } + SStreamScanInfo* pScanInfo = downstream->info; + pScanInfo->useGetResultRange = hasInterpoFunc; + pScanInfo->igCheckUpdate = true; + pScanInfo->windowSup = (SWindowSupporter){.pStreamAggSup = pAggSup, .gap = pAggSup->gap, .parentType = type}; + pScanInfo->pState = pAggSup->pState; + if (!pScanInfo->pUpdateInfo) { + code = 
pAggSup->stateStore.updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, pTwSup->waterMark, + pScanInfo->igCheckUpdate, pScanInfo->pkColType, pScanInfo->pkColLen, + &pScanInfo->pUpdateInfo); + QUERY_CHECK_CODE(code, lino, _end); + } + pScanInfo->twAggSup = *pTwSup; + pScanInfo->interval = *pInterval; + pAggSup->pUpdateInfo = pScanInfo->pUpdateInfo; + if (!hasSrcPrimaryKeyCol(pBasic)) { + pBasic->primaryPkIndex = pScanInfo->basic.primaryPkIndex; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s. task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + } + return code; +} + +static bool windowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols) { + bool needed = false; + for (int32_t i = 0; i < numOfCols; ++i) { + SExprInfo* pExpr = pCtx[i].pExpr; + if (fmIsIntervalInterpoFunc(pCtx[i].functionId)) { + needed = true; + break; + } + } + return needed; +} + +int32_t createStreamIntervalSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, + SReadHandle* pHandle, SOperatorInfo** ppOptInfo) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamIntervalSliceOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamIntervalSliceOperatorInfo)); + QUERY_CHECK_NULL(pInfo, code, lino, _error, terrno); + + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + QUERY_CHECK_NULL(pOperator, code, lino, _error, terrno) + + pInfo->pUpdated = taosArrayInit(1024, POINTER_BYTES); + QUERY_CHECK_NULL(pInfo->pUpdated, code, lino, _error, terrno); + + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pUpdatedMap = tSimpleHashInit(1024, hashFn); + QUERY_CHECK_NULL(pInfo->pUpdatedMap, code, lino, _error, terrno); + + pInfo->pDeletedMap = tSimpleHashInit(1024, hashFn); + QUERY_CHECK_NULL(pInfo->pDeletedMap, code, lino, _error, terrno); + + pInfo->delIndex = 0; + pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey)); + QUERY_CHECK_NULL(pInfo->pDelWins, 
code, lino, _error, terrno); + + code = createSpecialDataBlock(STREAM_DELETE_RESULT, &pInfo->pDelRes); + QUERY_CHECK_CODE(code, lino, _error); + + SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); + QUERY_CHECK_NULL(pResBlock, code, lino, _error, terrno); + initBasicInfo(&pInfo->binfo, pResBlock); + + code = createSpecialDataBlock(STREAM_CHECKPOINT, &pInfo->pCheckpointRes); + QUERY_CHECK_CODE(code, lino, _error); + pInfo->recvCkBlock = false; + + SStreamIntervalPhysiNode* pIntervalPhyNode = (SStreamIntervalPhysiNode*)pPhyNode; + pOperator->pTaskInfo = pTaskInfo; + initResultSizeInfo(&pOperator->resultInfo, 4096); + SExprSupp* pExpSup = &pOperator->exprSupp; + int32_t numOfExprs = 0; + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &pExprInfo, &numOfExprs); + QUERY_CHECK_CODE(code, lino, _error); + + code = initExprSupp(pExpSup, pExprInfo, numOfExprs, &pTaskInfo->storageAPI.functionStore); + QUERY_CHECK_CODE(code, lino, _error); + + pInfo->interval = (SInterval){.interval = pIntervalPhyNode->interval, + .sliding = pIntervalPhyNode->sliding, + .intervalUnit = pIntervalPhyNode->intervalUnit, + .slidingUnit = pIntervalPhyNode->slidingUnit, + .offset = pIntervalPhyNode->offset, + .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision}; + + pInfo->twAggSup = + (STimeWindowAggSupp){.waterMark = pIntervalPhyNode->window.watermark, + .calTrigger = pIntervalPhyNode->window.triggerType, + .maxTs = INT64_MIN, + .minTs = INT64_MAX, + .deleteMark = getDeleteMark(&pIntervalPhyNode->window, pIntervalPhyNode->interval)}; + code = initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); + QUERY_CHECK_CODE(code, lino, _error); + + if (pIntervalPhyNode->window.pExprs != NULL) { + int32_t numOfScalar = 0; + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &pScalarExprInfo, &numOfScalar); + 
QUERY_CHECK_CODE(code, lino, _error); + + code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore); + QUERY_CHECK_CODE(code, lino, _error); + } + + SSDataBlock* pDownRes = NULL; + SColumnInfo* pPkCol = NULL; + code = getDownstreamRes(downstream, &pDownRes, &pPkCol); + QUERY_CHECK_CODE(code, lino, _error); + + int32_t keyBytes = sizeof(TSKEY); + keyBytes += blockDataGetRowSize(pDownRes) + sizeof(SResultCellData) * taosArrayGetSize(pDownRes->pDataBlock) + sizeof(bool); + if (pPkCol) { + keyBytes += pPkCol->bytes; + } + code = initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfExprs, 0, pTaskInfo->streamInfo.pState, keyBytes, 0, + &pTaskInfo->storageAPI.stateStore, pHandle, &pInfo->twAggSup, GET_TASKID(pTaskInfo), + &pTaskInfo->storageAPI, pInfo->primaryTsIndex, STREAM_STATE_BUFF_HASH_SEARCH, 1); + + pInfo->destHasPrimaryKey = pIntervalPhyNode->window.destHasPrimaryKey; + pInfo->pOperator = pOperator; + pInfo->hasFill = false; + pInfo->hasInterpoFunc = windowinterpNeeded(pExpSup->pCtx, numOfExprs); + + setOperatorInfo(pOperator, "StreamIntervalSliceOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL, true, OP_NOT_OPENED, + pInfo, pTaskInfo); + pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamIntervalSliceNext, NULL, destroyStreamIntervalSliceOperatorInfo, + optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); + setOperatorStreamStateFn(pOperator, streamIntervalSliceReleaseState, streamIntervalSliceReloadState); + + initStreamBasicInfo(&pInfo->basic); + if (downstream) { + code = initIntervalSliceDownStream(downstream, &pInfo->streamAggSup, pPhyNode->type, pInfo->primaryTsIndex, + &pInfo->twAggSup, &pInfo->basic, &pInfo->interval, pInfo->hasInterpoFunc); + QUERY_CHECK_CODE(code, lino, _error); + + code = appendDownstream(pOperator, &downstream, 1); + QUERY_CHECK_CODE(code, lino, _error); + } + + (*ppOptInfo) = pOperator; + return code; + +_error: + if (code != TSDB_CODE_SUCCESS) { + 
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (pInfo != NULL) { + destroyStreamIntervalSliceOperatorInfo(pInfo); + } + destroyOperatorAndDownstreams(pOperator, &downstream, 1); + pTaskInfo->code = code; + (*ppOptInfo) = NULL; + return code; +} diff --git a/source/libs/executor/src/streamtimesliceoperator.c b/source/libs/executor/src/streamtimesliceoperator.c new file mode 100644 index 00000000000..b120bb63743 --- /dev/null +++ b/source/libs/executor/src/streamtimesliceoperator.c @@ -0,0 +1,2187 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +#include "executorInt.h" +#include "filter.h" +#include "function.h" +#include "functionMgt.h" +#include "operator.h" +#include "querytask.h" +#include "storageapi.h" +#include "streamexecutorInt.h" +#include "tchecksum.h" +#include "tcommon.h" +#include "tcompare.h" +#include "tdatablock.h" +#include "tfill.h" +#include "ttime.h" + +#define STREAM_TIME_SLICE_OP_STATE_NAME "StreamTimeSliceHistoryState" +#define STREAM_TIME_SLICE_OP_CHECKPOINT_NAME "StreamTimeSliceOperator_Checkpoint" + + +int32_t saveTimeSliceWinResult(SWinKey* pKey, SSHashObj* pUpdatedMap) { + return tSimpleHashPut(pUpdatedMap, pKey, sizeof(SWinKey), NULL, 0); +} + +void streamTimeSliceReleaseState(SOperatorInfo* pOperator) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamTimeSliceOperatorInfo* pInfo = pOperator->info; + int32_t winNum = taosArrayGetSize(pInfo->historyWins); + + int32_t winSize = winNum * sizeof(SWinKey); + int32_t resSize = winSize + sizeof(TSKEY); + char* pBuff = taosMemoryCalloc(1, resSize); + QUERY_CHECK_NULL(pBuff, code, lino, _end, terrno); + + if (winNum > 0) { + memcpy(pBuff, pInfo->historyWins->pData, winSize); + } + memcpy(pBuff + winSize, &pInfo->twAggSup.maxTs, sizeof(TSKEY)); + qDebug("===stream=== time slice operator relase state. 
save result count:%d", winNum); + pInfo->streamAggSup.stateStore.streamStateSaveInfo(pInfo->streamAggSup.pState, STREAM_TIME_SLICE_OP_STATE_NAME, + strlen(STREAM_TIME_SLICE_OP_STATE_NAME), pBuff, resSize); + pInfo->streamAggSup.stateStore.streamStateCommit(pInfo->streamAggSup.pState); + taosMemoryFreeClear(pBuff); + + SOperatorInfo* downstream = pOperator->pDownstream[0]; + if (downstream->fpSet.releaseStreamStateFn) { + downstream->fpSet.releaseStreamStateFn(downstream); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } +} + +void streamTimeSliceReloadState(SOperatorInfo* pOperator) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamTimeSliceOperatorInfo* pInfo = pOperator->info; + SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStreamFillSupporter* pFillSup = pInfo->pFillSup; + resetWinRange(&pAggSup->winRange); + + int32_t size = 0; + void* pBuf = NULL; + code = pAggSup->stateStore.streamStateGetInfo(pAggSup->pState, STREAM_TIME_SLICE_OP_STATE_NAME, + strlen(STREAM_TIME_SLICE_OP_STATE_NAME), &pBuf, &size); + QUERY_CHECK_CODE(code, lino, _end); + + int32_t num = (size - sizeof(TSKEY)) / sizeof(SWinKey); + qDebug("===stream=== time slice operator reload state. get result count:%d", num); + SWinKey* pKeyBuf = (SWinKey*)pBuf; + QUERY_CHECK_CONDITION((size == num * sizeof(SWinKey) + sizeof(TSKEY)), code, lino, _end, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR); + + TSKEY ts = *(TSKEY*)((char*)pBuf + size - sizeof(TSKEY)); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, ts); + pAggSup->stateStore.streamStateReloadInfo(pAggSup->pState, ts); + qDebug("===stream=== reload state. 
reload ts:%" PRId64, ts); + + if (!pInfo->pUpdatedMap && num > 0) { + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pUpdatedMap = tSimpleHashInit(64, hashFn); + QUERY_CHECK_NULL(pInfo->pUpdatedMap, code, lino, _end, terrno); + } + + int32_t tmpRes = TSDB_CODE_SUCCESS; + for (int32_t i = 0; i < num; i++) { + SWinKey* pKey = pKeyBuf + i; + SWinKey resKey = {.groupId = pKey->groupId}; + if (pFillSup->type != TSDB_FILL_PREV && pFillSup->type != TSDB_FILL_LINEAR) { + code = pAggSup->stateStore.streamStateFillGetNext(pAggSup->pState, pKey, &resKey, NULL, NULL, &tmpRes); + QUERY_CHECK_CODE(code, lino, _end); + + if (tmpRes != TSDB_CODE_SUCCESS) { + continue; + } + } else { + resKey = *pKey; + } + qDebug("===stream=== reload state. try process result %" PRId64 ", %" PRIu64 ", index:%d", resKey.ts, + resKey.groupId, i); + code = saveTimeSliceWinResult(&resKey, pInfo->pUpdatedMap); + QUERY_CHECK_CODE(code, lino, _end); + } + taosMemoryFree(pBuf); + + SOperatorInfo* downstream = pOperator->pDownstream[0]; + if (downstream->fpSet.reloadStreamStateFn) { + downstream->fpSet.reloadStreamStateFn(downstream); + } + reloadAggSupFromDownStream(downstream, &pInfo->streamAggSup); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s. 
task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + } +} + +static void resetFillWindow(SResultRowData* pRowData) { + pRowData->key = INT64_MIN; + pRowData->pRowVal = NULL; +} + +static void resetPrevAndNextWindow(SStreamFillSupporter* pFillSup) { + resetFillWindow(&pFillSup->cur); + resetFillWindow(&pFillSup->prev); + resetFillWindow(&pFillSup->next); + resetFillWindow(&pFillSup->nextNext); +} + +void destroyStreamTimeSliceOperatorInfo(void* param) { + SStreamTimeSliceOperatorInfo* pInfo = (SStreamTimeSliceOperatorInfo*)param; + if (pInfo->pOperator) { + cleanupResultInfoInStream(pInfo->pOperator->pTaskInfo, pInfo->streamAggSup.pState, &pInfo->pOperator->exprSupp, + &pInfo->groupResInfo); + pInfo->pOperator = NULL; + } + colDataDestroy(&pInfo->twAggSup.timeWindowData); + destroyStreamAggSupporter(&pInfo->streamAggSup); + resetPrevAndNextWindow(pInfo->pFillSup); + destroyStreamFillSupporter(pInfo->pFillSup); + destroyStreamFillInfo(pInfo->pFillInfo); + blockDataDestroy(pInfo->pRes); + blockDataDestroy(pInfo->pDelRes); + blockDataDestroy(pInfo->pCheckpointRes); + + taosMemoryFreeClear(pInfo->leftRow.pRowVal); + taosMemoryFreeClear(pInfo->valueRow.pRowVal); + taosMemoryFreeClear(pInfo->rightRow.pRowVal); + + cleanupExprSupp(&pInfo->scalarSup); + taosArrayDestroy(pInfo->historyPoints); + + taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); + pInfo->pUpdated = NULL; + + tSimpleHashCleanup(pInfo->pUpdatedMap); + pInfo->pUpdatedMap = NULL; + + taosArrayDestroy(pInfo->pDelWins); + tSimpleHashCleanup(pInfo->pDeletedMap); + clearGroupResInfo(&pInfo->groupResInfo); + + taosArrayDestroy(pInfo->historyWins); + + taosArrayDestroy(pInfo->pCloseTs); + + taosMemoryFreeClear(param); +} + +int32_t doStreamTimeSliceEncodeOpState(void** buf, int32_t len, SOperatorInfo* pOperator, int32_t* pLen) { + int32_t code = TSDB_CODE_SUCCESS; + SStreamTimeSliceOperatorInfo* pInfo = pOperator->info; + if (!pInfo) { + return TSDB_CODE_FAILED; + } + + void* pData = (buf == NULL) 
? NULL : *buf; + + // 1.streamAggSup.pResultRows + int32_t tlen = 0; + int32_t mapSize = tSimpleHashGetSize(pInfo->streamAggSup.pResultRows); + tlen += taosEncodeFixedI32(buf, mapSize); + void* pIte = NULL; + size_t keyLen = 0; + int32_t iter = 0; + while ((pIte = tSimpleHashIterate(pInfo->streamAggSup.pResultRows, pIte, &iter)) != NULL) { + void* pKey = tSimpleHashGetKey(pIte, &keyLen); + tlen += encodeSSessionKey(buf, pKey); + tlen += encodeSResultWindowInfo(buf, pIte, pInfo->streamAggSup.resultRowSize); + } + + // 2.twAggSup + tlen += encodeSTimeWindowAggSupp(buf, &pInfo->twAggSup); + + // 3.checksum + if (buf) { + uint32_t cksum = taosCalcChecksum(0, pData, len - sizeof(uint32_t)); + tlen += taosEncodeFixedU32(buf, cksum); + } else { + tlen += sizeof(uint32_t); + } + + (*pLen) = tlen; + return code; +} + +int32_t doStreamTimeSliceDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOperator) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamTimeSliceOperatorInfo* pInfo = pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + if (!pInfo) { + code = TSDB_CODE_FAILED; + QUERY_CHECK_CODE(code, lino, _end); + } + SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; + + // 3.checksum + int32_t dataLen = len - sizeof(uint32_t); + void* pCksum = POINTER_SHIFT(buf, dataLen); + if (taosCheckChecksum(buf, dataLen, *(uint32_t*)pCksum) != TSDB_CODE_SUCCESS) { + qError("stream event state is invalid"); + code = TSDB_CODE_FAILED; + QUERY_CHECK_CODE(code, lino, _end); + } + + // 1.streamAggSup.pResultRows + int32_t mapSize = 0; + buf = taosDecodeFixedI32(buf, &mapSize); + for (int32_t i = 0; i < mapSize; i++) { + SResultWindowInfo winfo = {0}; + buf = decodeSSessionKey(buf, &winfo.sessionWin); + int32_t winCode = TSDB_CODE_SUCCESS; + code = pAggSup->stateStore.streamStateSessionAddIfNotExist( + pAggSup->pState, &winfo.sessionWin, pAggSup->gap, (void**)&winfo.pStatePos, &pAggSup->resultRowSize, &winCode); + QUERY_CHECK_CODE(code, lino, _end); + 
+ buf = decodeSResultWindowInfo(buf, &winfo, pInfo->streamAggSup.resultRowSize); + code = tSimpleHashPut(pInfo->streamAggSup.pResultRows, &winfo.sessionWin, sizeof(SSessionKey), &winfo, + sizeof(SResultWindowInfo)); + QUERY_CHECK_CODE(code, lino, _end); + } + + // 2.twAggSup + buf = decodeSTimeWindowAggSupp(buf, &pInfo->twAggSup); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s. task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + } + return code; +} + +static int32_t initTimeSliceResultBuf(SStreamFillSupporter* pFillSup, SExprSupp* pExpSup) { + pFillSup->rowSize = sizeof(TSKEY) + getResultRowSize(pExpSup->pCtx, pFillSup->numOfAllCols); + pFillSup->next.key = INT64_MIN; + pFillSup->nextNext.key = INT64_MIN; + pFillSup->prev.key = INT64_MIN; + pFillSup->cur.key = INT64_MIN; + pFillSup->next.pRowVal = NULL; + pFillSup->nextNext.pRowVal = NULL; + pFillSup->prev.pRowVal = NULL; + pFillSup->cur.pRowVal = NULL; + + return TSDB_CODE_SUCCESS; +} + +static int32_t initTimeSliceFillSup(SStreamInterpFuncPhysiNode* pPhyFillNode, SExprSupp* pExprSup, int32_t numOfExprs, SColumnInfo* pPkCol, + SStreamFillSupporter** ppResFillSup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamFillSupporter* pFillSup = taosMemoryCalloc(1, sizeof(SStreamFillSupporter)); + QUERY_CHECK_NULL(pFillSup, code, lino, _end, terrno); + + pFillSup->numOfFillCols = numOfExprs; + int32_t numOfNotFillCols = 0; + pFillSup->pAllColInfo = createFillColInfo(pExprSup->pExprInfo, pFillSup->numOfFillCols, NULL, numOfNotFillCols, NULL, 0, + (const SNodeListNode*)(pPhyFillNode->pFillValues)); + QUERY_CHECK_NULL(pFillSup->pAllColInfo, code, lino, _end, terrno); + + pFillSup->type = convertFillType(pPhyFillNode->fillMode); + pFillSup->numOfAllCols = pFillSup->numOfFillCols + numOfNotFillCols; + pFillSup->interval.interval = pPhyFillNode->interval; + pFillSup->interval.intervalUnit = pPhyFillNode->intervalUnit; + pFillSup->interval.offset = 0; + 
pFillSup->interval.offsetUnit = pPhyFillNode->intervalUnit; + pFillSup->interval.precision = pPhyFillNode->precision; + pFillSup->interval.sliding = pPhyFillNode->interval; + pFillSup->interval.slidingUnit = pPhyFillNode->intervalUnit; + pFillSup->pAPI = NULL; + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pFillSup->pResMap = tSimpleHashInit(16, hashFn); + QUERY_CHECK_NULL(pFillSup->pResMap, code, lino, _end, terrno); + + code = initTimeSliceResultBuf(pFillSup, pExprSup); + QUERY_CHECK_CODE(code, lino, _end); + + pFillSup->hasDelete = false; + if (pPkCol != NULL) { + pFillSup->pkColBytes = pPkCol->bytes; + pFillSup->comparePkColFn = getKeyComparFunc(pPkCol->type, TSDB_ORDER_ASC); + } else { + pFillSup->pkColBytes = 0; + pFillSup->comparePkColFn = NULL; + } + + (*ppResFillSup) = pFillSup; + +_end: + if (code != TSDB_CODE_SUCCESS) { + destroyStreamFillSupporter(pFillSup); + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static void doStreamTimeSliceSaveCheckpoint(SOperatorInfo* pOperator) { + SStreamTimeSliceOperatorInfo* pInfo = pOperator->info; + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + void* buf = NULL; + if (needSaveStreamOperatorInfo(&pInfo->basic)) { + int32_t len = 0; + code = doStreamTimeSliceEncodeOpState(NULL, 0, pOperator, &len); + QUERY_CHECK_CODE(code, lino, _end); + + buf = taosMemoryCalloc(1, len); + QUERY_CHECK_NULL(buf, code, lino, _end, terrno); + + void* pBuf = buf; + code = doStreamTimeSliceEncodeOpState(&pBuf, len, pOperator, &len); + QUERY_CHECK_CODE(code, lino, _end); + + pInfo->streamAggSup.stateStore.streamStateSaveInfo(pInfo->streamAggSup.pState, STREAM_TIME_SLICE_OP_CHECKPOINT_NAME, + strlen(STREAM_TIME_SLICE_OP_CHECKPOINT_NAME), buf, len); + saveStreamOperatorStateComplete(&pInfo->basic); + } + +_end: + taosMemoryFreeClear(buf); + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } +} + 
+SResultCellData* getSliceResultCell(SResultCellData* pRowVal, int32_t index) { + if (!pRowVal) { + return NULL; + } + char* pData = (char*)pRowVal; + SResultCellData* pCell = pRowVal; + for (int32_t i = 0; i < index; i++) { + pData += (pCell->bytes + sizeof(SResultCellData)); + pCell = (SResultCellData*)pData; + } + return pCell; +} + +static bool isGroupKeyFunc(SExprInfo* pExprInfo) { + int32_t functionType = pExprInfo->pExpr->_function.functionType; + return (functionType == FUNCTION_TYPE_GROUP_KEY); +} + +static bool isSelectGroupConstValueFunc(SExprInfo* pExprInfo) { + int32_t functionType = pExprInfo->pExpr->_function.functionType; + return (functionType == FUNCTION_TYPE_GROUP_CONST_VALUE); +} + +static int32_t fillPointResult(SStreamFillSupporter* pFillSup, SResultRowData* pResRow, SResultRowData* pNonFillRow, TSKEY ts, SSDataBlock* pBlock, + bool* pRes, bool isFilled) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + if (pBlock->info.rows >= pBlock->info.capacity) { + (*pRes) = false; + goto _end; + } + + bool ckRes = true; + code = checkResult(pFillSup, ts, pBlock->info.id.groupId, &ckRes); + QUERY_CHECK_CODE(code, lino, _end); + if (!ckRes) { + (*pRes) = true; + goto _end; + } + + for (int32_t i = 0; i < pFillSup->numOfAllCols; i++) { + SFillColInfo* pFillCol = pFillSup->pAllColInfo + i; + int32_t dstSlotId = GET_DEST_SLOT_ID(pFillCol); + SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId); + + if (isIrowtsPseudoColumn(pFillCol->pExpr)) { + code = colDataSetVal(pDstCol, pBlock->info.rows, (char*)&ts, false); + QUERY_CHECK_CODE(code, lino, _end); + } else if (isIsfilledPseudoColumn(pFillCol->pExpr)) { + code = colDataSetVal(pDstCol, pBlock->info.rows, (char*)&isFilled, false); + QUERY_CHECK_CODE(code, lino, _end); + } else { + int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId; + SResultCellData* pCell = NULL; + if (IS_FILL_CONST_VALUE(pFillSup->type) && (isGroupKeyFunc(pFillCol->pExpr) || 
isSelectGroupConstValueFunc(pFillCol->pExpr)) ) { + pCell = getSliceResultCell(pNonFillRow->pRowVal, srcSlot); + } else { + pCell = getSliceResultCell(pResRow->pRowVal, srcSlot); + } + code = setRowCell(pDstCol, pBlock->info.rows, pCell); + QUERY_CHECK_CODE(code, lino, _end); + } + } + + pBlock->info.rows++; + (*pRes) = true; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static void fillNormalRange(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo, SSDataBlock* pBlock) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + while (hasRemainCalc(pFillInfo) && pBlock->info.rows < pBlock->info.capacity) { + STimeWindow st = {.skey = pFillInfo->current, .ekey = pFillInfo->current}; + // if (inWinRange(&pFillSup->winRange, &st)) { + bool res = true; + code = fillPointResult(pFillSup, pFillInfo->pResRow, pFillInfo->pNonFillRow, pFillInfo->current, pBlock, &res, true); + QUERY_CHECK_CODE(code, lino, _end); + // } + pFillInfo->current = taosTimeAdd(pFillInfo->current, pFillSup->interval.sliding, pFillSup->interval.slidingUnit, + pFillSup->interval.precision); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } +} + +static void fillLinearRange(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo, SSDataBlock* pBlock) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + while (hasRemainCalc(pFillInfo) && pBlock->info.rows < pBlock->info.capacity) { + bool ckRes = true; + code = checkResult(pFillSup, pFillInfo->current, pBlock->info.id.groupId, &ckRes); + QUERY_CHECK_CODE(code, lino, _end); + for (int32_t i = 0; i < pFillSup->numOfAllCols && ckRes; ++i) { + SFillColInfo* pFillCol = pFillSup->pAllColInfo + i; + int32_t dstSlotId = GET_DEST_SLOT_ID(pFillCol); + SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId); + int16_t type = pDstCol->info.type; + int32_t 
index = pBlock->info.rows; + if (isIrowtsPseudoColumn(pFillCol->pExpr)) { + code = colDataSetVal(pDstCol, pBlock->info.rows, (char*)&pFillInfo->current, false); + QUERY_CHECK_CODE(code, lino, _end); + } else if (isIsfilledPseudoColumn(pFillCol->pExpr)) { + bool isFilled = true; + code = colDataSetVal(pDstCol, pBlock->info.rows, (char*)&isFilled, false); + QUERY_CHECK_CODE(code, lino, _end); + } else if (isInterpFunc(pFillCol->pExpr)) { + int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId; + SResultCellData* pCell = getSliceResultCell(pFillInfo->pResRow->pRowVal, srcSlot); + if (IS_VAR_DATA_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || pCell->isNull) { + colDataSetNULL(pDstCol, index); + continue; + } + SPoint* pEnd = taosArrayGet(pFillInfo->pLinearInfo->pEndPoints, srcSlot); + double vCell = 0; + SPoint start = {0}; + start.key = pFillInfo->pResRow->key; + start.val = pCell->pData; + + SPoint cur = {0}; + cur.key = pFillInfo->current; + cur.val = taosMemoryCalloc(1, pCell->bytes); + QUERY_CHECK_NULL(cur.val, code, lino, _end, terrno); + + taosGetLinearInterpolationVal(&cur, pCell->type, &start, pEnd, pCell->type); + code = colDataSetVal(pDstCol, index, (const char*)cur.val, false); + QUERY_CHECK_CODE(code, lino, _end); + + destroySPoint(&cur); + } else { + int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId; + SResultCellData* pCell = getSliceResultCell(pFillInfo->pResRow->pRowVal, srcSlot); + code = setRowCell(pDstCol, pBlock->info.rows, pCell); + QUERY_CHECK_CODE(code, lino, _end); + } + } + pFillInfo->current = taosTimeAdd(pFillInfo->current, pFillSup->interval.sliding, pFillSup->interval.slidingUnit, + pFillSup->interval.precision); + if (ckRes) { + pBlock->info.rows++; + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } +} + +static void setFillKeyInfo(TSKEY start, TSKEY end, SInterval* pInterval, SStreamFillInfo* pFillInfo) { + pFillInfo->start = start; + 
pFillInfo->current = pFillInfo->start; + pFillInfo->end = end; +} + +static TSKEY adustPrevTsKey(TSKEY pointTs, TSKEY rowTs, SInterval* pInterval) { + if (rowTs >= pointTs) { + pointTs = taosTimeAdd(pointTs, pInterval->sliding, pInterval->slidingUnit, pInterval->precision); + } + return pointTs; +} + +static TSKEY adustEndTsKey(TSKEY pointTs, TSKEY rowTs, SInterval* pInterval) { + if (rowTs <= pointTs) { + pointTs = taosTimeAdd(pointTs, pInterval->sliding * -1, pInterval->slidingUnit, pInterval->precision); + } + return pointTs; +} + +static void adjustFillResRow(SResultRowData** ppResRow, SStreamFillSupporter* pFillSup) { + if (pFillSup->type == TSDB_FILL_PREV) { + (*ppResRow) = &pFillSup->cur; + } else if (pFillSup->type == TSDB_FILL_NEXT){ + (*ppResRow) = &pFillSup->next; + } +} + +static void doStreamFillRange(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo, SSDataBlock* pRes) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + bool res = true; + if (pFillInfo->needFill == false && pFillInfo->pos != FILL_POS_INVALID) { + code = fillPointResult(pFillSup, &pFillSup->cur, pFillInfo->pNonFillRow, pFillSup->cur.key, pRes, &res, false); + QUERY_CHECK_CODE(code, lino, _end); + return; + } + + if (pFillInfo->pos == FILL_POS_START) { + code = fillPointResult(pFillSup, &pFillSup->cur, pFillInfo->pNonFillRow, pFillSup->cur.key, pRes, &res, false); + QUERY_CHECK_CODE(code, lino, _end); + if (res) { + pFillInfo->pos = FILL_POS_INVALID; + } + } + if (pFillInfo->type != TSDB_FILL_LINEAR) { + fillNormalRange(pFillSup, pFillInfo, pRes); + + if (pFillInfo->pos == FILL_POS_MID) { + code = fillPointResult(pFillSup, &pFillSup->cur, pFillInfo->pNonFillRow, pFillSup->cur.key, pRes, &res, false); + QUERY_CHECK_CODE(code, lino, _end); + if (res) { + pFillInfo->pos = FILL_POS_INVALID; + } + } + if (pFillInfo->current > pFillInfo->end && pFillInfo->hasNext) { + pFillInfo->hasNext = false; + TSKEY startTs = adustPrevTsKey(pFillInfo->current, pFillSup->cur.key, 
&pFillSup->interval); + setFillKeyInfo(startTs, pFillSup->next.key, &pFillSup->interval, pFillInfo); + adjustFillResRow(&pFillInfo->pResRow, pFillSup); + fillNormalRange(pFillSup, pFillInfo, pRes); + } + + } else { + fillLinearRange(pFillSup, pFillInfo, pRes); + + if (pFillInfo->pos == FILL_POS_MID) { + code = fillPointResult(pFillSup, &pFillSup->cur, pFillInfo->pNonFillRow, pFillSup->cur.key, pRes, &res, false); + QUERY_CHECK_CODE(code, lino, _end); + if (res) { + pFillInfo->pos = FILL_POS_INVALID; + } + } + + if (pFillInfo->current > pFillInfo->end && pFillInfo->pLinearInfo->hasNext) { + pFillInfo->pLinearInfo->hasNext = false; + taosArraySwap(pFillInfo->pLinearInfo->pEndPoints, pFillInfo->pLinearInfo->pNextEndPoints); + pFillInfo->pResRow = &pFillSup->cur; + setFillKeyInfo(pFillSup->cur.key, pFillInfo->pLinearInfo->nextEnd, &pFillSup->interval, pFillInfo); + fillLinearRange(pFillSup, pFillInfo, pRes); + } + } + if (pFillInfo->pos == FILL_POS_END) { + code = fillPointResult(pFillSup, &pFillSup->cur, pFillInfo->pNonFillRow, pFillSup->cur.key, pRes, &res, false); + QUERY_CHECK_CODE(code, lino, _end); + if (res) { + pFillInfo->pos = FILL_POS_INVALID; + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } +} + +static int32_t getQualifiedRowNumAsc(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId, bool ignoreNull) { + if (rowId >= pBlock->info.rows) { + return -1; + } + + if (!ignoreNull) { + return rowId; + } + + for (int32_t i = rowId; i < pBlock->info.rows; i++) { + if (!checkNullRow(pExprSup, pBlock, i, ignoreNull)) { + return i; + } + } + return -1; +} + +int32_t getQualifiedRowNumDesc(SExprSupp* pExprSup, SSDataBlock* pBlock, TSKEY* tsCols, int32_t rowId, + bool ignoreNull) { + TSKEY ts = tsCols[rowId]; + int32_t resRow = -1; + for (; rowId >= 0; rowId--) { + if (checkNullRow(pExprSup, pBlock, rowId, ignoreNull)) { + continue; + } + + if (ts != tsCols[rowId]) { + if (resRow >= 0) 
{ + break; + } else { + ts = tsCols[rowId]; + } + } + resRow = rowId; + } + return resRow; +} + +static void setResultRowData(SSliceRowData** ppRowData, void* pBuff) { (*ppRowData) = (SSliceRowData*)pBuff; } + +void setPointBuff(SSlicePoint* pPoint, SStreamFillSupporter* pFillSup) { + if (pFillSup->type != TSDB_FILL_LINEAR) { + setResultRowData(&pPoint->pRightRow, pPoint->pResPos->pRowBuff); + pPoint->pLeftRow = pPoint->pRightRow; + } else { + setResultRowData(&pPoint->pLeftRow, pPoint->pResPos->pRowBuff); + void* pBuff = POINTER_SHIFT(pPoint->pResPos->pRowBuff, pFillSup->rowSize + pFillSup->pkColBytes); + setResultRowData(&pPoint->pRightRow, pBuff); + } +} + +static int32_t getLinearResultInfoFromState(SStreamAggSupporter* pAggSup, SStreamFillSupporter* pFillSup, TSKEY ts, + int64_t groupId, SSlicePoint* pCurPoint, SSlicePoint* pPrevPoint, + SSlicePoint* pNextPoint) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int32_t tmpRes = TSDB_CODE_SUCCESS; + void* pState = pAggSup->pState; + resetPrevAndNextWindow(pFillSup); + pCurPoint->pResPos = NULL; + pPrevPoint->pResPos = NULL; + pNextPoint->pResPos = NULL; + + pCurPoint->key.groupId = groupId; + pCurPoint->key.ts = ts; + int32_t curVLen = 0; + code = + pAggSup->stateStore.streamStateFillGet(pState, &pCurPoint->key, (void**)&pCurPoint->pResPos, &curVLen, &tmpRes); + QUERY_CHECK_CODE(code, lino, _end); + + setPointBuff(pCurPoint, pFillSup); + + if (HAS_ROW_DATA(pCurPoint->pRightRow)) { + pFillSup->cur.key = pCurPoint->pRightRow->key; + pFillSup->cur.pRowVal = (SResultCellData*)pCurPoint->pRightRow->pRowVal; + if (HAS_NON_ROW_DATA(pCurPoint->pLeftRow)) { + pPrevPoint->key.groupId = groupId; + int32_t preVLen = 0; + code = pAggSup->stateStore.streamStateFillGetPrev(pState, &pCurPoint->key, &pPrevPoint->key, + (void**)&pPrevPoint->pResPos, &preVLen, &tmpRes); + QUERY_CHECK_CODE(code, lino, _end); + if (tmpRes == TSDB_CODE_SUCCESS) { + QUERY_CHECK_CONDITION(!IS_INVALID_WIN_KEY(pPrevPoint->key.ts), code, lino, 
_end, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR); + setPointBuff(pPrevPoint, pFillSup); + if (HAS_ROW_DATA(pPrevPoint->pRightRow)) { + pFillSup->prev.key = pPrevPoint->pRightRow->key; + pFillSup->prev.pRowVal = (SResultCellData*)pPrevPoint->pRightRow->pRowVal; + } else { + pFillSup->prev.key = pPrevPoint->pLeftRow->key; + pFillSup->prev.pRowVal = (SResultCellData*)pPrevPoint->pLeftRow->pRowVal; + } + pFillSup->prevOriginKey = pFillSup->prev.key; + pFillSup->prev.key = adustPrevTsKey(pPrevPoint->key.ts, pFillSup->prev.key, &pFillSup->interval); + } + goto _end; + } + } + + if (HAS_ROW_DATA(pCurPoint->pLeftRow)) { + pFillSup->prev.key = pCurPoint->pLeftRow->key; + pFillSup->prev.pRowVal = (SResultCellData*)pCurPoint->pLeftRow->pRowVal; + pFillSup->prevOriginKey = pFillSup->prev.key; + pFillSup->prev.key = adustPrevTsKey(pCurPoint->key.ts, pFillSup->prev.key, &pFillSup->interval); + if (HAS_NON_ROW_DATA(pCurPoint->pRightRow)) { + pNextPoint->key.groupId = groupId; + int32_t nextVLen = 0; + code = pAggSup->stateStore.streamStateFillGetNext(pState, &pCurPoint->key, &pNextPoint->key, + (void**)&pNextPoint->pResPos, &nextVLen, &tmpRes); + QUERY_CHECK_CODE(code, lino, _end); + if (tmpRes == TSDB_CODE_SUCCESS) { + QUERY_CHECK_CONDITION(!IS_INVALID_WIN_KEY(pNextPoint->key.ts), code, lino, _end, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR); + setPointBuff(pNextPoint, pFillSup); + if (HAS_ROW_DATA(pNextPoint->pLeftRow)) { + pFillSup->next.key = pNextPoint->pLeftRow->key; + pFillSup->next.pRowVal = (SResultCellData*)pNextPoint->pLeftRow->pRowVal; + } else { + pFillSup->next.key = pNextPoint->pRightRow->key; + pFillSup->next.pRowVal = (SResultCellData*)pNextPoint->pRightRow->pRowVal; + } + pFillSup->nextOriginKey = pFillSup->next.key; + pFillSup->next.key = adustEndTsKey(pNextPoint->key.ts, pFillSup->next.key, &pFillSup->interval); + } else { + resetFillWindow(&pFillSup->prev); + } + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, 
lino, tstrerror(code));
  }
  return code;
}

// Loads the current/previous/next slice points for `ts` of `groupId` from the
// stream fill state store and primes pFillSup->cur/prev/next accordingly.
// Read-only with respect to the state store (uses Get/GetPrev/GetNext, never
// AddIfNotExist). On success the caller owns the pResPos buffers of all three
// points and must release them via releaseOutputBuf().
static int32_t getResultInfoFromState(SStreamAggSupporter* pAggSup, SStreamFillSupporter* pFillSup, TSKEY ts,
                                      int64_t groupId, SSlicePoint* pCurPoint, SSlicePoint* pPrevPoint,
                                      SSlicePoint* pNextPoint) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  int32_t tmpRes = TSDB_CODE_SUCCESS;
  void*   pState = pAggSup->pState;
  resetPrevAndNextWindow(pFillSup);
  pCurPoint->pResPos = NULL;
  pPrevPoint->pResPos = NULL;
  pNextPoint->pResPos = NULL;

  // Exact lookup of the current point.
  pCurPoint->key.groupId = groupId;
  pCurPoint->key.ts = ts;
  int32_t curVLen = 0;
  code =
      pAggSup->stateStore.streamStateFillGet(pState, &pCurPoint->key, (void**)&pCurPoint->pResPos, &curVLen, &tmpRes);
  QUERY_CHECK_CODE(code, lino, _end);

  if (tmpRes == TSDB_CODE_SUCCESS) {
    setPointBuff(pCurPoint, pFillSup);
    pFillSup->cur.key = pCurPoint->pRightRow->key;
    pFillSup->cur.pRowVal = (SResultCellData*)pCurPoint->pRightRow->pRowVal;
  } else {
    // No stored row at ts: mark cur as "after ts" so later comparisons treat it
    // as missing (ts != pFillSup->cur.key).
    pFillSup->cur.key = pCurPoint->key.ts + 1;
  }

  // Previous point: nearest stored key strictly before the current key.
  pPrevPoint->key.groupId = groupId;
  int32_t preVLen = 0;
  code = pAggSup->stateStore.streamStateFillGetPrev(pState, &pCurPoint->key, &pPrevPoint->key,
                                                    (void**)&pPrevPoint->pResPos, &preVLen, &tmpRes);
  QUERY_CHECK_CODE(code, lino, _end);
  qDebug("===stream=== set stream interp resutl prev buf.ts:%" PRId64 ", groupId:%" PRId64 ", res:%d",
         pPrevPoint->key.ts, pPrevPoint->key.groupId, tmpRes);

  if (tmpRes == TSDB_CODE_SUCCESS) {
    QUERY_CHECK_CONDITION(!IS_INVALID_WIN_KEY(pPrevPoint->key.ts), code, lino, _end,
                          TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR);
    setPointBuff(pPrevPoint, pFillSup);
    // Prefer the right-hand row of the previous point (the value closest to ts).
    if (HAS_ROW_DATA(pPrevPoint->pRightRow)) {
      pFillSup->prev.key = pPrevPoint->pRightRow->key;
      pFillSup->prev.pRowVal = (SResultCellData*)pPrevPoint->pRightRow->pRowVal;
    } else {
      pFillSup->prev.key = pPrevPoint->pLeftRow->key;
      pFillSup->prev.pRowVal = (SResultCellData*)pPrevPoint->pLeftRow->pRowVal;
    }
    pFillSup->prev.key = adustPrevTsKey(pPrevPoint->key.ts, pFillSup->prev.key, &pFillSup->interval);
  }

  // Next point: nearest stored key strictly after the current key.
  pNextPoint->key.groupId = groupId;
  int32_t nextVLen = 0;
  code = pAggSup->stateStore.streamStateFillGetNext(pState, &pCurPoint->key, &pNextPoint->key,
                                                    (void**)&pNextPoint->pResPos, &nextVLen, &tmpRes);
  QUERY_CHECK_CODE(code, lino, _end);
  qDebug("===stream=== set stream interp resutl next buf.ts:%" PRId64 ", groupId:%" PRId64 ", res:%d",
         pNextPoint->key.ts, pNextPoint->key.groupId, tmpRes);
  if (tmpRes == TSDB_CODE_SUCCESS) {
    QUERY_CHECK_CONDITION(!IS_INVALID_WIN_KEY(pNextPoint->key.ts), code, lino, _end,
                          TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR);
    setPointBuff(pNextPoint, pFillSup);
    // Prefer the left-hand row of the next point (the value closest to ts).
    if (HAS_ROW_DATA(pNextPoint->pLeftRow)) {
      pFillSup->next.key = pNextPoint->pLeftRow->key;
      pFillSup->next.pRowVal = (SResultCellData*)pNextPoint->pLeftRow->pRowVal;
    } else {
      pFillSup->next.key = pNextPoint->pRightRow->key;
      pFillSup->next.pRowVal = (SResultCellData*)pNextPoint->pRightRow->pRowVal;
    }
    pFillSup->next.key = adustEndTsKey(pNextPoint->key.ts, pFillSup->next.key, &pFillSup->interval);

    // For FILL(PREV) also remember the key after next; only the key is needed,
    // so value/len out-params are NULL. NOTE(review): nextNextVLen is unused.
    if (pFillSup->type == TSDB_FILL_PREV) {
      int32_t     nextNextVLen = 0;
      int32_t     tmpWinCode = TSDB_CODE_SUCCESS;
      SSlicePoint nextNextPoint = {.key.groupId = pNextPoint->key.groupId};
      code = pAggSup->stateStore.streamStateFillGetNext(pState, &pNextPoint->key, &nextNextPoint.key, NULL, NULL,
                                                        &tmpWinCode);
      QUERY_CHECK_CODE(code, lino, _end);
      if (tmpWinCode == TSDB_CODE_SUCCESS) {
        pFillSup->nextNext.key = nextNextPoint.key.ts;
      }
    }
  }

_end:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }
  return code;
}

// Variant used when ts falls strictly inside a window (FILL(PREV) path): the
// *next* window boundary is created via AddIfNotExist, and the current point is
// then located as its predecessor. pWinCode reports whether the next point
// already existed in the state store.
static int32_t getPointInfoFromStateRight(SStreamAggSupporter* pAggSup, SStreamFillSupporter* pFillSup, TSKEY ts,
                                          int64_t groupId, SSlicePoint* pCurPoint, SSlicePoint* pNextPoint,
                                          int32_t* pWinCode) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  int32_t tmpRes = TSDB_CODE_SUCCESS;
  void*   pState = pAggSup->pState;

  pCurPoint->pResPos = NULL;
  pNextPoint->pResPos = NULL;

  // Compute the start key of the window following ts.
  pNextPoint->key.groupId = groupId;
  STimeWindow stw = {.skey = ts, .ekey = ts};
  getNextTimeWindow(&pFillSup->interval, &stw, TSDB_ORDER_ASC);
  pNextPoint->key.ts = stw.skey;

  int32_t curVLen = 0;
  code = pAggSup->stateStore.streamStateFillAddIfNotExist(pState, &pNextPoint->key, (void**)&pNextPoint->pResPos,
                                                          &curVLen, pWinCode);
  QUERY_CHECK_CODE(code, lino, _end);

  qDebug("===stream=== set stream interp next point buf.ts:%" PRId64 ", groupId:%" PRId64 ", res:%d",
         pNextPoint->key.ts, pNextPoint->key.groupId, *pWinCode);

  setPointBuff(pNextPoint, pFillSup);

  // Freshly created entry: both row slots are garbage, invalidate them.
  if (*pWinCode != TSDB_CODE_SUCCESS) {
    if (pNextPoint->pLeftRow) {
      SET_WIN_KEY_INVALID(pNextPoint->pLeftRow->key);
    }
    if (pNextPoint->pRightRow) {
      SET_WIN_KEY_INVALID(pNextPoint->pRightRow->key);
    }
  }

  // Current point = predecessor of the next point (may not exist).
  SET_WIN_KEY_INVALID(pCurPoint->key.ts);
  pCurPoint->key.groupId = groupId;
  int32_t nextVLen = 0;
  code = pAggSup->stateStore.streamStateFillGetPrev(pState, &pNextPoint->key, &pCurPoint->key,
                                                    (void**)&pCurPoint->pResPos, &nextVLen, &tmpRes);
  QUERY_CHECK_CODE(code, lino, _end);

  qDebug("===stream=== set stream interp cur point buf.ts:%" PRId64 ", groupId:%" PRId64 ", res:%d",
         pCurPoint->key.ts, pCurPoint->key.groupId, tmpRes);

  if (tmpRes == TSDB_CODE_SUCCESS) {
    setPointBuff(pCurPoint, pFillSup);
  }

_end:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }
  return code;
}

// Creates (or fetches) the slice point at ts and resolves the following point.
// For LINEAR/PREV fills the next window is materialized with AddIfNotExist
// (both rules need a concrete next boundary); for other fill types the next
// point is only looked up. pWinCode reports whether the current point existed.
static int32_t getPointInfoFromState(SStreamAggSupporter* pAggSup, SStreamFillSupporter* pFillSup, TSKEY ts,
                                     int64_t groupId, SSlicePoint* pCurPoint, SSlicePoint* pNextPoint,
                                     int32_t* pWinCode) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  int32_t tmpRes = TSDB_CODE_SUCCESS;
  void*   pState = pAggSup->pState;
  pCurPoint->pResPos = NULL;
  pNextPoint->pResPos = NULL;
  pCurPoint->key.groupId = groupId;
  pCurPoint->key.ts = ts;

  int32_t curVLen = 0;
  code = pAggSup->stateStore.streamStateFillAddIfNotExist(pState, &pCurPoint->key, (void**)&pCurPoint->pResPos,
                                                          &curVLen, pWinCode);
  QUERY_CHECK_CODE(code, lino, _end);

  qDebug("===stream=== set stream interp buf.ts:%" PRId64 ", groupId:%" PRId64, pCurPoint->key.ts,
         pCurPoint->key.groupId);

  setPointBuff(pCurPoint, pFillSup);

  // Freshly created entry: invalidate both row slots.
  if (*pWinCode != TSDB_CODE_SUCCESS) {
    if (pCurPoint->pLeftRow) {
      SET_WIN_KEY_INVALID(pCurPoint->pLeftRow->key);
    }
    if (pCurPoint->pRightRow) {
      SET_WIN_KEY_INVALID(pCurPoint->pRightRow->key);
    }
  }

  int32_t nextVLen = 0;
  pNextPoint->key.groupId = groupId;
  if (pFillSup->type != TSDB_FILL_LINEAR && pFillSup->type != TSDB_FILL_PREV) {
    SET_WIN_KEY_INVALID(pNextPoint->key.ts);
    code = pAggSup->stateStore.streamStateFillGetNext(pState, &pCurPoint->key, &pNextPoint->key,
                                                      (void**)&pNextPoint->pResPos, &nextVLen, &tmpRes);
    QUERY_CHECK_CODE(code, lino, _end);
    if (tmpRes == TSDB_CODE_SUCCESS) {
      setPointBuff(pNextPoint, pFillSup);
    }
  } else {
    pNextPoint->key.ts = taosTimeAdd(pCurPoint->key.ts, pFillSup->interval.sliding, pFillSup->interval.slidingUnit,
                                     pFillSup->interval.precision);
    code = pAggSup->stateStore.streamStateFillAddIfNotExist(pState, &pNextPoint->key, (void**)&pNextPoint->pResPos,
                                                            &nextVLen, &tmpRes);
    QUERY_CHECK_CODE(code, lino, _end);
    setPointBuff(pNextPoint, pFillSup);
    // NOTE(review): unlike the current-point path above, the row pointers are
    // dereferenced here without NULL checks — presumably setPointBuff always
    // sets them for a freshly added entry; confirm.
    if (tmpRes != TSDB_CODE_SUCCESS) {
      SET_WIN_KEY_INVALID(pNextPoint->pLeftRow->key);
      SET_WIN_KEY_INVALID(pNextPoint->pRightRow->key);
    }
  }

_end:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }
  return code;
}

// Copies every non-interp, non-pseudo column (e.g. partition/group-by keys)
// from the current row into pFillInfo->pNonFillRow, so generated fill rows
// carry the same partition values.
static void copyNonFillValueInfo(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo) {
  for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) {
    SFillColInfo* pFillCol = pFillSup->pAllColInfo + i;
    if (!isInterpFunc(pFillCol->pExpr) && !isIrowtsPseudoColumn(pFillCol->pExpr) &&
        !isIsfilledPseudoColumn(pFillCol->pExpr)) {
      int32_t          srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId;
      SResultCellData* pSrcCell = getResultCell(&pFillSup->cur, srcSlot);
      SResultCellData* pDestCell = getResultCell(pFillInfo->pNonFillRow, srcSlot);
      pDestCell->isNull = pSrcCell->isNull;
      if (!pDestCell->isNull) {
        memcpy(pDestCell->pData, pSrcCell->pData, pSrcCell->bytes);
      }
    }
  }
}

// Copies the interp columns of pEndRow into the endpoint array used for linear
// interpolation (one SPoint per slot, keyed by the row's timestamp).
static void copyCalcRowDeltaData(SResultRowData* pEndRow, SArray* pEndPoins, SFillColInfo* pFillCol, int32_t numOfCol) {
  for (int32_t i = 0; i < numOfCol; i++) {
    if (isInterpFunc(pFillCol[i].pExpr)) {
      int32_t          slotId = pFillCol[i].pExpr->base.pParam[0].pCol->slotId;
      SResultCellData* pECell = getResultCell(pEndRow, slotId);
      SPoint*          pPoint = taosArrayGet(pEndPoins, slotId);
      pPoint->key = pEndRow->key;
      memcpy(pPoint->val, pECell->pData, pECell->bytes);
    }
  }
}

// Fill-rule setup for STREAM_TRIGGER_FORCE_WINDOW_CLOSE: decides whether the
// closing timestamp ts emits the stored row directly (FILL_POS_START) or a
// one-slot filled range. Only NULL/VALUE-style fills and PREV are supported;
// other types log an error and leave pFillInfo unchanged.
static void setForceWindowCloseFillRule(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo, TSKEY ts) {
  qDebug("===stream=== set force window close rule.ts:%" PRId64 ",cur key:%" PRId64 ", has prev%d, has next:%d", ts,
         pFillSup->cur.key, hasPrevWindow(pFillSup), hasNextWindow(pFillSup));
  pFillInfo->needFill = true;
  pFillInfo->pos = FILL_POS_INVALID;
  switch (pFillInfo->type) {
    case TSDB_FILL_NULL:
    case TSDB_FILL_NULL_F:
    case TSDB_FILL_SET_VALUE:
    case TSDB_FILL_SET_VALUE_F: {
      if (ts == pFillSup->cur.key) {
        // A real row exists exactly at ts: emit it, no filling needed.
        pFillInfo->pos = FILL_POS_START;
        pFillInfo->needFill = false;
      } else {
        pFillInfo->pos = FILL_POS_INVALID;
        setFillKeyInfo(ts, ts + 1, &pFillSup->interval, pFillInfo);
      }
      if (pFillSup->cur.pRowVal != NULL) {
        copyNonFillValueInfo(pFillSup, pFillInfo);
      }
    } break;
    case TSDB_FILL_PREV: {
      if (ts == pFillSup->cur.key) {
        pFillInfo->pos = FILL_POS_START;
        pFillInfo->needFill = false;
      } else if (ts > pFillSup->cur.key) {
        // Fill forward from the current (older) row.
        setFillKeyInfo(ts, ts + 1, &pFillSup->interval, pFillInfo);
        pFillInfo->pResRow = &pFillSup->cur;
      } else if (hasPrevWindow(pFillSup)) {
        pFillInfo->pos = FILL_POS_INVALID;
        setFillKeyInfo(ts, ts + 1, &pFillSup->interval, pFillInfo);
        pFillInfo->pResRow = &pFillSup->prev;
      } else {
        pFillInfo->needFill = false;
        pFillInfo->pos = FILL_POS_INVALID;
      }
    } break;
    default:
      qError("%s failed at line %d since invalid fill type", __func__, __LINE__);
      break;
  }
}

// Fill-rule setup for the normal (AT_ONCE) trigger: given the result key ts
// and the cur/prev/next windows loaded in pFillSup, decides the fill range,
// where the real row sits inside it (pos), and which stored row supplies the
// fill values (pResRow).
static void setTimeSliceFillRule(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo, TSKEY ts) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  if (IS_FILL_CONST_VALUE(pFillInfo->type)) {
    copyNonFillValueInfo(pFillSup, pFillInfo);
  }
  // Isolated point: nothing on either side, just emit the row itself.
  if (!hasNextWindow(pFillSup) && !hasPrevWindow(pFillSup)) {
    pFillInfo->needFill = false;
    pFillInfo->pos = FILL_POS_START;
    goto _end;
  }

  TSKEY prevWKey = INT64_MIN;
  TSKEY nextWKey = INT64_MIN;
  if (hasPrevWindow(pFillSup)) {
    prevWKey = pFillSup->prev.key;
  }
  if (hasNextWindow(pFillSup)) {
    nextWKey = pFillSup->next.key;
  }
  TSKEY endTs = adustEndTsKey(ts, pFillSup->cur.key, &pFillSup->interval);
  TSKEY startTs = adustPrevTsKey(ts, pFillSup->cur.key, &pFillSup->interval);

  pFillInfo->needFill = true;
  pFillInfo->pos = FILL_POS_INVALID;
  switch (pFillInfo->type) {
    case TSDB_FILL_NULL:
    case TSDB_FILL_NULL_F:
    case TSDB_FILL_SET_VALUE:
    case TSDB_FILL_SET_VALUE_F: {
      // prev is a result row but next is only a data point: fill on both sides
      // of the current row (FILL_POS_MID).
      if (hasPrevWindow(pFillSup) && hasNextWindow(pFillSup) && pFillInfo->preRowKey == pFillInfo->prePointKey &&
          pFillInfo->nextRowKey != pFillInfo->nextPointKey) {
        setFillKeyInfo(prevWKey, endTs, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_MID;
        pFillInfo->hasNext = true;
      } else if (hasPrevWindow(pFillSup)) {
        setFillKeyInfo(prevWKey, endTs, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_END;
      } else {
        setFillKeyInfo(startTs, nextWKey, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_START;
      }
      // copyNonFillValueInfo(pFillSup, pFillInfo);
    } break;
    case TSDB_FILL_PREV: {
      if (hasPrevWindow(pFillSup) && hasNextWindow(pFillSup) && pFillInfo->preRowKey != pFillInfo->prePointKey &&
          pFillInfo->nextRowKey == pFillInfo->nextPointKey) {
        setFillKeyInfo(prevWKey, endTs, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_MID;
        pFillInfo->hasNext = true;
      } else if (hasNextWindow(pFillSup)) {
        setFillKeyInfo(startTs, nextWKey, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_START;
        // The current row becomes the "previous" source for the forward fill.
        resetFillWindow(&pFillSup->prev);
        pFillSup->prev.key = ts;
        pFillSup->prev.pRowVal = pFillSup->cur.pRowVal;
      } else {
        QUERY_CHECK_CONDITION(hasPrevWindow(pFillSup), code, lino, _end, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR);
        setFillKeyInfo(prevWKey, endTs, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_END;
      }
      pFillInfo->pResRow = &pFillSup->prev;
    } break;
    case TSDB_FILL_NEXT: {
      if (hasPrevWindow(pFillSup) && hasNextWindow(pFillSup) && pFillInfo->preRowKey == pFillInfo->prePointKey &&
          pFillInfo->nextRowKey != pFillInfo->nextPointKey) {
        setFillKeyInfo(prevWKey, endTs, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_MID;
        pFillInfo->hasNext = true;
        pFillInfo->pResRow = &pFillSup->cur;
      } else if (hasPrevWindow(pFillSup)) {
        setFillKeyInfo(prevWKey, endTs, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_END;
        // The current row becomes the "next" source for the backward fill.
        resetFillWindow(&pFillSup->next);
        pFillSup->next.key = ts;
        pFillSup->next.pRowVal = pFillSup->cur.pRowVal;
        pFillInfo->pResRow = &pFillSup->next;
      } else {
        setFillKeyInfo(startTs, nextWKey, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_START;
        resetFillWindow(&pFillSup->prev);
        pFillInfo->pResRow = &pFillSup->next;
      }
    } break;
    case TSDB_FILL_LINEAR: {
      if (hasPrevWindow(pFillSup) && hasNextWindow(pFillSup)) {
        // Interpolate across the whole prev→next span; the current row is
        // produced by the interpolation itself, hence FILL_POS_INVALID.
        setFillKeyInfo(prevWKey, nextWKey, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_INVALID;
        SET_WIN_KEY_INVALID(pFillInfo->pLinearInfo->nextEnd);
        pFillSup->next.key = pFillSup->nextOriginKey;
        copyCalcRowDeltaData(&pFillSup->next, pFillInfo->pLinearInfo->pEndPoints, pFillSup->pAllColInfo,
                             pFillSup->numOfAllCols);
        pFillSup->prev.key = pFillSup->prevOriginKey;
        pFillInfo->pResRow = &pFillSup->prev;
        pFillInfo->pLinearInfo->hasNext = false;
      } else if (hasPrevWindow(pFillSup)) {
        setFillKeyInfo(prevWKey, endTs, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_END;
        SET_WIN_KEY_INVALID(pFillInfo->pLinearInfo->nextEnd);
        copyCalcRowDeltaData(&pFillSup->cur, pFillInfo->pLinearInfo->pEndPoints, pFillSup->pAllColInfo,
                             pFillSup->numOfAllCols);
        pFillSup->prev.key = pFillSup->prevOriginKey;
        pFillInfo->pResRow = &pFillSup->prev;
        pFillInfo->pLinearInfo->hasNext = false;
      } else {
        QUERY_CHECK_CONDITION(hasNextWindow(pFillSup), code, lino, _end, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR);
        setFillKeyInfo(startTs, nextWKey, &pFillSup->interval, pFillInfo);
        pFillInfo->pos = FILL_POS_START;
        SET_WIN_KEY_INVALID(pFillInfo->pLinearInfo->nextEnd);
        pFillSup->next.key = pFillSup->nextOriginKey;
        copyCalcRowDeltaData(&pFillSup->next, pFillInfo->pLinearInfo->pEndPoints, pFillSup->pAllColInfo,
                             pFillSup->numOfAllCols);
        pFillInfo->pResRow = &pFillSup->cur;
        pFillInfo->pLinearInfo->hasNext = false;
      }
    } break;
    default:
      qError("%s failed at line %d since invalid fill type", __func__, __LINE__);
      break;
  }

_end:
  // Runs on every exit path: if ts is not an actual stored row, no direct
  // output position is valid.
  if (ts != pFillSup->cur.key) {
    pFillInfo->pos = FILL_POS_INVALID;
  }
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }
}

// Compares the primary-key value stored immediately after the row payload
// (at offset rowSize) against pRight, using the supporter's comparator.
static int32_t comparePkVal(void* pLeft, void* pRight, SStreamFillSupporter* pFillSup) {
  void* pTmpVal = POINTER_SHIFT(pLeft, pFillSup->rowSize);
  return pFillSup->comparePkColFn(pTmpVal, pRight);
}

// Decides whether the incoming row (ts, pPkVal) should overwrite the left or
// right row slot of pPoint, according to the fill type's preference (PREV
// keeps the latest row <= boundary, NEXT the earliest row >= boundary, etc.).
// Primary-key comparison breaks ties at equal timestamps.
static bool needAdjustValue(SSlicePoint* pPoint, TSKEY ts, void* pPkVal, SStreamFillSupporter* pFillSup, bool isLeft,
                            int32_t fillType) {
  if (IS_INVALID_WIN_KEY(pPoint->key.ts)) {
    return false;
  }

  switch (fillType) {
    case TSDB_FILL_NULL:
    case TSDB_FILL_NULL_F:
    case TSDB_FILL_SET_VALUE:
    case TSDB_FILL_SET_VALUE_F: {
      // Only the row exactly at the point's key matters for constant fills.
      if (!isLeft) {
        if (HAS_NON_ROW_DATA(pPoint->pRightRow)) {
          return true;
        } else {
          if (pPoint->key.ts == ts) {
            if (pFillSup->comparePkColFn == NULL || comparePkVal(pPoint->pRightRow, pPkVal, pFillSup) >= 0) {
              return true;
            }
          }
        }
      }
    } break;
    case TSDB_FILL_PREV: {
      if (isLeft) {
        if (HAS_NON_ROW_DATA(pPoint->pLeftRow)) {
          return true;
        } else {
          if (pPoint->pLeftRow->key < ts) {
            return true;
          } else if (pPoint->pLeftRow->key == ts) {
            if (pFillSup->comparePkColFn == NULL || comparePkVal(pPoint->pLeftRow, pPkVal, pFillSup) >= 0) {
              return true;
            }
          }
        }
      }

      if (!isLeft && pPoint->key.ts == ts) {
        if (HAS_NON_ROW_DATA(pPoint->pLeftRow) || pFillSup->comparePkColFn == NULL ||
            comparePkVal(pPoint->pLeftRow, pPkVal, pFillSup) >= 0) {
          return true;
        }
      }
    } break;
    case TSDB_FILL_NEXT: {
      if (!isLeft) {
        if (HAS_NON_ROW_DATA(pPoint->pRightRow)) {
          return true;
        } else {
          if (pPoint->pRightRow->key > ts) {
            return true;
          } else if (pPoint->pRightRow->key == ts) {
            if (pFillSup->comparePkColFn == NULL || comparePkVal(pPoint->pRightRow, pPkVal, pFillSup) >= 0) {
              return true;
            }
          }
        }
      }
    } break;
    case TSDB_FILL_LINEAR: {
      // Linear interpolation needs both endpoints kept fresh.
      if (isLeft) {
        if (HAS_NON_ROW_DATA(pPoint->pLeftRow)) {
          return true;
        } else {
          if (pPoint->pLeftRow->key < ts) {
            return true;
          } else if (pPoint->pLeftRow->key == ts) {
            if (pFillSup->comparePkColFn == NULL || comparePkVal(pPoint->pLeftRow, pPkVal, pFillSup) >= 0) {
              return true;
            }
          }
        }
      } else {
        if (HAS_NON_ROW_DATA(pPoint->pRightRow)) {
          return true;
        } else {
          if (pPoint->pRightRow->key > ts) {
            return true;
          } else if (pPoint->pRightRow->key == ts) {
            if (pFillSup->comparePkColFn == NULL || comparePkVal(pPoint->pRightRow, pPkVal, pFillSup) >= 0) {
              return true;
            }
          }
        }
      }
    } break;
    default:
      qError("%s failed at line %d since invalid fill type", __func__, __LINE__);
  }

  return false;
}

// Serializes row rowId of pBlock into the slice row buffer pRowVal: copies
// every column cell (honoring var-length encoding and NULLs), stamps the key,
// and, when a primary key column is present, appends its value at offset
// rowSize behind the row payload (where comparePkVal() reads it back).
void transBlockToSliceResultRow(const SSDataBlock* pBlock, int32_t rowId, TSKEY ts, SSliceRowData* pRowVal,
                                int32_t rowSize, void* pPkData, SColumnInfoData* pPkCol) {
  int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
  for (int32_t i = 0; i < numOfCols; ++i) {
    SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
    SResultCellData* pCell = getSliceResultCell((SResultCellData*)pRowVal->pRowVal, i);
    if (!colDataIsNull_s(pColData, rowId)) {
      pCell->isNull = false;
      pCell->type = pColData->info.type;
      pCell->bytes = pColData->info.bytes;
      char* val = colDataGetData(pColData, rowId);
      if (IS_VAR_DATA_TYPE(pCell->type)) {
        memcpy(pCell->pData, val, varDataTLen(val));
      } else {
        memcpy(pCell->pData, val, pCell->bytes);
      }
    } else {
      pCell->isNull = true;
    }
  }
  pRowVal->key = ts;
  if (pPkData != NULL) {
    void* pPkVal = POINTER_SHIFT(pRowVal, rowSize);
    if (IS_VAR_DATA_TYPE(pPkCol->info.type)) {
      memcpy(pPkVal, pPkData, varDataTLen(pPkData));
    } else {
      memcpy(pPkVal, pPkData, pPkCol->info.bytes);
    }
  }
}

// Records a finished slice window according to the trigger mode: AT_ONCE
// stores the key in pUpdatedMap (and pDeletedMap when needDel), while
// FORCE_WINDOW_CLOSE only registers the group in the state store. Also
// advances the max-timestamp watermark.
static int32_t saveTimeSliceWinResultInfo(SStreamAggSupporter* pAggSup, STimeWindowAggSupp* pTwAggSup, SWinKey* pKey,
                                          SSHashObj* pUpdatedMap, bool needDel, SSHashObj* pDeletedMap) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;

  if (pTwAggSup->calTrigger == STREAM_TRIGGER_AT_ONCE) {
    code = saveTimeSliceWinResult(pKey, pUpdatedMap);
    QUERY_CHECK_CODE(code, lino, _end);
    if (needDel) {
      code = saveTimeSliceWinResult(pKey, pDeletedMap);
      QUERY_CHECK_CODE(code, lino, _end);
    }
  } else if (pTwAggSup->calTrigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) {
    code = pAggSup->stateStore.streamStateGroupPut(pAggSup->pState, pKey->groupId, NULL, 0);
    QUERY_CHECK_CODE(code, lino, _end);
  }
  pTwAggSup->maxTs = TMAX(pTwAggSup->maxTs, pKey->ts);

_end:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }
  return code;
}

+static void doStreamTimeSliceImpl(SOperatorInfo* pOperator, SSDataBlock* pBlock) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int32_t winCode = TSDB_CODE_SUCCESS; + SStreamTimeSliceOperatorInfo* pInfo = (SStreamTimeSliceOperatorInfo*)pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; + SExprSupp* pExprSup = &pOperator->exprSupp; + int32_t numOfOutput = pExprSup->numOfExprs; + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex); + TSKEY* tsCols = (int64_t*)pColDataInfo->pData; + void* pPkVal = NULL; + int32_t pkLen = 0; + int64_t groupId = pBlock->info.id.groupId; + SColumnInfoData* pPkColDataInfo = NULL; + SStreamFillSupporter* pFillSup = pInfo->pFillSup; + SStreamFillInfo* pFillInfo = pInfo->pFillInfo; + if (hasSrcPrimaryKeyCol(&pInfo->basic)) { + pPkColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->basic.primaryPkIndex); + } + + pFillSup->winRange = pTaskInfo->streamInfo.fillHistoryWindow; + if (pFillSup->winRange.ekey <= 0) { + pFillSup->winRange.ekey = INT64_MAX; + } + + int32_t startPos = 0; + for (; startPos < pBlock->info.rows; startPos++) { + if (hasSrcPrimaryKeyCol(&pInfo->basic) && pInfo->ignoreExpiredData) { + pPkVal = colDataGetData(pPkColDataInfo, startPos); + pkLen = colDataGetRowLength(pPkColDataInfo, startPos); + } + + if (pInfo->twAggSup.calTrigger != STREAM_TRIGGER_FORCE_WINDOW_CLOSE && pInfo->ignoreExpiredData && + checkExpiredData(&pAggSup->stateStore, pAggSup->pUpdateInfo, &pInfo->twAggSup, pBlock->info.id.uid, + tsCols[startPos], pPkVal, pkLen)) { + qDebug("===stream===ignore expired data, window end ts:%" PRId64 ", maxts - wartermak:%" PRId64, tsCols[startPos], + pInfo->twAggSup.maxTs - pInfo->twAggSup.waterMark); + continue; + } + + if (checkNullRow(pExprSup, pBlock, startPos, pInfo->ignoreNull)) { + continue; + } + break; + } + + if (startPos >= pBlock->info.rows) { + return; + } + + SResultRowInfo dumyInfo = {0}; 
+ dumyInfo.cur.pageId = -1; + STimeWindow curWin = getActiveTimeWindow(NULL, &dumyInfo, tsCols[startPos], &pFillSup->interval, TSDB_ORDER_ASC); + SSlicePoint curPoint = {0}; + SSlicePoint nextPoint = {0}; + bool left = false; + bool right = false; + if (pFillSup->type != TSDB_FILL_PREV || curWin.skey == tsCols[startPos]) { + code = getPointInfoFromState(pAggSup, pFillSup, curWin.skey, groupId, &curPoint, &nextPoint, &winCode); + } else { + code = getPointInfoFromStateRight(pAggSup, pFillSup, curWin.skey, groupId, &curPoint, &nextPoint, &winCode); + } + QUERY_CHECK_CODE(code, lino, _end); + + if (hasSrcPrimaryKeyCol(&pInfo->basic)) { + pPkVal = colDataGetData(pPkColDataInfo, startPos); + } + right = needAdjustValue(&curPoint, tsCols[startPos], pPkVal, pFillSup, false, pFillSup->type); + if (right) { + transBlockToSliceResultRow(pBlock, startPos, tsCols[startPos], curPoint.pRightRow, pFillSup->rowSize, pPkVal, pPkColDataInfo); + bool needDel = pInfo->destHasPrimaryKey && winCode == TSDB_CODE_SUCCESS; + code = saveTimeSliceWinResultInfo(pAggSup, &pInfo->twAggSup, &curPoint.key, pInfo->pUpdatedMap, needDel, + pInfo->pDeletedMap); + QUERY_CHECK_CODE(code, lino, _end); + } + releaseOutputBuf(pAggSup->pState, curPoint.pResPos, &pAggSup->stateStore); + + while (startPos < pBlock->info.rows) { + int32_t numOfWin = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, curWin.ekey, binarySearchForKey, NULL, + TSDB_ORDER_ASC); + startPos += numOfWin; + int32_t leftRowId = getQualifiedRowNumDesc(pExprSup, pBlock, tsCols, startPos - 1, pInfo->ignoreNull); + QUERY_CHECK_CONDITION((leftRowId >= 0), code, lino, _end, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR); + if (hasSrcPrimaryKeyCol(&pInfo->basic)) { + pPkVal = colDataGetData(pPkColDataInfo, leftRowId); + } + left = needAdjustValue(&nextPoint, tsCols[leftRowId], pPkVal, pFillSup, true, pFillSup->type); + if (left) { + transBlockToSliceResultRow(pBlock, leftRowId, tsCols[leftRowId], nextPoint.pLeftRow, pFillSup->rowSize, pPkVal, 
pPkColDataInfo); + bool needDel = pInfo->destHasPrimaryKey && winCode == TSDB_CODE_SUCCESS; + code = saveTimeSliceWinResultInfo(pAggSup, &pInfo->twAggSup, &nextPoint.key, pInfo->pUpdatedMap, + needDel, pInfo->pDeletedMap); + QUERY_CHECK_CODE(code, lino, _end); + } + releaseOutputBuf(pAggSup->pState, nextPoint.pResPos, &pAggSup->stateStore); + + startPos = getQualifiedRowNumAsc(pExprSup, pBlock, startPos, pInfo->ignoreNull); + if (startPos < 0) { + break; + } + curWin = getActiveTimeWindow(NULL, &dumyInfo, tsCols[startPos], &pFillSup->interval, TSDB_ORDER_ASC); + if (pFillSup->type != TSDB_FILL_PREV || curWin.skey == tsCols[startPos]) { + code = getPointInfoFromState(pAggSup, pFillSup, curWin.skey, groupId, &curPoint, &nextPoint, &winCode); + } else { + code = getPointInfoFromStateRight(pAggSup, pFillSup, curWin.skey, groupId, &curPoint, &nextPoint, &winCode); + } + QUERY_CHECK_CODE(code, lino, _end); + + if (hasSrcPrimaryKeyCol(&pInfo->basic)) { + pPkVal = colDataGetData(pPkColDataInfo, startPos); + } + right = needAdjustValue(&curPoint, tsCols[startPos], pPkVal, pFillSup, false, pFillSup->type); + if (right) { + transBlockToSliceResultRow(pBlock, startPos, tsCols[startPos], curPoint.pRightRow, pFillSup->rowSize, pPkVal, pPkColDataInfo); + bool needDel = pInfo->destHasPrimaryKey && winCode == TSDB_CODE_SUCCESS; + code = saveTimeSliceWinResultInfo(pAggSup, &pInfo->twAggSup, &curPoint.key, pInfo->pUpdatedMap, needDel, + pInfo->pDeletedMap); + QUERY_CHECK_CODE(code, lino, _end); + } + releaseOutputBuf(pAggSup->pState, curPoint.pResPos, &pAggSup->stateStore); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } +} + +void getNextResKey(int64_t curGroupId, SArray* pKeyArray, int32_t curIndex, TSKEY* pNextKey) { + int32_t nextIndex = curIndex + 1; + if (nextIndex < taosArrayGetSize(pKeyArray)) { + SWinKey* pKey = (SWinKey*)taosArrayGet(pKeyArray, nextIndex); + if (pKey->groupId == curGroupId) { + 
*pNextKey = pKey->ts; + return; + } + } + *pNextKey = INT64_MIN; +} + +void getPrevResKey(int64_t curGroupId, SArray* pKeyArray, int32_t curIndex, TSKEY* pNextKey) { + int32_t prevIndex = curIndex - 1; + if (prevIndex >= 0) { + SWinKey* pKey = (SWinKey*)taosArrayGet(pKeyArray, prevIndex); + if (pKey->groupId == curGroupId) { + *pNextKey = pKey->ts; + return; + } + } + *pNextKey = INT64_MIN; +} + +void doBuildTimeSlicePointResult(SStreamAggSupporter* pAggSup, STimeWindowAggSupp* pTwSup, SStreamFillSupporter* pFillSup, + SStreamFillInfo* pFillInfo, SSDataBlock* pBlock, SGroupResInfo* pGroupResInfo) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + blockDataCleanup(pBlock); + if (!hasRemainResults(pGroupResInfo)) { + return; + } + + // clear the existed group id + pBlock->info.id.groupId = 0; + int32_t numOfRows = getNumOfTotalRes(pGroupResInfo); + for (; pGroupResInfo->index < numOfRows; pGroupResInfo->index++) { + SWinKey* pKey = (SWinKey*)taosArrayGet(pGroupResInfo->pRows, pGroupResInfo->index); + qDebug("===stream=== build interp res. 
key:%" PRId64 ",groupId:%" PRIu64, pKey->ts, pKey->groupId); + if (pBlock->info.id.groupId == 0) { + pBlock->info.id.groupId = pKey->groupId; + } else if (pBlock->info.id.groupId != pKey->groupId) { + if (pBlock->info.rows > 0) { + break; + } else { + pBlock->info.id.groupId = pKey->groupId; + } + } + SSlicePoint curPoint = {.key.ts = pKey->ts, .key.groupId = pKey->groupId}; + SSlicePoint prevPoint = {0}; + SSlicePoint nextPoint = {0}; + if (pFillSup->type != TSDB_FILL_LINEAR) { + code = getResultInfoFromState(pAggSup, pFillSup, pKey->ts, pKey->groupId, &curPoint, &prevPoint, &nextPoint); + } else { + code = + getLinearResultInfoFromState(pAggSup, pFillSup, pKey->ts, pKey->groupId, &curPoint, &prevPoint, &nextPoint); + } + QUERY_CHECK_CODE(code, lino, _end); + + if (pFillSup->type != TSDB_FILL_LINEAR) { + getPrevResKey(pKey->groupId, pGroupResInfo->pRows, pGroupResInfo->index, &pFillInfo->preRowKey); + if (hasPrevWindow(pFillSup)) { + pFillInfo->prePointKey = prevPoint.key.ts; + } + + getNextResKey(pKey->groupId, pGroupResInfo->pRows, pGroupResInfo->index, &pFillInfo->nextRowKey); + if (hasNextWindow(pFillSup)) { + pFillInfo->nextPointKey = nextPoint.key.ts; + } + } + + if (pTwSup->calTrigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + setForceWindowCloseFillRule(pFillSup, pFillInfo, pKey->ts); + } else { + setTimeSliceFillRule(pFillSup, pFillInfo, pKey->ts); + } + doStreamFillRange(pFillSup, pFillInfo, pBlock); + releaseOutputBuf(pAggSup->pState, curPoint.pResPos, &pAggSup->stateStore); + releaseOutputBuf(pAggSup->pState, prevPoint.pResPos, &pAggSup->stateStore); + releaseOutputBuf(pAggSup->pState, nextPoint.pResPos, &pAggSup->stateStore); + if (pBlock->info.rows >= pBlock->info.capacity) { + pGroupResInfo->index++; + break; + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } +} + +static void doBuildTimeSliceDeleteResult(SStreamAggSupporter* pAggSup, SStreamFillSupporter* pFillSup, 
SArray* pWins, int32_t* index, SSDataBlock* pBlock) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + blockDataCleanup(pBlock); + int32_t size = taosArrayGetSize(pWins); + if (*index == size) { + *index = 0; + taosArrayClear(pWins); + goto _end; + } + code = blockDataEnsureCapacity(pBlock, size - *index); + QUERY_CHECK_CODE(code, lino, _end); + + uint64_t uid = 0; + for (int32_t i = *index; i < size; i++) { + SWinKey* pKey = taosArrayGet(pWins, i); + SSlicePoint curPoint = {.key.ts = pKey->ts, .key.groupId = pKey->groupId}; + SSlicePoint prevPoint = {0}; + SSlicePoint nextPoint = {0}; + STimeWindow tw = {0}; + if (pFillSup->type != TSDB_FILL_LINEAR) { + code = getResultInfoFromState(pAggSup, pFillSup, pKey->ts, pKey->groupId, &curPoint, &prevPoint, &nextPoint); + } else { + code = + getLinearResultInfoFromState(pAggSup, pFillSup, pKey->ts, pKey->groupId, &curPoint, &prevPoint, &nextPoint); + } + QUERY_CHECK_CODE(code, lino, _end); + + if (pFillSup->type == TSDB_FILL_PREV && hasNextWindow(pFillSup)) { + tw.skey = pFillSup->cur.key; + tw.ekey = pFillSup->next.key; + } else if (pFillSup->type == TSDB_FILL_NEXT && hasPrevWindow(pFillSup)) { + tw.skey = pFillSup->prev.key; + tw.ekey = pFillSup->cur.key; + } else if (pFillSup->type == TSDB_FILL_LINEAR) { + if (hasPrevWindow(pFillSup)) { + tw.skey = pFillSup->prev.key; + } else { + tw.skey = pFillSup->cur.key; + } + if (hasNextWindow(pFillSup)) { + tw.ekey = pFillSup->next.key; + } else { + tw.ekey = pFillSup->cur.key; + } + } else { + tw.skey = pFillSup->cur.key; + tw.ekey = pFillSup->cur.key; + } + + if (tw.skey == INT64_MIN || tw.ekey == INT64_MIN) { + continue; + } + + releaseOutputBuf(pAggSup->pState, curPoint.pResPos, &pAggSup->stateStore); + releaseOutputBuf(pAggSup->pState, prevPoint.pResPos, &pAggSup->stateStore); + releaseOutputBuf(pAggSup->pState, nextPoint.pResPos, &pAggSup->stateStore); + + void* tbname = NULL; + int32_t winCode = TSDB_CODE_SUCCESS; + + code = 
pAggSup->stateStore.streamStateGetParName(pAggSup->pState, pKey->groupId, &tbname, false, &winCode); + QUERY_CHECK_CODE(code, lino, _end); + + if (winCode != TSDB_CODE_SUCCESS) { + code = appendDataToSpecialBlock(pBlock, &tw.skey, &tw.ekey, &uid, &pKey->groupId, NULL); + QUERY_CHECK_CODE(code, lino, _end); + } else { + QUERY_CHECK_CONDITION((tbname), code, lino, _end, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR); + char parTbName[VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN]; + STR_WITH_MAXSIZE_TO_VARSTR(parTbName, tbname, sizeof(parTbName)); + code = appendDataToSpecialBlock(pBlock, &tw.skey, &tw.ekey, &uid, &pKey->groupId, parTbName); + QUERY_CHECK_CODE(code, lino, _end); + } + pAggSup->stateStore.streamStateFreeVal(tbname); + (*index)++; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } +} + +static int32_t buildTimeSliceResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamTimeSliceOperatorInfo* pInfo = pOperator->info; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + uint16_t opType = pOperator->operatorType; + SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; + + + doBuildTimeSliceDeleteResult(pAggSup, pInfo->pFillSup, pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes); + if (pInfo->pDelRes->info.rows != 0) { + // process the rest of the data + printDataBlock(pInfo->pDelRes, getStreamOpName(opType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->pDelRes; + goto _end; + } + + doBuildTimeSlicePointResult(pAggSup, &pInfo->twAggSup, pInfo->pFillSup, pInfo->pFillInfo, pInfo->pRes, &pInfo->groupResInfo); + if (pInfo->pRes->info.rows != 0) { + printDataBlock(pInfo->pRes, getStreamOpName(opType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->pRes; + goto _end; + } + + (*ppRes) = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + 
// Collects, for each group in pAllWins, the entry with the maximum timestamp
// into pMaxWins. Walks backwards from the end, taking the first entry seen per
// group — assumes pAllWins is ordered by (groupId, ts); TODO confirm ordering.
// A trailing groupId of 0 short-circuits after the single push.
int32_t getSliceMaxTsWins(const SArray* pAllWins, SArray* pMaxWins) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  int32_t size = taosArrayGetSize(pAllWins);
  if (size == 0) {
    goto _end;
  }
  SWinKey* pKey = taosArrayGet(pAllWins, size - 1);
  void*    tmp = taosArrayPush(pMaxWins, pKey);
  QUERY_CHECK_NULL(tmp, code, lino, _end, terrno);

  if (pKey->groupId == 0) {
    goto _end;
  }
  uint64_t preGpId = pKey->groupId;
  for (int32_t i = size - 2; i >= 0; i--) {
    pKey = taosArrayGet(pAllWins, i);
    if (preGpId != pKey->groupId) {
      void* p = taosArrayPush(pMaxWins, pKey);
      QUERY_CHECK_NULL(p, code, lino, _end, terrno);
      preGpId = pKey->groupId;
    }
  }

_end:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }
  return code;
}

// For every delete row in pBlock (start ts, end ts, group id), removes all
// stored fill-state entries whose key lies in [ts, endCalTs] for that group,
// and drops the matching keys from pUpdatedMap so they are not re-emitted.
// The successor key is fetched BEFORE deleting the current key.
static int32_t doDeleteTimeSliceResult(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SSHashObj* pUpdatedMap) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  int32_t winCode = TSDB_CODE_SUCCESS;

  SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
  uint64_t*        groupIds = (uint64_t*)pGroupCol->pData;
  SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
  TSKEY*           tsStarts = (TSKEY*)pStartCol->pData;
  SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
  TSKEY*           tsEnds = (TSKEY*)pEndCol->pData;
  for (int32_t i = 0; i < pBlock->info.rows; i++) {
    TSKEY    ts = tsStarts[i];
    TSKEY    endCalTs = tsEnds[i];
    uint64_t groupId = groupIds[i];
    SWinKey  key = {.ts = ts, .groupId = groupId};
    while (1) {
      SWinKey nextKey = {.groupId = groupId};
      code = pAggSup->stateStore.streamStateFillGetNext(pAggSup->pState, &key, &nextKey, NULL, NULL, &winCode);
      QUERY_CHECK_CODE(code, lino, _end);
      if (key.ts > endCalTs) {
        break;
      }
      int32_t tmpRes = tSimpleHashRemove(pUpdatedMap, &key, sizeof(SWinKey));
      qTrace("%s delete stream interp result at line %d res: %s", __func__, __LINE__, tstrerror(tmpRes));

      pAggSup->stateStore.streamStateDel(pAggSup->pState, &key);
      if (winCode != TSDB_CODE_SUCCESS) {
        // No successor existed; the range is exhausted.
        break;
      }
      key = nextKey;
    }
  }

_end:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }
  return code;
}

// Registers the result key (ts, groupId) in pUpdatedMap for EVERY group known
// to the state store (used by force-window-close to emit all groups at a
// closing timestamp). The cursor is freed exactly once on each path.
int32_t setAllResultKey(SStreamAggSupporter* pAggSup, TSKEY ts, SSHashObj* pUpdatedMap) {
  int32_t          code = TSDB_CODE_SUCCESS;
  int32_t          lino = 0;
  int64_t          groupId = 0;
  SStreamStateCur* pCur = pAggSup->stateStore.streamStateGroupGetCur(pAggSup->pState);
  while (1) {
    int32_t winCode = pAggSup->stateStore.streamStateGroupGetKVByCur(pCur, &groupId, NULL, NULL);
    if (winCode != TSDB_CODE_SUCCESS) {
      break;
    }
    SWinKey key = {.ts = ts, .groupId = groupId};
    code = saveTimeSliceWinResult(&key, pUpdatedMap);
    QUERY_CHECK_CODE(code, lino, _end);

    pAggSup->stateStore.streamStateGroupCurNext(pCur);
  }
  pAggSup->stateStore.streamStateFreeCur(pCur);
  pCur = NULL;

_end:
  if (code != TSDB_CODE_SUCCESS) {
    // Error path: the cursor was not yet freed above.
    pAggSup->stateStore.streamStateFreeCur(pCur);
    pCur = NULL;
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }
  return code;
}

// Sorts the timestamp array ascending and removes duplicates in place.
static void removeDuplicateTs(SArray* pTsArrray) {
  __compar_fn_t fn = getKeyComparFunc(TSDB_DATA_TYPE_TIMESTAMP, TSDB_ORDER_ASC);
  taosArraySort(pTsArrray, fn);
  taosArrayRemoveDuplicate(pTsArrray, fn, NULL);
}

static int32_t doStreamTimeSliceNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
  int32_t                       code = TSDB_CODE_SUCCESS;
  int32_t                       lino = 0;
  SStreamTimeSliceOperatorInfo* pInfo = pOperator->info;
  SExecTaskInfo*                pTaskInfo = pOperator->pTaskInfo;
  SStreamAggSupporter*          pAggSup = &pInfo->streamAggSup;

  if (pOperator->status == OP_EXEC_DONE) {
    (*ppRes) = NULL;
    goto _end;
  }

  if (pOperator->status == OP_RES_TO_RETURN) {
    if (hasRemainCalc(pInfo->pFillInfo) ||
        (pInfo->pFillInfo->pos != FILL_POS_INVALID && pInfo->pFillInfo->needFill == true)) {
blockDataCleanup(pInfo->pRes); + doStreamFillRange(pInfo->pFillSup, pInfo->pFillInfo, pInfo->pRes); + if (pInfo->pRes->info.rows > 0) { + printDataBlock(pInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->pRes; + goto _end; + } + } + + SSDataBlock* resBlock = NULL; + code = buildTimeSliceResult(pOperator, &resBlock); + QUERY_CHECK_CODE(code, lino, _end); + + if (resBlock != NULL) { + (*ppRes) = resBlock; + goto _end; + } + + if (pInfo->recvCkBlock) { + pInfo->recvCkBlock = false; + printDataBlock(pInfo->pCheckpointRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->pCheckpointRes; + goto _end; + } + + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + pAggSup->stateStore.streamStateClearExpiredState(pAggSup->pState); + } + setStreamOperatorCompleted(pOperator); + resetStreamFillSup(pInfo->pFillSup); + (*ppRes) = NULL; + goto _end; + } + + SSDataBlock* fillResult = NULL; + SOperatorInfo* downstream = pOperator->pDownstream[0]; + while (1) { + SSDataBlock* pBlock = getNextBlockFromDownstream(pOperator, 0); + if (pBlock == NULL) { + pOperator->status = OP_RES_TO_RETURN; + qDebug("===stream===return data:%s. 
recv datablock num:%" PRIu64, getStreamOpName(pOperator->operatorType), + pInfo->numOfDatapack); + pInfo->numOfDatapack = 0; + break; + } + pInfo->numOfDatapack++; + printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "recv", GET_TASKID(pTaskInfo)); + setStreamOperatorState(&pInfo->basic, pBlock->info.type); + + switch (pBlock->info.type) { + case STREAM_DELETE_RESULT: + case STREAM_DELETE_DATA: { + code = doDeleteTimeSliceResult(pAggSup, pBlock, pInfo->pUpdatedMap); + QUERY_CHECK_CODE(code, lino, _end); + code = copyDataBlock(pInfo->pDelRes, pBlock); + QUERY_CHECK_CODE(code, lino, _end); + pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; + (*ppRes) = pInfo->pDelRes; + printDataBlock((*ppRes), getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + goto _end; + } break; + case STREAM_NORMAL: + case STREAM_INVALID: { + SExprSupp* pExprSup = &pInfo->scalarSup; + if (pExprSup->pExprInfo != NULL) { + code = projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL); + QUERY_CHECK_CODE(code, lino, _end); + } + } break; + case STREAM_CHECKPOINT: { + pInfo->recvCkBlock = true; + pAggSup->stateStore.streamStateCommit(pAggSup->pState); + doStreamTimeSliceSaveCheckpoint(pOperator); + code = copyDataBlock(pInfo->pCheckpointRes, pBlock); + QUERY_CHECK_CODE(code, lino, _end); + continue; + } break; + case STREAM_CREATE_CHILD_TABLE: { + (*ppRes) = pBlock; + goto _end; + } break; + case STREAM_GET_RESULT: { + void* pPushRes = taosArrayPush(pInfo->pCloseTs, &pBlock->info.window.skey); + QUERY_CHECK_NULL(pPushRes, code, lino, _end, terrno); + continue; + } + default: + code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + QUERY_CHECK_CODE(code, lino, _end); + } + + doStreamTimeSliceImpl(pOperator, pBlock); + pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); + } + + if (pInfo->destHasPrimaryKey) { + code = copyIntervalDeleteKey(pInfo->pDeletedMap, pInfo->pDelWins); + 
QUERY_CHECK_CODE(code, lino, _end); + } + + if (taosArrayGetSize(pInfo->pCloseTs) > 0) { + removeDuplicateTs(pInfo->pCloseTs); + int32_t size = taosArrayGetSize(pInfo->pCloseTs); + qDebug("===stream===build stream result, ts count:%d", size); + for (int32_t i = 0; i < size; i++) { + TSKEY ts = *(TSKEY*) taosArrayGet(pInfo->pCloseTs, i); + code = buildAllResultKey(&pInfo->streamAggSup, ts, pInfo->pUpdated); + QUERY_CHECK_CODE(code, lino, _end); + } + qDebug("===stream===build stream result, res count:%ld", taosArrayGetSize(pInfo->pUpdated)); + taosArrayClear(pInfo->pCloseTs); + if (size > 1024) { + taosArrayDestroy(pInfo->pCloseTs); + pInfo->pCloseTs = taosArrayInit(1024, sizeof(TSKEY)); + } + } else { + void* pIte = NULL; + int32_t iter = 0; + while ((pIte = tSimpleHashIterate(pInfo->pUpdatedMap, pIte, &iter)) != NULL) { + SWinKey* pKey = (SWinKey*)tSimpleHashGetKey(pIte, NULL); + void* tmp = taosArrayPush(pInfo->pUpdated, pKey); + QUERY_CHECK_NULL(tmp, code, lino, _end, terrno); + } + } + taosArraySort(pInfo->pUpdated, winKeyCmprImpl); + + if (pInfo->isHistoryOp) { + code = getSliceMaxTsWins(pInfo->pUpdated, pInfo->historyWins); + QUERY_CHECK_CODE(code, lino, _end); + } + + initMultiResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated); + pInfo->groupResInfo.freeItem = false; + + pInfo->pUpdated = taosArrayInit(16, sizeof(SWinKey)); + QUERY_CHECK_NULL(pInfo->pUpdated, code, lino, _end, terrno); + + code = blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); + QUERY_CHECK_CODE(code, lino, _end); + + tSimpleHashCleanup(pInfo->pUpdatedMap); + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pUpdatedMap = tSimpleHashInit(1024, hashFn); + + code = buildTimeSliceResult(pOperator, ppRes); + QUERY_CHECK_CODE(code, lino, _end); + + if (!(*ppRes)) { + if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + pAggSup->stateStore.streamStateClearExpiredState(pAggSup->pState); + } + 
setStreamOperatorCompleted(pOperator); + resetStreamFillSup(pInfo->pFillSup); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static void copyFillValueInfo(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo) { + if (pFillInfo->type == TSDB_FILL_SET_VALUE || pFillInfo->type == TSDB_FILL_SET_VALUE_F) { + int32_t valueIndex = 0; + for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) { + SFillColInfo* pFillCol = pFillSup->pAllColInfo + i; + if (!isInterpFunc(pFillCol->pExpr)) { + continue; + } + int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId; + SResultCellData* pCell = getResultCell(pFillInfo->pResRow, srcSlot); + SFillColInfo* pValueCol = pFillSup->pAllColInfo + valueIndex; + SVariant* pVar = &(pValueCol->fillVal); + if (pCell->type == TSDB_DATA_TYPE_FLOAT) { + float v = 0; + GET_TYPED_DATA(v, float, pVar->nType, &pVar->i); + SET_TYPED_DATA(pCell->pData, pCell->type, v); + } else if (IS_FLOAT_TYPE(pCell->type)) { + double v = 0; + GET_TYPED_DATA(v, double, pVar->nType, &pVar->i); + SET_TYPED_DATA(pCell->pData, pCell->type, v); + } else if (IS_INTEGER_TYPE(pCell->type)) { + int64_t v = 0; + GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i); + SET_TYPED_DATA(pCell->pData, pCell->type, v); + } else { + pCell->isNull = true; + } + valueIndex++; + } + } else if (pFillInfo->type == TSDB_FILL_NULL || pFillInfo->type == TSDB_FILL_NULL_F) { + for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) { + SFillColInfo* pFillCol = pFillSup->pAllColInfo + i; + int32_t slotId = GET_DEST_SLOT_ID(pFillCol); + SResultCellData* pCell = getResultCell(pFillInfo->pResRow, slotId); + pCell->isNull = true; + } + } +} + +int32_t getDownstreamRes(SOperatorInfo* downstream, SSDataBlock** ppRes, SColumnInfo** ppPkCol) { + if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + SStreamScanInfo* pInfo = (SStreamScanInfo*)downstream->info; + *ppRes = 
pInfo->pRes; + if (hasSrcPrimaryKeyCol(&pInfo->basic)) { + SColumnInfoData* pPkColInfo = taosArrayGet(pInfo->pRes->pDataBlock, pInfo->basic.primaryPkIndex); + (*ppPkCol) = &pPkColInfo->info; + } + return TSDB_CODE_SUCCESS; + } else if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION) { + SStreamPartitionOperatorInfo* pInfo = (SStreamPartitionOperatorInfo*)downstream->info; + *ppRes = pInfo->binfo.pRes; + if (hasSrcPrimaryKeyCol(&pInfo->basic)) { + SColumnInfoData* pPkColInfo = taosArrayGet(pInfo->binfo.pRes->pDataBlock, pInfo->basic.primaryPkIndex); + (*ppPkCol) = &pPkColInfo->info; + } + return TSDB_CODE_SUCCESS; + } + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(TSDB_CODE_FAILED)); + return TSDB_CODE_FAILED; +} + +int32_t initTimeSliceDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, uint16_t type, + int32_t tsColIndex, STimeWindowAggSupp* pTwSup, struct SSteamOpBasicInfo* pBasic, + SStreamFillSupporter* pFillSup) { + SExecTaskInfo* pTaskInfo = downstream->pTaskInfo; + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION) { + SStreamPartitionOperatorInfo* pPartionInfo = downstream->info; + pPartionInfo->tsColIndex = tsColIndex; + pBasic->primaryPkIndex = pPartionInfo->basic.primaryPkIndex; + } + + if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { + code = initTimeSliceDownStream(downstream->pDownstream[0], pAggSup, type, tsColIndex, pTwSup, pBasic, pFillSup); + return code; + } + SStreamScanInfo* pScanInfo = downstream->info; + pScanInfo->igCheckUpdate = true; + pScanInfo->windowSup = (SWindowSupporter){.pStreamAggSup = pAggSup, .gap = pAggSup->gap, .parentType = type}; + pScanInfo->pState = pAggSup->pState; + if (!pScanInfo->pUpdateInfo) { + code = pAggSup->stateStore.updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, pTwSup->waterMark, + pScanInfo->igCheckUpdate, pScanInfo->pkColType, 
pScanInfo->pkColLen, + &pScanInfo->pUpdateInfo); + QUERY_CHECK_CODE(code, lino, _end); + } + pScanInfo->twAggSup = *pTwSup; + pScanInfo->pFillSup = pFillSup; + pScanInfo->interval = pFillSup->interval; + pAggSup->pUpdateInfo = pScanInfo->pUpdateInfo; + if (!hasSrcPrimaryKeyCol(pBasic)) { + pBasic->primaryPkIndex = pScanInfo->basic.primaryPkIndex; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s. task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + } + return code; +} + +int32_t createStreamTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, + SReadHandle* pHandle, SOperatorInfo** ppOptInfo) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamTimeSliceOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamTimeSliceOperatorInfo)); + QUERY_CHECK_NULL(pInfo, code, lino, _error, terrno); + + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + QUERY_CHECK_NULL(pOperator, code, lino, _error, terrno); + + SStreamInterpFuncPhysiNode* pInterpPhyNode = (SStreamInterpFuncPhysiNode*)pPhyNode; + pOperator->pTaskInfo = pTaskInfo; + initResultSizeInfo(&pOperator->resultInfo, 4096); + SExprSupp* pExpSup = &pOperator->exprSupp; + int32_t numOfExprs = 0; + SExprInfo* pExprInfo = NULL; + code = createExprInfo(pInterpPhyNode->pFuncs, NULL, &pExprInfo, &numOfExprs); + QUERY_CHECK_CODE(code, lino, _error); + + code = initExprSupp(pExpSup, pExprInfo, numOfExprs, &pTaskInfo->storageAPI.functionStore); + QUERY_CHECK_CODE(code, lino, _error); + + if (pInterpPhyNode->pExprs != NULL) { + int32_t num = 0; + SExprInfo* pScalarExprInfo = NULL; + code = createExprInfo(pInterpPhyNode->pExprs, NULL, &pScalarExprInfo, &num); + QUERY_CHECK_CODE(code, lino, _error); + + code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, num, &pTaskInfo->storageAPI.functionStore); + QUERY_CHECK_CODE(code, lino, _error); + } + + code = 
filterInitFromNode((SNode*)pInterpPhyNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); + QUERY_CHECK_CODE(code, lino, _error); + + pInfo->twAggSup = (STimeWindowAggSupp){ + .waterMark = pInterpPhyNode->streamNodeOption.watermark, + .calTrigger = pInterpPhyNode->streamNodeOption.triggerType, + .maxTs = INT64_MIN, + .minTs = INT64_MAX, + .deleteMark = getDeleteMarkFromOption(&pInterpPhyNode->streamNodeOption), + }; + + pInfo->primaryTsIndex = ((SColumnNode*)pInterpPhyNode->pTimeSeries)->slotId; + + SSDataBlock* pDownRes = NULL; + SColumnInfo* pPkCol = NULL; + code = getDownstreamRes(downstream, &pDownRes, &pPkCol); + QUERY_CHECK_CODE(code, lino, _error); + + pInfo->pFillSup = NULL; + code = initTimeSliceFillSup(pInterpPhyNode, pExpSup, numOfExprs, pPkCol, &pInfo->pFillSup); + QUERY_CHECK_CODE(code, lino, _error); + + int32_t ratio = 1; + if (pInfo->pFillSup->type == TSDB_FILL_LINEAR) { + ratio = 2; + } + + int32_t keyBytes = sizeof(TSKEY); + keyBytes += blockDataGetRowSize(pDownRes) + sizeof(SResultCellData) * taosArrayGetSize(pDownRes->pDataBlock); + if (pPkCol) { + keyBytes += pPkCol->bytes; + } + code = initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfExprs, 0, pTaskInfo->streamInfo.pState, keyBytes, 0, + &pTaskInfo->storageAPI.stateStore, pHandle, &pInfo->twAggSup, GET_TASKID(pTaskInfo), + &pTaskInfo->storageAPI, pInfo->primaryTsIndex, STREAM_STATE_BUFF_HASH_SORT, ratio); + QUERY_CHECK_CODE(code, lino, _error); + + code = initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); + QUERY_CHECK_CODE(code, lino, _error); + + pInfo->pRes = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc); + pInfo->delIndex = 0; + pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey)); + QUERY_CHECK_NULL(pInfo->pDelWins, code, lino, _error, terrno); + + pInfo->pDelRes = NULL; + code = createSpecialDataBlock(STREAM_DELETE_RESULT, &pInfo->pDelRes); + QUERY_CHECK_CODE(code, lino, _error); + + _hash_fn_t hashFn = 
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pDeletedMap = tSimpleHashInit(1024, hashFn); + QUERY_CHECK_NULL(pInfo->pDeletedMap, code, lino, _error, terrno); + + pInfo->ignoreExpiredData = pInterpPhyNode->streamNodeOption.igExpired; + pInfo->ignoreExpiredDataSaved = false; + pInfo->pUpdated = taosArrayInit(64, sizeof(SWinKey)); + pInfo->pUpdatedMap = tSimpleHashInit(1024, hashFn); + pInfo->historyPoints = taosArrayInit(4, sizeof(SWinKey)); + QUERY_CHECK_NULL(pInfo->historyPoints, code, lino, _error, terrno); + + pInfo->recvCkBlock = false; + pInfo->pCheckpointRes = NULL; + code = createSpecialDataBlock(STREAM_CHECKPOINT, &pInfo->pCheckpointRes); + QUERY_CHECK_CODE(code, lino, _error); + + pInfo->destHasPrimaryKey = pInterpPhyNode->streamNodeOption.destHasPrimaryKey; + pInfo->numOfDatapack = 0; + + pInfo->pFillInfo = initStreamFillInfo(pInfo->pFillSup, pDownRes); + copyFillValueInfo(pInfo->pFillSup, pInfo->pFillInfo); + pInfo->ignoreNull = getIgoreNullRes(pExpSup); + + pInfo->historyWins = taosArrayInit(4, sizeof(SWinKey)); + QUERY_CHECK_NULL(pInfo->historyWins, code, lino, _error, terrno); + + if (pHandle) { + pInfo->isHistoryOp = pHandle->fillHistory; + } + + pInfo->pCloseTs = taosArrayInit(1024, sizeof(TSKEY)); + QUERY_CHECK_NULL(pInfo->pCloseTs, code, lino, _error, terrno); + + pInfo->pOperator = pOperator; + + pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC; + setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC, + true, OP_NOT_OPENED, pInfo, pTaskInfo); + // for stream + void* buff = NULL; + int32_t len = 0; + int32_t res = pTaskInfo->storageAPI.stateStore.streamStateGetInfo( + pTaskInfo->streamInfo.pState, STREAM_TIME_SLICE_OP_CHECKPOINT_NAME, strlen(STREAM_TIME_SLICE_OP_CHECKPOINT_NAME), + &buff, &len); + if (res == TSDB_CODE_SUCCESS) { + code = doStreamTimeSliceDecodeOpState(buff, len, pOperator); + taosMemoryFree(buff); + QUERY_CHECK_CODE(code, lino, _error); + 
} + pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamTimeSliceNext, NULL, destroyStreamTimeSliceOperatorInfo, + optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); + setOperatorStreamStateFn(pOperator, streamTimeSliceReleaseState, streamTimeSliceReloadState); + + initStreamBasicInfo(&pInfo->basic); + if (downstream) { + code = initTimeSliceDownStream(downstream, &pInfo->streamAggSup, pOperator->operatorType, pInfo->primaryTsIndex, + &pInfo->twAggSup, &pInfo->basic, pInfo->pFillSup); + QUERY_CHECK_CODE(code, lino, _error); + + code = appendDownstream(pOperator, &downstream, 1); + QUERY_CHECK_CODE(code, lino, _error); + } + (*ppOptInfo) = pOperator; + return code; + +_error: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (pInfo != NULL) { + destroyStreamTimeSliceOperatorInfo(pInfo); + } + destroyOperatorAndDownstreams(pOperator, &downstream, 1); + pTaskInfo->code = code; + (*ppOptInfo) = NULL; + return code; +} diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index fc919dfe5ff..8fd00e93130 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -30,9 +30,6 @@ #define IS_FINAL_INTERVAL_OP(op) ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) #define IS_MID_INTERVAL_OP(op) ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL) -#define IS_NORMAL_INTERVAL_OP(op) \ - ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL || \ - (op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) #define IS_FINAL_SESSION_OP(op) ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) #define IS_NORMAL_SESSION_OP(op) \ @@ -171,7 +168,7 @@ int32_t saveResult(SResultWindowInfo winInfo, SSHashObj* pStUpdated) { return tSimpleHashPut(pStUpdated, &winInfo.sessionWin, sizeof(SSessionKey), 
&winInfo, sizeof(SResultWindowInfo)); } -static int32_t saveWinResult(SWinKey* pKey, SRowBuffPos* pPos, SSHashObj* pUpdatedMap) { +int32_t saveWinResult(SWinKey* pKey, SRowBuffPos* pPos, SSHashObj* pUpdatedMap) { if (tSimpleHashGetSize(pUpdatedMap) > MAX_STREAM_HISTORY_RESULT) { qError("%s failed at line %d since too many history result. ", __func__, __LINE__); return TSDB_CODE_STREAM_INTERNAL_ERROR; @@ -203,7 +200,7 @@ static int32_t compareWinKey(void* pKey, void* data, int32_t index) { return winKeyCmprImpl(pKey, pDataPos); } -static void removeDeleteResults(SSHashObj* pUpdatedMap, SArray* pDelWins) { +void removeDeleteResults(SSHashObj* pUpdatedMap, SArray* pDelWins) { taosArraySort(pDelWins, winKeyCmprImpl); taosArrayRemoveDuplicate(pDelWins, winKeyCmprImpl, NULL); int32_t delSize = taosArrayGetSize(pDelWins); @@ -400,6 +397,11 @@ STimeWindow getFinalTimeWindow(int64_t ts, SInterval* pInterval) { static void doBuildDeleteResult(SStreamIntervalOperatorInfo* pInfo, SArray* pWins, int32_t* index, SSDataBlock* pBlock) { + doBuildDeleteResultImpl(&pInfo->stateStore, pInfo->pState, pWins, index, pBlock); +} + +void doBuildDeleteResultImpl(SStateStore* pAPI, SStreamState* pState, SArray* pWins, int32_t* index, + SSDataBlock* pBlock) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; blockDataCleanup(pBlock); @@ -417,7 +419,7 @@ static void doBuildDeleteResult(SStreamIntervalOperatorInfo* pInfo, SArray* pWin SWinKey* pWin = taosArrayGet(pWins, i); void* tbname = NULL; int32_t winCode = TSDB_CODE_SUCCESS; - code = pInfo->stateStore.streamStateGetParName(pInfo->pState, pWin->groupId, &tbname, false, &winCode); + code = pAPI->streamStateGetParName(pState, pWin->groupId, &tbname, false, &winCode); QUERY_CHECK_CODE(code, lino, _end); if (winCode != TSDB_CODE_SUCCESS) { @@ -430,7 +432,7 @@ static void doBuildDeleteResult(SStreamIntervalOperatorInfo* pInfo, SArray* pWin code = appendDataToSpecialBlock(pBlock, &pWin->ts, &pWin->ts, &uid, &pWin->groupId, parTbName); 
QUERY_CHECK_CODE(code, lino, _end); } - pInfo->stateStore.streamStateFreeVal(tbname); + pAPI->streamStateFreeVal(tbname); (*index)++; } @@ -474,8 +476,8 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) { SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); if (pInfo->pOperator) { - cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + false); pInfo->pOperator = NULL; } cleanupAggSup(&pInfo->aggSup); @@ -497,6 +499,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) { blockDataDestroy(pInfo->pMidRetriveRes); blockDataDestroy(pInfo->pMidPulloverRes); if (pInfo->pUpdatedMap != NULL) { + // free flushed pos tSimpleHashSetFreeFp(pInfo->pUpdatedMap, destroyFlusedppPos); tSimpleHashCleanup(pInfo->pUpdatedMap); pInfo->pUpdatedMap = NULL; @@ -1204,7 +1207,7 @@ static int32_t doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pS return code; } -static inline int winPosCmprImpl(const void* pKey1, const void* pKey2) { +int winPosCmprImpl(const void* pKey1, const void* pKey2) { SRowBuffPos* pos1 = *(SRowBuffPos**)pKey1; SRowBuffPos* pos2 = *(SRowBuffPos**)pKey2; SWinKey* pWin1 = (SWinKey*)pos1->pKey; @@ -1475,7 +1478,7 @@ void doStreamIntervalSaveCheckpoint(SOperatorInfo* pOperator) { } } -static int32_t copyIntervalDeleteKey(SSHashObj* pMap, SArray* pWins) { +int32_t copyIntervalDeleteKey(SSHashObj* pMap, SArray* pWins) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; void* pIte = NULL; @@ -1823,6 +1826,14 @@ int64_t getDeleteMark(SWindowPhysiNode* pWinPhyNode, int64_t interval) { return deleteMark; } +int64_t getDeleteMarkFromOption(SStreamNodeOption* pOption) { + if (pOption->deleteMark <= 0) { + return DEAULT_DELETE_MARK; + } + int64_t deleteMark = 
TMAX(pOption->deleteMark, pOption->watermark); + return deleteMark; +} + static TSKEY compareTs(void* pKey) { SWinKey* pWinKey = (SWinKey*)pKey; return pWinKey->ts; @@ -1967,7 +1978,6 @@ int32_t createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiN code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str, pInfo->pState, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); - tSimpleHashSetFreeFp(pInfo->aggSup.pResultRowHashTable, destroyFlusedppPos); code = initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); QUERY_CHECK_CODE(code, lino, _error); @@ -2035,7 +2045,7 @@ int32_t createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiN QUERY_CHECK_NULL(pInfo->pMidPullDatas, code, lino, _error, terrno); pInfo->pDeletedMap = tSimpleHashInit(4096, hashFn); QUERY_CHECK_NULL(pInfo->pDeletedMap, code, lino, _error, terrno); - pInfo->destHasPrimaryKey = pIntervalPhyNode->window.destHasPrimayKey; + pInfo->destHasPrimaryKey = pIntervalPhyNode->window.destHasPrimaryKey; pInfo->pOperator = pOperator; pOperator->operatorType = pPhyNode->type; @@ -2206,8 +2216,8 @@ static TSKEY sesionTs(void* pKey) { int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, int32_t numOfOutput, int64_t gap, SStreamState* pState, int32_t keySize, int16_t keyType, SStateStore* pStore, SReadHandle* pHandle, STimeWindowAggSupp* pTwAggSup, const char* taskIdStr, - SStorageAPI* pApi, int32_t tsIndex) { - pSup->resultRowSize = keySize + getResultRowSize(pExpSup->pCtx, numOfOutput); + SStorageAPI* pApi, int32_t tsIndex, int8_t stateType, int32_t ratio) { + pSup->resultRowSize = (keySize + getResultRowSize(pExpSup->pCtx, numOfOutput)) * ratio; int32_t lino = 0; int32_t code = createSpecialDataBlock(STREAM_CLEAR, &pSup->pScanBlock); QUERY_CHECK_CODE(code, lino, _end); @@ -2228,16 +2238,24 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, 
SExprSupp* pExpSup, in *(pSup->pState) = *pState; pSup->stateStore.streamStateSetNumber(pSup->pState, -1, tsIndex); int32_t funResSize = getMaxFunResSize(pExpSup, numOfOutput); - pSup->pState->pFileState = NULL; - - // used for backward compatibility of function's result info - pSup->pState->pResultRowStore.resultRowGet = getResultRowFromBuf; - pSup->pState->pResultRowStore.resultRowPut = putResultRowToBuf; - pSup->pState->pExprSupp = pExpSup; + if (stateType != STREAM_STATE_BUFF_HASH_SORT) { + // used for backward compatibility of function's result info + pSup->pState->pResultRowStore.resultRowGet = getResultRowFromBuf; + pSup->pState->pResultRowStore.resultRowPut = putResultRowToBuf; + pSup->pState->pExprSupp = pExpSup; + } - code = pSup->stateStore.streamFileStateInit(tsStreamBufferSize, sizeof(SSessionKey), pSup->resultRowSize, funResSize, - sesionTs, pSup->pState, pTwAggSup->deleteMark, taskIdStr, - pHandle->checkpointId, STREAM_STATE_BUFF_SORT, &pSup->pState->pFileState); + if (stateType == STREAM_STATE_BUFF_SORT) { + pSup->pState->pFileState = NULL; + code = pSup->stateStore.streamFileStateInit(tsStreamBufferSize, sizeof(SSessionKey), pSup->resultRowSize, + funResSize, sesionTs, pSup->pState, pTwAggSup->deleteMark, taskIdStr, + pHandle->checkpointId, stateType, &pSup->pState->pFileState); + } else if (stateType == STREAM_STATE_BUFF_HASH_SORT || stateType == STREAM_STATE_BUFF_HASH_SEARCH) { + pSup->pState->pFileState = NULL; + code = pSup->stateStore.streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pSup->resultRowSize, funResSize, + compareTs, pSup->pState, pTwAggSup->deleteMark, taskIdStr, + pHandle->checkpointId, stateType, &pSup->pState->pFileState); + } QUERY_CHECK_CODE(code, lino, _end); _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); @@ -3837,9 +3855,10 @@ int32_t createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode }; pInfo->primaryTsIndex = ((SColumnNode*)pSessionNode->window.pTspk)->slotId; - code = 
initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, pSessionNode->gap, - pTaskInfo->streamInfo.pState, 0, 0, &pTaskInfo->storageAPI.stateStore, pHandle, - &pInfo->twAggSup, GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI, pInfo->primaryTsIndex); + code = + initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, pSessionNode->gap, pTaskInfo->streamInfo.pState, + 0, 0, &pTaskInfo->storageAPI.stateStore, pHandle, &pInfo->twAggSup, GET_TASKID(pTaskInfo), + &pTaskInfo->storageAPI, pInfo->primaryTsIndex, STREAM_STATE_BUFF_SORT, 1); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -3879,7 +3898,7 @@ int32_t createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode pInfo->clearState = false; pInfo->recvGetAll = false; - pInfo->destHasPrimaryKey = pSessionNode->window.destHasPrimayKey; + pInfo->destHasPrimaryKey = pSessionNode->window.destHasPrimaryKey; pInfo->pPkDeleted = tSimpleHashInit(64, hashFn); QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno); pInfo->pOperator = pOperator; @@ -5030,9 +5049,10 @@ int32_t createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* int32_t keySize = sizeof(SStateKeys) + pColNode->node.resType.bytes; int16_t type = pColNode->node.resType.type; pInfo->primaryTsIndex = tsSlotId; - code = initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, 0, pTaskInfo->streamInfo.pState, keySize, - type, &pTaskInfo->storageAPI.stateStore, pHandle, &pInfo->twAggSup, - GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI, pInfo->primaryTsIndex); + code = + initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, 0, pTaskInfo->streamInfo.pState, keySize, type, + &pTaskInfo->storageAPI.stateStore, pHandle, &pInfo->twAggSup, GET_TASKID(pTaskInfo), + &pTaskInfo->storageAPI, pInfo->primaryTsIndex, STREAM_STATE_BUFF_SORT, 1); QUERY_CHECK_CODE(code, lino, _error); _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); @@ -5065,7 +5085,7 @@ int32_t 
createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->recvGetAll = false; pInfo->pPkDeleted = tSimpleHashInit(64, hashFn); QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno); - pInfo->destHasPrimaryKey = pStateNode->window.destHasPrimayKey; + pInfo->destHasPrimaryKey = pStateNode->window.destHasPrimaryKey; pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StreamStateAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, true, OP_NOT_OPENED, @@ -5294,8 +5314,8 @@ static int32_t doStreamIntervalAggNext(SOperatorInfo* pOperator, SSDataBlock** p return code; } -int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, - SReadHandle* pHandle, SOperatorInfo** pOptrInfo) { +static int32_t createStreamSingleIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, + SReadHandle* pHandle, SOperatorInfo** pOptrInfo) { QRY_PARAM_CHECK(pOptrInfo); int32_t code = TSDB_CODE_SUCCESS; @@ -5358,7 +5378,6 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* code = initAggSup(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str, pInfo->pState, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); - tSimpleHashSetFreeFp(pInfo->aggSup.pResultRowHashTable, destroyFlusedppPos); if (pIntervalPhyNode->window.pExprs != NULL) { int32_t numOfScalar = 0; @@ -5425,7 +5444,7 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pDeletedMap = tSimpleHashInit(4096, hashFn); QUERY_CHECK_NULL(pInfo->pDeletedMap, code, lino, _error, terrno); - pInfo->destHasPrimaryKey = pIntervalPhyNode->window.destHasPrimayKey; + pInfo->destHasPrimaryKey = pIntervalPhyNode->window.destHasPrimaryKey; // for stream void* buff = NULL; @@ -5454,6 +5473,17 @@ int32_t 
createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* return code; } +int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, + SReadHandle* pHandle, SOperatorInfo** pOptrInfo) { + SStreamIntervalPhysiNode* pIntervalPhyNode = (SStreamIntervalPhysiNode*)pPhyNode; + if (pIntervalPhyNode->window.triggerType == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + return createStreamIntervalSliceOperatorInfo(downstream, pPhyNode, pTaskInfo, pHandle, pOptrInfo); + } else { + return createStreamSingleIntervalOperatorInfo(downstream, pPhyNode, pTaskInfo, pHandle, pOptrInfo); + } + return TSDB_CODE_SUCCESS; +} + static void doStreamMidIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SSHashObj* pUpdatedMap) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; @@ -5845,7 +5875,7 @@ static SSDataBlock* doStreamMidIntervalAgg(SOperatorInfo* pOperator) { } void setStreamOperatorCompleted(SOperatorInfo* pOperator) { - setOperatorCompleted(pOperator); qDebug("stask:%s %s status: %d. 
set completed", GET_TASKID(pOperator->pTaskInfo), getStreamOpName(pOperator->operatorType), pOperator->status); + setOperatorCompleted(pOperator); } diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 88f4a748e8c..cfcd9dedb68 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -425,7 +425,7 @@ static bool sysTableIsOperatorCondOnOneTable(SNode* pCond, char* condTable) { SValueNode* pValue = (SValueNode*)node->pRight; if (pValue->node.resType.type == TSDB_DATA_TYPE_NCHAR || pValue->node.resType.type == TSDB_DATA_TYPE_VARCHAR) { char* value = nodesGetValueFromNode(pValue); - strncpy(condTable, varDataVal(value), TSDB_TABLE_NAME_LEN); + tstrncpy(condTable, varDataVal(value), TSDB_TABLE_NAME_LEN); return true; } } @@ -914,41 +914,41 @@ void relocateAndFilterSysTagsScanResult(SSysTableScanInfo* pInfo, int32_t numOfR } } -int32_t convertTagDataToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* len) { +int32_t convertTagDataToStr(char* str, int32_t strBuffLen, int type, void* buf, int32_t bufSize, int32_t* len) { int32_t n = 0; switch (type) { case TSDB_DATA_TYPE_NULL: - n = sprintf(str, "null"); + n = tsnprintf(str, strBuffLen, "null"); break; case TSDB_DATA_TYPE_BOOL: - n = sprintf(str, (*(int8_t*)buf) ? "true" : "false"); + n = tsnprintf(str, strBuffLen, (*(int8_t*)buf) ? 
"true" : "false"); break; case TSDB_DATA_TYPE_TINYINT: - n = sprintf(str, "%d", *(int8_t*)buf); + n = tsnprintf(str, strBuffLen, "%d", *(int8_t*)buf); break; case TSDB_DATA_TYPE_SMALLINT: - n = sprintf(str, "%d", *(int16_t*)buf); + n = tsnprintf(str, strBuffLen, "%d", *(int16_t*)buf); break; case TSDB_DATA_TYPE_INT: - n = sprintf(str, "%d", *(int32_t*)buf); + n = tsnprintf(str, strBuffLen, "%d", *(int32_t*)buf); break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - n = sprintf(str, "%" PRId64, *(int64_t*)buf); + n = tsnprintf(str, strBuffLen, "%" PRId64, *(int64_t*)buf); break; case TSDB_DATA_TYPE_FLOAT: - n = sprintf(str, "%.5f", GET_FLOAT_VAL(buf)); + n = tsnprintf(str, strBuffLen, "%.5f", GET_FLOAT_VAL(buf)); break; case TSDB_DATA_TYPE_DOUBLE: - n = sprintf(str, "%.9f", GET_DOUBLE_VAL(buf)); + n = tsnprintf(str, strBuffLen, "%.9f", GET_DOUBLE_VAL(buf)); break; case TSDB_DATA_TYPE_BINARY: @@ -973,19 +973,19 @@ int32_t convertTagDataToStr(char* str, int type, void* buf, int32_t bufSize, int n = length; break; case TSDB_DATA_TYPE_UTINYINT: - n = sprintf(str, "%u", *(uint8_t*)buf); + n = tsnprintf(str, strBuffLen, "%u", *(uint8_t*)buf); break; case TSDB_DATA_TYPE_USMALLINT: - n = sprintf(str, "%u", *(uint16_t*)buf); + n = tsnprintf(str, strBuffLen, "%u", *(uint16_t*)buf); break; case TSDB_DATA_TYPE_UINT: - n = sprintf(str, "%u", *(uint32_t*)buf); + n = tsnprintf(str, strBuffLen, "%u", *(uint32_t*)buf); break; case TSDB_DATA_TYPE_UBIGINT: - n = sprintf(str, "%" PRIu64, *(uint64_t*)buf); + n = tsnprintf(str, strBuffLen, "%" PRIu64, *(uint64_t*)buf); break; default: @@ -1065,14 +1065,21 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, int8_t tagType = (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].type; pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); + int32_t tagStrBufflen = 32; char tagTypeStr[VARSTR_HEADER_SIZE + 32]; - int tagTypeLen = 
sprintf(varDataVal(tagTypeStr), "%s", tDataTypes[tagType].name); + int tagTypeLen = tsnprintf(varDataVal(tagTypeStr), tagStrBufflen, "%s", tDataTypes[tagType].name); + tagStrBufflen -= tagTypeLen; + if (tagStrBufflen <= 0) { + code = TSDB_CODE_INVALID_PARA; + QUERY_CHECK_CODE(code, lino, _end); + } + if (tagType == TSDB_DATA_TYPE_NCHAR) { - tagTypeLen += sprintf( - varDataVal(tagTypeStr) + tagTypeLen, "(%d)", + tagTypeLen += tsnprintf( + varDataVal(tagTypeStr) + tagTypeLen, tagStrBufflen, "(%d)", (int32_t)(((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } else if (IS_VAR_DATA_TYPE(tagType)) { - tagTypeLen += sprintf(varDataVal(tagTypeStr) + tagTypeLen, "(%d)", + tagTypeLen += tsnprintf(varDataVal(tagTypeStr) + tagTypeLen, tagStrBufflen, "(%d)", (int32_t)((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE)); } varDataSetLen(tagTypeStr, tagTypeLen); @@ -1127,7 +1134,7 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, QUERY_CHECK_NULL(tagVarChar, code, lino, _end, terrno); int32_t len = -1; if (tagLen > 0) - convertTagDataToStr(varDataVal(tagVarChar), tagType, tagData, tagLen, &len); + convertTagDataToStr(varDataVal(tagVarChar), bufSize + 1 - VARSTR_HEADER_SIZE, tagType, tagData, tagLen, &len); else len = 0; varDataSetLen(tagVarChar, len); @@ -1197,13 +1204,19 @@ static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, int8_t colType = schemaRow->pSchema[i].type; pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); + int32_t colStrBufflen = 32; char colTypeStr[VARSTR_HEADER_SIZE + 32]; - int colTypeLen = sprintf(varDataVal(colTypeStr), "%s", tDataTypes[colType].name); + int colTypeLen = tsnprintf(varDataVal(colTypeStr), colStrBufflen, "%s", tDataTypes[colType].name); + colStrBufflen -= colTypeLen; + if (colStrBufflen <= 0) { + code = TSDB_CODE_INVALID_PARA; + 
QUERY_CHECK_CODE(code, lino, _end); + } if (colType == TSDB_DATA_TYPE_VARCHAR) { - colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)", + colTypeLen += tsnprintf(varDataVal(colTypeStr) + colTypeLen, colStrBufflen, "(%d)", (int32_t)(schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE)); } else if (colType == TSDB_DATA_TYPE_NCHAR) { - colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)", + colTypeLen += tsnprintf(varDataVal(colTypeStr) + colTypeLen, colStrBufflen, "(%d)", (int32_t)((schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } varDataSetLen(colTypeStr, colTypeLen); @@ -2020,8 +2033,7 @@ static EDealRes getDBNameFromConditionWalker(SNode* pNode, void* pContext) { SValueNode* node = (SValueNode*)pNode; char* dbName = nodesGetValueFromNode(node); - strncpy(pContext, varDataVal(dbName), varDataLen(dbName)); - *((char*)pContext + varDataLen(dbName)) = 0; + tstrncpy((char*)pContext, varDataVal(dbName), TSDB_DB_NAME_LEN); return DEAL_RES_END; // stop walk } default: @@ -2057,11 +2069,11 @@ static int32_t doSysTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) getDBNameFromCondition(pInfo->pCondition, dbName); if (strncasecmp(name, TSDB_INS_TABLE_COMPACTS, TSDB_TABLE_FNAME_LEN) != 0 && strncasecmp(name, TSDB_INS_TABLE_COMPACT_DETAILS, TSDB_TABLE_FNAME_LEN) != 0) { - sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName); + TAOS_UNUSED(tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName)); } } else if (strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0) { getDBNameFromCondition(pInfo->pCondition, dbName); - if (dbName[0]) sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName); + if (dbName[0]) TAOS_UNUSED(tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName)); (void)sysTableIsCondOnOneTable(pInfo->pCondition, pInfo->req.filterTb); } diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index 
cdfbd7a8502..190b327522c 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -39,22 +39,27 @@ static int32_t doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey); static void setNotFillColumn(SFillInfo* pFillInfo, SColumnInfoData* pDstColInfo, int32_t rowIndex, int32_t colIdx) { - SRowVal* p = NULL; - if (pFillInfo->type == TSDB_FILL_NEXT) { - p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->next : &pFillInfo->prev; + SFillColInfo* pCol = &pFillInfo->pFillCol[colIdx]; + if (pCol->fillNull) { + colDataSetNULL(pDstColInfo, rowIndex); } else { - p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->prev : &pFillInfo->next; - } + SRowVal* p = NULL; + if (pFillInfo->type == TSDB_FILL_NEXT) { + p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->next : &pFillInfo->prev; + } else { + p = FILL_IS_ASC_FILL(pFillInfo) ? &pFillInfo->prev : &pFillInfo->next; + } - SGroupKeys* pKey = taosArrayGet(p->pRowVal, colIdx); - if (!pKey) { - qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); - T_LONG_JMP(pFillInfo->pTaskInfo->env, terrno); - } - int32_t code = doSetVal(pDstColInfo, rowIndex, pKey); - if (code != TSDB_CODE_SUCCESS) { - qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); - T_LONG_JMP(pFillInfo->pTaskInfo->env, code); + SGroupKeys* pKey = taosArrayGet(p->pRowVal, colIdx); + if (!pKey) { + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); + T_LONG_JMP(pFillInfo->pTaskInfo->env, terrno); + } + int32_t code = doSetVal(pDstColInfo, rowIndex, pKey); + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + T_LONG_JMP(pFillInfo->pTaskInfo->env, code); + } } } @@ -545,9 +550,10 @@ static int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) { return pFillInfo->numOfRows - pFillInfo->index; } -int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t 
capacity, - SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t primaryTsSlotId, - int32_t order, const char* id, SExecTaskInfo* pTaskInfo, SFillInfo** ppFillInfo) { +int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t fillNullCols, + int32_t capacity, SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, + int32_t primaryTsSlotId, int32_t order, const char* id, SExecTaskInfo* pTaskInfo, + SFillInfo** ppFillInfo) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; if (fillType == TSDB_FILL_NONE) { @@ -574,7 +580,7 @@ int32_t taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFi pFillInfo->type = fillType; pFillInfo->pFillCol = pCol; - pFillInfo->numOfCols = numOfFillCols + numOfNotFillCols; + pFillInfo->numOfCols = numOfFillCols + numOfNotFillCols + fillNullCols; pFillInfo->alloc = capacity; pFillInfo->id = id; pFillInfo->interval = *pInterval; @@ -761,10 +767,11 @@ int32_t taosFillResultDataBlock(SFillInfo* pFillInfo, SSDataBlock* p, int32_t ca int64_t getFillInfoStart(struct SFillInfo* pFillInfo) { return pFillInfo->start; } SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr, - int32_t numOfNoFillExpr, const struct SNodeListNode* pValNode) { + int32_t numOfNoFillExpr, SExprInfo* pFillNullExpr, int32_t numOfFillNullExpr, + const struct SNodeListNode* pValNode) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; - SFillColInfo* pFillCol = taosMemoryCalloc(numOfFillExpr + numOfNoFillExpr, sizeof(SFillColInfo)); + SFillColInfo* pFillCol = taosMemoryCalloc(numOfFillExpr + numOfNoFillExpr + numOfFillNullExpr, sizeof(SFillColInfo)); if (pFillCol == NULL) { return NULL; } @@ -797,6 +804,13 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprIn pFillCol[i + numOfFillExpr].notFillCol = true; } + for (int32_t i = 0; i < numOfFillNullExpr; ++i) { + SExprInfo* pExprInfo = &pFillNullExpr[i]; + 
pFillCol[i + numOfFillExpr + numOfNoFillExpr].pExpr = pExprInfo; + pFillCol[i + numOfFillExpr + numOfNoFillExpr].notFillCol = true; + pFillCol[i + numOfFillExpr + numOfNoFillExpr].fillNull = true; + } + return pFillCol; _end: diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 2ea300ace89..f77aa8f34a5 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -42,7 +42,7 @@ typedef struct STimeSliceOperatorInfo { SRowKey prevKey; bool prevTsSet; uint64_t groupId; - SGroupKeys* pPrevGroupKey; + SArray* pPrevGroupKeys; SSDataBlock* pNextGroupRes; SSDataBlock* pRemainRes; // save block unfinished processing int32_t remainIndex; // the remaining index in the block to be processed @@ -169,12 +169,12 @@ static FORCE_INLINE int32_t timeSliceEnsureBlockCapacity(STimeSliceOperatorInfo* return TSDB_CODE_SUCCESS; } -static bool isIrowtsPseudoColumn(SExprInfo* pExprInfo) { +bool isIrowtsPseudoColumn(SExprInfo* pExprInfo) { char* name = pExprInfo->pExpr->_function.functionName; return (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type) && strcasecmp(name, "_irowts") == 0); } -static bool isIsfilledPseudoColumn(SExprInfo* pExprInfo) { +bool isIsfilledPseudoColumn(SExprInfo* pExprInfo) { char* name = pExprInfo->pExpr->_function.functionName; return (IS_BOOLEAN_TYPE(pExprInfo->base.resSchema.type) && strcasecmp(name, "_isfilled") == 0); } @@ -224,7 +224,7 @@ static bool checkDuplicateTimestamps(STimeSliceOperatorInfo* pSliceInfo, SColumn return false; } -static bool isInterpFunc(SExprInfo* pExprInfo) { +bool isInterpFunc(SExprInfo* pExprInfo) { int32_t functionType = pExprInfo->pExpr->_function.functionType; return (functionType == FUNCTION_TYPE_INTERP); } @@ -239,7 +239,7 @@ static bool isSelectGroupConstValueFunc(SExprInfo* pExprInfo) { return (functionType == FUNCTION_TYPE_GROUP_CONST_VALUE); } -static bool getIgoreNullRes(SExprSupp* pExprSup) { +bool 
getIgoreNullRes(SExprSupp* pExprSup) { for (int32_t i = 0; i < pExprSup->numOfExprs; ++i) { SExprInfo* pExprInfo = &pExprSup->pExprInfo[i]; @@ -256,7 +256,7 @@ static bool getIgoreNullRes(SExprSupp* pExprSup) { return false; } -static bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t index, bool ignoreNull) { +bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t index, bool ignoreNull) { if (!ignoreNull) { return false; } @@ -288,6 +288,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp // output the result int32_t fillColIndex = 0; + int32_t groupKeyIndex = 0; bool hasInterp = true; for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) { SExprInfo* pExprInfo = &pExprSup->pExprInfo[j]; @@ -320,7 +321,9 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp QUERY_CHECK_CODE(code, lino, _end); } else if (!isSelectGroupConstValueFunc(pExprInfo)) { // use stored group key - SGroupKeys* pkey = pSliceInfo->pPrevGroupKey; + SGroupKeys *pkey = taosArrayGet(pSliceInfo->pPrevGroupKeys, groupKeyIndex); + QUERY_CHECK_NULL(pkey, code, lino, _end, terrno); + groupKeyIndex++; if (pkey->isNull == false) { code = colDataSetVal(pDst, rows, pkey->pData, false); QUERY_CHECK_CODE(code, lino, _end); @@ -645,13 +648,20 @@ static int32_t initFillLinearInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pB return code; } +static void destroyGroupKey(void* pKey) { + SGroupKeys* key = (SGroupKeys*)pKey; + if (key->pData != NULL) { + taosMemoryFreeClear(key->pData); + } +} + static int32_t initGroupKeyKeeper(STimeSliceOperatorInfo* pInfo, SExprSupp* pExprSup) { - if (pInfo->pPrevGroupKey != NULL) { + if (pInfo->pPrevGroupKeys != NULL) { return TSDB_CODE_SUCCESS; } - pInfo->pPrevGroupKey = taosMemoryCalloc(1, sizeof(SGroupKeys)); - if (pInfo->pPrevGroupKey == NULL) { + pInfo->pPrevGroupKeys = taosArrayInit(pExprSup->numOfExprs, sizeof(SGroupKeys)); + if (pInfo->pPrevGroupKeys == NULL) { return 
terrno; } @@ -659,11 +669,19 @@ static int32_t initGroupKeyKeeper(STimeSliceOperatorInfo* pInfo, SExprSupp* pExp SExprInfo* pExprInfo = &pExprSup->pExprInfo[i]; if (isGroupKeyFunc(pExprInfo)) { - pInfo->pPrevGroupKey->bytes = pExprInfo->base.resSchema.bytes; - pInfo->pPrevGroupKey->type = pExprInfo->base.resSchema.type; - pInfo->pPrevGroupKey->isNull = false; - pInfo->pPrevGroupKey->pData = taosMemoryCalloc(1, pInfo->pPrevGroupKey->bytes); - if (!pInfo->pPrevGroupKey->pData) { + SGroupKeys key = {.bytes = pExprInfo->base.resSchema.bytes, + .type = pExprInfo->base.resSchema.type, + .isNull = false, + .pData = taosMemoryCalloc(1, pExprInfo->base.resSchema.bytes)}; + if (!key.pData) { + taosArrayDestroyEx(pInfo->pPrevGroupKeys, destroyGroupKey); + pInfo->pPrevGroupKeys = NULL; + return terrno; + } + if (NULL == taosArrayPush(pInfo->pPrevGroupKeys, &key)) { + taosMemoryFree(key.pData); + taosArrayDestroyEx(pInfo->pPrevGroupKeys, destroyGroupKey); + pInfo->pPrevGroupKeys = NULL; return terrno; } } @@ -910,7 +928,7 @@ static void genInterpAfterDataBlock(STimeSliceOperatorInfo* pSliceInfo, SOperato SInterval* pInterval = &pSliceInfo->interval; if (pSliceInfo->fillType == TSDB_FILL_NEXT || pSliceInfo->fillType == TSDB_FILL_LINEAR || - pSliceInfo->pPrevGroupKey == NULL) { + pSliceInfo->pPrevGroupKeys == NULL) { return; } @@ -921,12 +939,18 @@ static void genInterpAfterDataBlock(STimeSliceOperatorInfo* pSliceInfo, SOperato } } -static void copyPrevGroupKey(SExprSupp* pExprSup, SGroupKeys* pGroupKey, SSDataBlock* pSrcBlock) { +static int32_t copyPrevGroupKey(SExprSupp* pExprSup, SArray * pGroupKeys, SSDataBlock* pSrcBlock) { + int32_t groupKeyIdx = 0; for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) { SExprInfo* pExprInfo = &pExprSup->pExprInfo[j]; if (isGroupKeyFunc(pExprInfo)) { int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId; + SGroupKeys *pGroupKey = taosArrayGet(pGroupKeys, groupKeyIdx); + if (pGroupKey == NULL) { + return terrno; + } + groupKeyIdx++; 
SColumnInfoData* pSrc = taosArrayGet(pSrcBlock->pDataBlock, srcSlot); if (colDataIsNull_s(pSrc, 0)) { @@ -942,9 +966,9 @@ static void copyPrevGroupKey(SExprSupp* pExprSup, SGroupKeys* pGroupKey, SSDataB } pGroupKey->isNull = false; - break; } } + return TSDB_CODE_SUCCESS; } static void resetTimesliceInfo(STimeSliceOperatorInfo* pSliceInfo) { @@ -986,7 +1010,11 @@ static void doHandleTimeslice(SOperatorInfo* pOperator, SSDataBlock* pBlock) { T_LONG_JMP(pTaskInfo->env, code); } doTimesliceImpl(pOperator, pSliceInfo, pBlock, pTaskInfo, ignoreNull); - copyPrevGroupKey(&pOperator->exprSupp, pSliceInfo->pPrevGroupKey, pBlock); + code = copyPrevGroupKey(&pOperator->exprSupp, pSliceInfo->pPrevGroupKeys, pBlock); + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + T_LONG_JMP(pTaskInfo->env, code); + } } static int32_t doTimesliceNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { @@ -1147,7 +1175,8 @@ int32_t createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyN pInfo->fillType = convertFillType(pInterpPhyNode->fillMode); initResultSizeInfo(&pOperator->resultInfo, 4096); - pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfExprs, NULL, 0, (SNodeListNode*)pInterpPhyNode->pFillValues); + pInfo->pFillColInfo = + createFillColInfo(pExprInfo, numOfExprs, NULL, 0, NULL, 0, (SNodeListNode*)pInterpPhyNode->pFillValues); QUERY_CHECK_NULL(pInfo->pFillColInfo, code, lino, _error, terrno); pInfo->pLinearInfo = NULL; @@ -1159,7 +1188,7 @@ int32_t createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyN pInfo->prevTsSet = false; pInfo->prevKey.ts = INT64_MIN; pInfo->groupId = 0; - pInfo->pPrevGroupKey = NULL; + pInfo->pPrevGroupKeys = NULL; pInfo->pNextGroupRes = NULL; pInfo->pRemainRes = NULL; pInfo->remainIndex = 0; @@ -1232,9 +1261,9 @@ void destroyTimeSliceOperatorInfo(void* param) { } taosArrayDestroy(pInfo->pLinearInfo); - if (pInfo->pPrevGroupKey) { - 
taosMemoryFree(pInfo->pPrevGroupKey->pData); - taosMemoryFree(pInfo->pPrevGroupKey); + if (pInfo->pPrevGroupKeys) { + taosArrayDestroyEx(pInfo->pPrevGroupKeys, destroyGroupKey); + pInfo->pPrevGroupKeys = NULL; } if (pInfo->hasPk && IS_VAR_DATA_TYPE(pInfo->pkCol.type)) { taosMemoryFreeClear(pInfo->prevKey.pks[0].pData); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 5d45db631a5..429cc3dd888 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -38,6 +38,7 @@ typedef struct SSessionAggOperatorInfo { int32_t tsSlotId; // primary timestamp slot id STimeWindowAggSupp twAggSup; SOperatorInfo* pOperator; + bool cleanGroupResInfo; } SSessionAggOperatorInfo; typedef struct SStateWindowOperatorInfo { @@ -52,6 +53,7 @@ typedef struct SStateWindowOperatorInfo { int32_t tsSlotId; // primary timestamp column slot id STimeWindowAggSupp twAggSup; SOperatorInfo* pOperator; + bool cleanGroupResInfo; } SStateWindowOperatorInfo; typedef enum SResultTsInterpType { @@ -943,6 +945,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { int32_t scanFlag = MAIN_SCAN; int64_t st = taosGetTimestampUs(); + pInfo->cleanGroupResInfo = false; while (1) { SSDataBlock* pBlock = getNextBlockFromDownstream(pOperator, 0); if (pBlock == NULL) { @@ -965,6 +968,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { code = initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->binfo.outputTsOrder); QUERY_CHECK_CODE(code, lino, _end); + pInfo->cleanGroupResInfo = true; OPTR_SET_OPENED(pOperator); @@ -1092,6 +1096,7 @@ static int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) { int64_t st = taosGetTimestampUs(); SOperatorInfo* downstream = pOperator->pDownstream[0]; + pInfo->cleanGroupResInfo = false; while (1) { SSDataBlock* pBlock = getNextBlockFromDownstream(pOperator, 0); if (pBlock == NULL) { @@ -1120,7 +1125,7 @@ static 
int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) { pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; code = initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC); QUERY_CHECK_CODE(code, lino, _end); - + pInfo->cleanGroupResInfo = true; pOperator->status = OP_RES_TO_RETURN; _end: @@ -1230,8 +1235,8 @@ static void destroyStateWindowOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); taosMemoryFreeClear(pInfo->stateKey.pData); if (pInfo->pOperator) { - cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + pInfo->cleanGroupResInfo); pInfo->pOperator = NULL; } @@ -1257,8 +1262,8 @@ void destroyIntervalOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); if (pInfo->pOperator) { - cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + pInfo->cleanGroupResInfo); pInfo->pOperator = NULL; } @@ -1452,6 +1457,7 @@ int32_t createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode } pInfo->pOperator = pOperator; + pInfo->cleanGroupResInfo = false; initResultRowInfo(&pInfo->binfo.resultRowInfo); setOperatorInfo(pOperator, "TimeIntervalAggOperator", QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -1573,6 +1579,7 @@ static int32_t doSessionWindowAggNext(SOperatorInfo* pOperator, SSDataBlock** pp SOptrBasicInfo* pBInfo = &pInfo->binfo; SExprSupp* pSup = &pOperator->exprSupp; + pInfo->cleanGroupResInfo = false; if (pOperator->status == OP_RES_TO_RETURN) { while (1) { doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, 
pInfo->aggSup.pResultBuf); @@ -1628,6 +1635,7 @@ static int32_t doSessionWindowAggNext(SOperatorInfo* pOperator, SSDataBlock** pp code = initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC); QUERY_CHECK_CODE(code, lino, _end); + pInfo->cleanGroupResInfo = true; code = blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity); QUERY_CHECK_CODE(code, lino, _end); @@ -1731,6 +1739,7 @@ int32_t createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhy pInfo->tsSlotId = tsSlotId; pInfo->pOperator = pOperator; + pInfo->cleanGroupResInfo = false; setOperatorInfo(pOperator, "StateWindowOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE, true, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->fpSet = createOperatorFpSet(openStateWindowAggOptr, doStateWindowAggNext, NULL, destroyStateWindowOperatorInfo, @@ -1763,8 +1772,8 @@ void destroySWindowOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); colDataDestroy(&pInfo->twAggSup.timeWindowData); if (pInfo->pOperator) { - cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + pInfo->cleanGroupResInfo); pInfo->pOperator = NULL; } @@ -1835,6 +1844,7 @@ int32_t createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPh QUERY_CHECK_CODE(code, lino, _error); pInfo->pOperator = pOperator; + pInfo->cleanGroupResInfo = false; setOperatorInfo(pOperator, "SessionWindowAggOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION, true, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doSessionWindowAggNext, NULL, destroySWindowOperatorInfo, @@ -2018,7 +2028,12 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) { cleanupAfterGroupResultGen(pMiaInfo, pRes); code = doFilter(pRes, pOperator->exprSupp.pFilterInfo, 
NULL); QUERY_CHECK_CODE(code, lino, _end); - break; + if (pRes->info.rows == 0) { + // After filtering for last group, the result is empty, so we need to continue to process next group + continue; + } else { + break; + } } else { // continue pRes->info.id.groupId = pMiaInfo->groupId; diff --git a/source/libs/executor/test/CMakeLists.txt b/source/libs/executor/test/CMakeLists.txt index ebf7131aa5e..c75de23c32b 100644 --- a/source/libs/executor/test/CMakeLists.txt +++ b/source/libs/executor/test/CMakeLists.txt @@ -2,23 +2,22 @@ MESSAGE(STATUS "build parser unit test") # IF(NOT TD_DARWIN) -# # GoogleTest requires at least C++11 -# SET(CMAKE_CXX_STANDARD 11) -# AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) -# -# ADD_EXECUTABLE(executorTest ${SOURCE_LIST}) -# TARGET_LINK_LIBRARIES( -# executorTest -# PRIVATE os util common transport gtest taos_static qcom executor function planner scalar nodes vnode -# ) -# -# TARGET_INCLUDE_DIRECTORIES( -# executorTest -# PUBLIC "${TD_SOURCE_DIR}/include/libs/executor/" -# PRIVATE "${TD_SOURCE_DIR}/source/libs/executor/inc" -# ) +# # GoogleTest requires at least C++11 +# SET(CMAKE_CXX_STANDARD 11) +# AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) +# +# ADD_EXECUTABLE(executorTest ${SOURCE_LIST}) +# TARGET_LINK_LIBRARIES( +# executorTest +# PRIVATE os util common transport gtest taos_static qcom executor function planner scalar nodes vnode +# ) +# +# TARGET_INCLUDE_DIRECTORIES( +# executorTest +# PUBLIC "${TD_SOURCE_DIR}/include/libs/executor/" +# PRIVATE "${TD_SOURCE_DIR}/source/libs/executor/inc" +# ) # ENDIF () - SET(CMAKE_CXX_STANDARD 11) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) @@ -33,3 +32,15 @@ TARGET_INCLUDE_DIRECTORIES( PUBLIC "${TD_SOURCE_DIR}/include/common" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) + +ADD_EXECUTABLE(queryPlanTests queryPlanTests.cpp) +TARGET_LINK_LIBRARIES( + queryPlanTests + PRIVATE os util common executor gtest_main qcom function planner scalar nodes 
vnode +) + +TARGET_INCLUDE_DIRECTORIES( + queryPlanTests + PUBLIC "${TD_SOURCE_DIR}/include/common" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inc" +) diff --git a/source/libs/executor/test/executorTests.cpp b/source/libs/executor/test/executorTests.cpp index ff33732b232..87887d2b2f0 100644 --- a/source/libs/executor/test/executorTests.cpp +++ b/source/libs/executor/test/executorTests.cpp @@ -115,7 +115,7 @@ SSDataBlock* getDummyBlock(SOperatorInfo* pOperator) { int32_t code = colDataSetVal(pColInfo, i, reinterpret_cast(&v), false); ASSERT(code == 0); - // sprintf(buf, "this is %d row", i); + // tsnprintf(buf, "this is %d row", i); // STR_TO_VARSTR(b1, buf); // // SColumnInfoData* pColInfo2 = static_cast(TARRAY_GET_ELEM(pBlock->pDataBlock, 1)); @@ -179,7 +179,7 @@ SSDataBlock* get2ColsDummyBlock(SOperatorInfo* pOperator) { code = colDataSetVal(pColInfo1, i, reinterpret_cast(&v), false); ASSERT(code == 0); - // sprintf(buf, "this is %d row", i); + // tsnprintf(buf, "this is %d row", i); // STR_TO_VARSTR(b1, buf); // // SColumnInfoData* pColInfo2 = static_cast(TARRAY_GET_ELEM(pBlock->pDataBlock, 1)); diff --git a/source/libs/executor/test/lhashTests.cpp b/source/libs/executor/test/lhashTests.cpp index daf59c6058c..89e1cd2b079 100644 --- a/source/libs/executor/test/lhashTests.cpp +++ b/source/libs/executor/test/lhashTests.cpp @@ -26,7 +26,7 @@ TEST(testCase, linear_hash_Tests) { taosSeedRand(taosGetTimestampSec()); - strcpy(tsTempDir, "/tmp/"); + tstrncpy((char*)tsTempDir, "/tmp/", sizeof(tsTempDir)); _hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT); diff --git a/source/libs/executor/test/queryPlanTests.cpp b/source/libs/executor/test/queryPlanTests.cpp new file mode 100755 index 00000000000..6710435aba5 --- /dev/null +++ b/source/libs/executor/test/queryPlanTests.cpp @@ -0,0 +1,3414 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wwrite-strings" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wformat" +#include + + +#ifdef WINDOWS +#define TD_USE_WINSOCK +#endif + +#include "os.h" + +#include "executor.h" +#include "executorInt.h" +#include "function.h" +#include "operator.h" +#include "taos.h" +#include "tdatablock.h" +#include "tdef.h" +#include "tvariant.h" +#include "stub.h" +#include "querytask.h" +#include "functionMgt.h" +#include "ttime.h" +#include "scheduler.h" + +namespace { + +#define QPT_MAX_LOOP 100000 +#define QPT_MAX_LEVEL_SUBPLAN_NUM 10 +#define QPT_MAX_SUBPLAN_LEVEL 2 +#define QPT_MAX_SUBPLAN_GROUP 5 +#define QPT_MAX_WHEN_THEN_NUM 10 +#define QPT_MAX_NODE_LEVEL 5 +#define QPT_MAX_STRING_LEN 1048576 +#define QPT_MAX_FUNC_PARAM 5 +#define QPT_MAX_LOGIC_PARAM 5 +#define QPT_MAX_NODE_LIST_NUM 5 +#define QPT_DEFAULT_VNODE_NUM 5 +#define QPT_MAX_DS_SRC_NUM 10 +#define QPT_MAX_ORDER_BY_NUM 10 +#define QPT_MAX_COLUMN_NUM 6 //8192 + +#define QPT_QUERY_NODE_COL 10000 + +typedef enum { + QPT_NODE_COLUMN, + QPT_NODE_EXPR, + QPT_NODE_FUNCTION, + QPT_NODE_VALUE, + QPT_NODE_SUBPLAN, + QPT_NODE_MAX_VALUE +} QPT_NODE_TYPE; + +enum { + QPT_PLAN_PHYSIC = 1, + QPT_PLAN_SINK, + QPT_PLAN_SUBPLAN, + QPT_PLAN_PLAN +}; + +typedef SNode* 
(*planBuildFunc)(int32_t); + + +typedef struct { + ENodeType type; + void* param; +} SQPTNodeParam; + +typedef struct { + bool singlePhysiNode; + uint64_t queryId; + uint64_t taskId; + int32_t subplanMaxLevel; + int32_t subplanType[QPT_MAX_SUBPLAN_LEVEL]; + int32_t subplanIdx[QPT_MAX_SUBPLAN_LEVEL]; + int32_t physiNodeParamNum; + SQPTNodeParam* physicNodeParam; +} SQPTPlanParam; + +typedef struct { + uint8_t precision; + char dbName[TSDB_DB_NAME_LEN]; +} SQPTDbParam; + + +typedef struct { + int32_t vnodeNum; + int32_t vgId; + SEpSet epSet; +} SQPTVnodeParam; + +typedef struct { + int32_t type; + char name[TSDB_COL_NAME_LEN]; + int32_t dtype; + int32_t len; + int8_t inUse; + bool hasIndex; + bool isPrimTs; + bool isPk; + EColumnType colType; +} SQPTCol; + +typedef struct { + int64_t uid; + int64_t suid; + int8_t tblType; + int32_t colNum; + int32_t tagNum; + int16_t pkNum; + char tblName[TSDB_TABLE_NAME_LEN]; + char tblAlias[TSDB_TABLE_NAME_LEN]; + SNodeList* pColList; + SNodeList* pTagList; + SNodeList* pColTagList; +} SQPTTblParam; + + +typedef struct { + bool correctExpected; + uint64_t schedulerId; + char userName[TSDB_USER_LEN]; + SQPTPlanParam plan; + SQPTDbParam db; + SQPTVnodeParam vnode; + SQPTTblParam tbl; +} SQPTParam; + + +typedef struct { + SPhysiNode* pCurr; + int32_t childrenNum; + SPhysiNode* pChild; // current child + SPhysiNode* pLeftChild; + SPhysiNode* pRightChild; + EOrder currTsOrder; + int16_t nextBlockId; + int32_t primaryTsSlotId; + int32_t nextSubplanId; + SExecTaskInfo* pCurrTask; +} SQPTBuildPlanCtx; + +typedef struct { + int32_t nodeLevel; + bool fromTable; + bool onlyTag; + bool onlyCol; + int16_t inputBlockId; + SNodeList* pInputList; +} SQPTMakeNodeCtx; + +typedef struct { + int64_t startTsUs; + int32_t code; + int64_t succeedTimes; + int64_t failedTimes; +} SQPTExecResult; + +typedef struct { + int32_t loopIdx; + char caseName[128]; + SQPTParam param; + SQPTBuildPlanCtx buildCtx; + SQPTMakeNodeCtx makeCtx; + SQPTMakeNodeCtx 
makeCtxBak; + SQPTExecResult result; +} SQPTCtx; + +typedef struct { + bool printTestInfo; + bool printInputRow; + bool printResRow; + bool logHistory; + bool noKeepResRows; +} SQPTCtrl; + +typedef struct { + int32_t type; + int32_t classify; + char* name; + planBuildFunc buildFunc; +} SQPTPlan; + +SNode* qptCreateTagScanPhysiNode(int32_t nodeType); +SNode* qptCreateTableScanPhysiNode(int32_t nodeType); +SNode* qptCreateTableSeqScanPhysiNode(int32_t nodeType); +SNode* qptCreateTableMergeScanPhysiNode(int32_t nodeType); +SNode* qptCreateStreamScanPhysiNode(int32_t nodeType); +SNode* qptCreateSysTableScanPhysiNode(int32_t nodeType); +SNode* qptCreateBlockDistScanPhysiNode(int32_t nodeType); +SNode* qptCreateLastRowScanPhysiNode(int32_t nodeType); +SNode* qptCreateTableCountScanPhysiNode(int32_t nodeType); + +SNode* qptCreateProjectPhysiNode(int32_t nodeType); +SNode* qptCreateMergeJoinPhysiNode(int32_t nodeType); +SNode* qptCreateHashAggPhysiNode(int32_t nodeType); +SNode* qptCreateExchangePhysiNode(int32_t nodeType); +SNode* qptCreateMergePhysiNode(int32_t nodeType); +SNode* qptCreateSortPhysiNode(int32_t nodeType); +SNode* qptCreateGroupSortPhysiNode(int32_t nodeType); +SNode* qptCreateIntervalPhysiNode(int32_t nodeType); +SNode* qptCreateMergeIntervalPhysiNode(int32_t nodeType); +SNode* qptCreateMergeAlignedIntervalPhysiNode(int32_t nodeType); +SNode* qptCreateStreamIntervalPhysiNode(int32_t nodeType); +SNode* qptCreateStreamFinalIntervalPhysiNode(int32_t nodeType); +SNode* qptCreateStreamSemiIntervalPhysiNode(int32_t nodeType); +SNode* qptCreateStreamMidIntervalPhysiNode(int32_t nodeType); +SNode* qptCreateFillPhysiNode(int32_t nodeType); +SNode* qptCreateStreamFillPhysiNode(int32_t nodeType); +SNode* qptCreateSessionPhysiNode(int32_t nodeType); +SNode* qptCreateStreamSessionPhysiNode(int32_t nodeType); +SNode* qptCreateStreamSemiSessionPhysiNode(int32_t nodeType); +SNode* qptCreateStreamFinalSessionPhysiNode(int32_t nodeType); +SNode* 
qptCreateStateWindowPhysiNode(int32_t nodeType); +SNode* qptCreateStreamStatePhysiNode(int32_t nodeType); +SNode* qptCreatePartitionPhysiNode(int32_t nodeType); +SNode* qptCreateStreamPartitionPhysiNode(int32_t nodeType); +SNode* qptCreateIndefRowsFuncPhysiNode(int32_t nodeType); +SNode* qptCreateInterpFuncPhysiNode(int32_t nodeType); +SNode* qptCreateMergeEventPhysiNode(int32_t nodeType); +SNode* qptCreateStreamEventPhysiNode(int32_t nodeType); +SNode* qptCreateCountWindowPhysiNode(int32_t nodeType); +SNode* qptCreateStreamCountWindowPhysiNode(int32_t nodeType); +SNode* qptCreateHashJoinPhysiNode(int32_t nodeType); +SNode* qptCreateGroupCachePhysiNode(int32_t nodeType); +SNode* qptCreateDynQueryCtrlPhysiNode(int32_t nodeType); +SNode* qptCreateDataDispatchPhysiNode(int32_t nodeType); +SNode* qptCreateDataInsertPhysiNode(int32_t nodeType); +SNode* qptCreateDataQueryInsertPhysiNode(int32_t nodeType); +SNode* qptCreateDataDeletePhysiNode(int32_t nodeType); +SNode* qptCreatePhysicalPlanNode(int32_t nodeIdx); +void qptCreatePhysiNodesTree(SPhysiNode** ppRes, SPhysiNode* pParent, int32_t level); +SNode* qptCreateQueryPlanNode(int32_t nodeType); +SNode* qptCreateSubplanNode(int32_t nodeType); + + +SQPTPlan qptPlans[] = { + {QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN, QPT_PLAN_PHYSIC, "tagScan", qptCreateTagScanPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN, QPT_PLAN_PHYSIC, "tableScan", qptCreateTableScanPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN, QPT_PLAN_PHYSIC, "tableSeqScan", qptCreateTableSeqScanPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN, QPT_PLAN_PHYSIC, "tableMergeScan", qptCreateTableMergeScanPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, QPT_PLAN_PHYSIC, "streamScan", qptCreateStreamScanPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN, QPT_PLAN_PHYSIC, "sysTableScan", qptCreateSysTableScanPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN, QPT_PLAN_PHYSIC, "blockDistScan", qptCreateBlockDistScanPhysiNode}, + 
{QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN, QPT_PLAN_PHYSIC, "lastRowScan", qptCreateLastRowScanPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_PROJECT, QPT_PLAN_PHYSIC, "project", qptCreateProjectPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN, QPT_PLAN_PHYSIC, "mergeJoin", qptCreateMergeJoinPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_HASH_AGG, QPT_PLAN_PHYSIC, "hashAgg", qptCreateHashAggPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_EXCHANGE, QPT_PLAN_PHYSIC, "exchange", qptCreateExchangePhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_MERGE, QPT_PLAN_PHYSIC, "merge", qptCreateMergePhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_SORT, QPT_PLAN_PHYSIC, "sort", qptCreateSortPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT, QPT_PLAN_PHYSIC, "groupSort", qptCreateGroupSortPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL, QPT_PLAN_PHYSIC, "interval", qptCreateIntervalPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL, QPT_PLAN_PHYSIC, "mergeInterval", qptCreateMergeIntervalPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL, QPT_PLAN_PHYSIC, "mergeAlignedInterval", qptCreateMergeAlignedIntervalPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, QPT_PLAN_PHYSIC, "streamInterval", qptCreateStreamIntervalPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL, QPT_PLAN_PHYSIC, "streamFinalInterval", qptCreateStreamFinalIntervalPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL, QPT_PLAN_PHYSIC, "streamSemiInterval", qptCreateStreamSemiIntervalPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_FILL, QPT_PLAN_PHYSIC, "fill", qptCreateFillPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL, QPT_PLAN_PHYSIC, "streamFill", qptCreateStreamFillPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION, QPT_PLAN_PHYSIC, "sessionWindow", qptCreateSessionPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, QPT_PLAN_PHYSIC, "streamSession", qptCreateStreamSessionPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION, QPT_PLAN_PHYSIC, "streamSemiSession", 
qptCreateStreamSemiSessionPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION, QPT_PLAN_PHYSIC, "streamFinalSession", qptCreateStreamFinalSessionPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE, QPT_PLAN_PHYSIC, "stateWindow", qptCreateStateWindowPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, QPT_PLAN_PHYSIC, "streamState", qptCreateStreamStatePhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_PARTITION, QPT_PLAN_PHYSIC, "partition", qptCreatePartitionPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION, QPT_PLAN_PHYSIC, "streamPartition", qptCreateStreamPartitionPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC, QPT_PLAN_PHYSIC, "indefRowsFunc", qptCreateIndefRowsFuncPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC, QPT_PLAN_PHYSIC, "interpFunc", qptCreateInterpFuncPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_DISPATCH, QPT_PLAN_SINK, "dataDispatch", qptCreateDataDispatchPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_INSERT, QPT_PLAN_SINK, "dataInseret", qptCreateDataInsertPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT, QPT_PLAN_SINK, "dataQueryInsert", qptCreateDataQueryInsertPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_DELETE, QPT_PLAN_SINK, "dataDelete", qptCreateDataDeletePhysiNode}, + {QUERY_NODE_PHYSICAL_SUBPLAN, QPT_PLAN_SUBPLAN, "subplan", qptCreateSubplanNode}, + {QUERY_NODE_PHYSICAL_PLAN, QPT_PLAN_PLAN, "plan", qptCreateQueryPlanNode}, + {QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN, QPT_PLAN_PHYSIC, "tableCountScan", qptCreateTableCountScanPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT, QPT_PLAN_PHYSIC, "eventWindow", qptCreateMergeEventPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, QPT_PLAN_PHYSIC, "streamEventWindow", qptCreateStreamEventPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN, QPT_PLAN_PHYSIC, "hashJoin", qptCreateHashJoinPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE, QPT_PLAN_PHYSIC, "groupCache", qptCreateGroupCachePhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL, QPT_PLAN_PHYSIC, "dynQueryCtrl", 
qptCreateDynQueryCtrlPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT, QPT_PLAN_PHYSIC, "countWindow", qptCreateCountWindowPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT, QPT_PLAN_PHYSIC, "streamCountWindow", qptCreateStreamCountWindowPhysiNode}, + {QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL, QPT_PLAN_PHYSIC, "streamMidInterval", qptCreateStreamMidIntervalPhysiNode} +}; + + +#define QPT_PHYSIC_NODE_NUM() (sizeof(qptPlans)/sizeof(qptPlans[0])) +#define QPT_RAND_BOOL_V ((taosRand() % 2) ? true : false) +#define QPT_RAND_ORDER_V (QPT_RAND_BOOL_V ? ORDER_ASC : ORDER_DESC) +#define QPT_RAND_INT_V (taosRand() * (QPT_RAND_BOOL_V ? 1 : -1)) +#define QPT_LOW_PROB() ((taosRand() % 11) == 0) +#define QPT_MID_PROB() ((taosRand() % 11) <= 1) +#define QPT_HIGH_PROB() ((taosRand() % 11) <= 7) + +#define QPT_CORRECT_HIGH_PROB() (qptCtx.param.correctExpected || QPT_HIGH_PROB()) +#define QPT_NCORRECT_LOW_PROB() (!qptCtx.param.correctExpected && QPT_LOW_PROB()) + +#define QPT_VALID_DESC(_desc) ((_desc) && (QUERY_NODE_DATABLOCK_DESC == nodeType(_desc))) + +SQPTCtx qptCtx = {0}; +SQPTCtrl qptCtrl = {1, 0, 0, 0, 0}; +bool qptErrorRerun = false; +bool qptInRerun = false; +int32_t qptSink[] = {QUERY_NODE_PHYSICAL_PLAN_DISPATCH, QUERY_NODE_PHYSICAL_PLAN_INSERT, QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT, QUERY_NODE_PHYSICAL_PLAN_DELETE}; + + +SNode* qptMakeExprNode(SNode** ppNode); +void qptMakeNodeList(QPT_NODE_TYPE nodeType, SNodeList** ppList); + +int32_t qptGetSpecificPlanIndex(int32_t type) { + int32_t planNum = sizeof(qptPlans) / sizeof(qptPlans[0]); + for (int32_t i = 0; i < planNum; ++i) { + if (qptPlans[i].type == type) { + return i; + } + } + + return -1; +} + +int32_t qptGetColumnRandLen(int32_t colType) { + switch (colType) { + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_FLOAT: + case 
TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_USMALLINT: + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_UBIGINT: + return tDataTypes[colType].bytes; + case TSDB_DATA_TYPE_VARCHAR: + case TSDB_DATA_TYPE_GEOMETRY: + case TSDB_DATA_TYPE_JSON: + case TSDB_DATA_TYPE_VARBINARY: + case TSDB_DATA_TYPE_DECIMAL: + case TSDB_DATA_TYPE_BLOB: + case TSDB_DATA_TYPE_MEDIUMBLOB: + case TSDB_DATA_TYPE_NCHAR: + return taosRand() % TSDB_MAX_BINARY_LEN; + default: + assert(0); + break; + } + + return 0; +} + + +void qptInitSingleTableCol(SQPTCol* pCol, int32_t idx, EColumnType colType) { + if (COLUMN_TYPE_COLUMN == colType && 0 == idx) { + sprintf(pCol->name, "primts%d", idx); + pCol->dtype = TSDB_DATA_TYPE_TIMESTAMP; + pCol->len = qptGetColumnRandLen(pCol->dtype); + pCol->inUse = 0; + pCol->hasIndex = false; + pCol->isPrimTs = true; + pCol->isPk = false; + pCol->colType = colType; + + return; + } + + sprintf(pCol->name, "%s%d", COLUMN_TYPE_COLUMN == colType ? "col" : "tag", idx); + pCol->dtype = taosRand() % TSDB_DATA_TYPE_MAX; + pCol->len = qptGetColumnRandLen(pCol->dtype); + pCol->inUse = 0; + pCol->hasIndex = COLUMN_TYPE_COLUMN == colType ? false : QPT_RAND_BOOL_V; + pCol->isPrimTs = false; + pCol->isPk = COLUMN_TYPE_COLUMN == colType ? 
QPT_RAND_BOOL_V : false;; + pCol->colType = colType; +} + + +void qptPrintBeginInfo() { + if (!qptCtrl.printTestInfo) { + return; + } + + printf("\n%dth TEST [%s] START\n", qptCtx.loopIdx, qptCtx.caseName); + +/* + char inputStat[4] = {0}; + JT_PRINTF("\n%dth TEST [%s] START\nBasic Info:\n\t asc:%d\n\t filter:%d\n\t maxRows:left-%d right-%d\n\t " + "maxGrpRows:left-%d right-%d\n\t blkRows:%d\n\t colCond:%s\n\t joinType:%s\n\t " + "subType:%s\n\t inputStat:%s\n\t groupJoin:%s\n", jtCtx.loopIdx, caseName, jtCtx.asc, jtCtx.filter, jtCtx.leftMaxRows, jtCtx.rightMaxRows, + jtCtx.leftMaxGrpRows, jtCtx.rightMaxGrpRows, jtCtx.blkRows, jtColCondStr[jtCtx.colCond], jtJoinTypeStr[jtCtx.joinType], + jtSubTypeStr[jtCtx.subType], getInputStatStr(inputStat), jtCtx.grpJoin ? "true" : "false"); + + if (JOIN_STYPE_ASOF == jtCtx.subType) { + JT_PRINTF("\t asofOp:%s\n\t JLimit:%" PRId64 "\n", getAsofOpStr(), jtCtx.jLimit); + } else if (JOIN_STYPE_WIN == jtCtx.subType) { + JT_PRINTF("\t windowOffset:[%" PRId64 ", %" PRId64 "]\n\t JLimit:%" PRId64 "\n", jtCtx.winStartOffset, jtCtx.winEndOffset, jtCtx.jLimit); + } + + JT_PRINTF("Input Info:\n\t totalBlk:left-%d right-%d\n\t totalRows:left-%d right-%d\n\t " + "blkRowSize:%d\n\t inputCols:left-%s %s %s %s right-%s %s %s %s\n", + (int32_t)taosArrayGetSize(jtCtx.leftBlkList), (int32_t)taosArrayGetSize(jtCtx.rightBlkList), + jtCtx.leftTotalRows, jtCtx.rightTotalRows, + jtCtx.blkRowSize, tDataTypes[jtInputColType[0]].name, tDataTypes[jtInputColType[1]].name, + tDataTypes[jtInputColType[2]].name, tDataTypes[jtInputColType[3]].name, tDataTypes[jtInputColType[0]].name, + tDataTypes[jtInputColType[1]].name, tDataTypes[jtInputColType[2]].name, tDataTypes[jtInputColType[3]].name); + + if (jtCtx.colEqNum) { + JT_PRINTF("\t colEqNum:%d\n", jtCtx.colEqNum); + printColList("colEqList", false, jtCtx.colEqList, false, "="); + } + + if (jtCtx.colOnNum) { + JT_PRINTF("\t colOnNum:%d\n", jtCtx.colOnNum); + printColList("colOnList", false, jtCtx.colOnList, 
false, ">"); + } + + if (jtCtx.leftFilterNum) { + JT_PRINTF("\t leftFilterNum:%d\n", jtCtx.leftFilterNum); + printColList("leftFilterList", true, jtCtx.leftFilterColList, true, ">"); + } + + if (jtCtx.rightFilterNum) { + JT_PRINTF("\t rightFilterNum:%d\n", jtCtx.rightFilterNum); + printColList("rightFilterList", false, jtCtx.rightFilterColList, true, ">"); + } + + JT_PRINTF("\t resColSize:%d\n\t resColNum:%d\n\t resColList:", jtCtx.resColSize, jtCtx.resColNum); + for (int32_t i = 0; i < jtCtx.resColNum; ++i) { + int32_t s = jtCtx.resColInSlot[i]; + int32_t idx = s >= MAX_SLOT_NUM ? s - MAX_SLOT_NUM : s; + JT_PRINTF("%sc%d[%s]\t", s >= MAX_SLOT_NUM ? "r" : "l", s, tDataTypes[jtInputColType[idx]].name); + } + + if (jtCtrl.printInputRow) { + printInputData(); + } +*/ +} + +void qptPrintEndInfo() { + if (!qptCtrl.printTestInfo) { + return; + } + + printf("\n\t%dth TEST [%s] END, result - %s%s\n", qptCtx.loopIdx, qptCtx.caseName, + (0 == qptCtx.result.code) ? "succeed" : "failed with error:", + (0 == qptCtx.result.code) ? "" : tstrerror(qptCtx.result.code)); +} + +void qptPrintStatInfo() { + printf("\n\tAll %d times TEST [%s] END, result - succeed:%" PRId64 " failed:%" PRId64 "\n", qptCtx.loopIdx + 1, qptCtx.caseName, + qptCtx.result.succeedTimes, qptCtx.result.failedTimes); +} + + +bool qptGetDynamicOp() { + if (QPT_NCORRECT_LOW_PROB()) { + return QPT_RAND_BOOL_V; + } + + if (qptCtx.buildCtx.pChild) { + return qptCtx.buildCtx.pChild->dynamicOp; + } + + return QPT_RAND_BOOL_V; +} + +EOrder qptGetCurrTsOrder() { + return QPT_CORRECT_HIGH_PROB() ? qptCtx.buildCtx.currTsOrder : QPT_RAND_ORDER_V; +} + +void qptGetRandValue(uint8_t* pType, int32_t* pLen, void** ppVal) { + if (*pType == (uint8_t)-1 || QPT_NCORRECT_LOW_PROB()) { + int32_t typeMax = TSDB_DATA_TYPE_MAX; + if (QPT_NCORRECT_LOW_PROB()) { + typeMax++; + } + + *pType = taosRand() % typeMax; + } + + switch (*pType) { + case TSDB_DATA_TYPE_NULL: + *pLen = QPT_CORRECT_HIGH_PROB() ? 
0 : taosRand(); + if (ppVal) { + *ppVal = NULL; + } + break; + case TSDB_DATA_TYPE_BOOL: + *pLen = QPT_CORRECT_HIGH_PROB() ? tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(bool*)*ppVal = QPT_RAND_BOOL_V; + } + break; + case TSDB_DATA_TYPE_TINYINT: + *pLen = QPT_CORRECT_HIGH_PROB() ? tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(int8_t*)*ppVal = taosRand(); + } + break; + case TSDB_DATA_TYPE_SMALLINT: + *pLen = QPT_CORRECT_HIGH_PROB() ? tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(int16_t*)*ppVal = taosRand(); + } + break; + case TSDB_DATA_TYPE_INT: + *pLen = QPT_CORRECT_HIGH_PROB() ? tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(int32_t*)*ppVal = taosRand(); + } + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: + *pLen = QPT_CORRECT_HIGH_PROB() ? tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(int64_t*)*ppVal = taosRand(); + } + break; + case TSDB_DATA_TYPE_FLOAT: + *pLen = QPT_CORRECT_HIGH_PROB() ? tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(float*)*ppVal = taosRand(); + } + break; + case TSDB_DATA_TYPE_DOUBLE: + *pLen = QPT_CORRECT_HIGH_PROB() ? 
tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(double*)*ppVal = taosRand(); + } + break; + case TSDB_DATA_TYPE_VARCHAR: + case TSDB_DATA_TYPE_GEOMETRY: + case TSDB_DATA_TYPE_JSON: + case TSDB_DATA_TYPE_VARBINARY: + case TSDB_DATA_TYPE_DECIMAL: + case TSDB_DATA_TYPE_BLOB: + case TSDB_DATA_TYPE_MEDIUMBLOB: + *pLen = taosRand() % QPT_MAX_STRING_LEN; + if (ppVal) { + *ppVal = taosMemoryCalloc(1, *pLen + VARSTR_HEADER_SIZE); + assert(*ppVal); + varDataSetLen(*ppVal, *pLen); + memset((char*)*ppVal + VARSTR_HEADER_SIZE, 'A' + taosRand() % 26, *pLen); + } + break; + case TSDB_DATA_TYPE_NCHAR: { + *pLen = taosRand() % QPT_MAX_STRING_LEN; + if (ppVal) { + char* pTmp = (char*)taosMemoryCalloc(1, *pLen + 1); + assert(pTmp); + memset(pTmp, 'A' + taosRand() % 26, *pLen); + *ppVal = taosMemoryCalloc(1, *pLen * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); + assert(*ppVal); + assert(taosMbsToUcs4(pTmp, *pLen, (TdUcs4 *)varDataVal(*ppVal), *pLen * TSDB_NCHAR_SIZE, NULL)); + *pLen *= TSDB_NCHAR_SIZE; + varDataSetLen(*ppVal, *pLen); + taosMemoryFree(pTmp); + } + break; + } + case TSDB_DATA_TYPE_UTINYINT: + *pLen = QPT_CORRECT_HIGH_PROB() ? tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(uint8_t*)*ppVal = taosRand(); + } + break; + case TSDB_DATA_TYPE_USMALLINT: + *pLen = QPT_CORRECT_HIGH_PROB() ? tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(uint16_t*)*ppVal = taosRand(); + } + break; + case TSDB_DATA_TYPE_UINT: + *pLen = QPT_CORRECT_HIGH_PROB() ? tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(uint32_t*)*ppVal = taosRand(); + } + break; + case TSDB_DATA_TYPE_UBIGINT: + *pLen = QPT_CORRECT_HIGH_PROB() ? 
tDataTypes[*pType].bytes : taosRand(); + if (ppVal) { + *ppVal = taosMemoryMalloc(tDataTypes[*pType].bytes); + assert(*ppVal); + *(uint64_t*)*ppVal = taosRand(); + } + break; + default: + *pLen = taosRand() % QPT_MAX_STRING_LEN; + if (ppVal) { + *ppVal = taosMemoryCalloc(1, *pLen); + assert(*ppVal); + memset((char*)*ppVal, 'a' + taosRand() % 26, *pLen); + } + break; + } +} + +void qptFreeRandValue(int32_t* pType, void* pVal) { + switch (*pType) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_USMALLINT: + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_UBIGINT: + taosMemoryFree(pVal); + break; + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_VARCHAR: + case TSDB_DATA_TYPE_GEOMETRY: + case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_JSON: + case TSDB_DATA_TYPE_VARBINARY: + case TSDB_DATA_TYPE_DECIMAL: + case TSDB_DATA_TYPE_BLOB: + case TSDB_DATA_TYPE_MEDIUMBLOB: + break; + default: + assert(0); + break; + } +} + +void qptGetRandRealTableType(int8_t* tableType) { + while (true) { + int8_t tType = taosRand() % TSDB_TABLE_MAX; + switch (tType) { + case TSDB_SUPER_TABLE: + case TSDB_CHILD_TABLE: + case TSDB_NORMAL_TABLE: + case TSDB_SYSTEM_TABLE: + *tableType = tType; + return; + default: + break; + } + } +} + +int32_t qptGetInputSlotId(SDataBlockDescNode* pInput) { + if (pInput && pInput->pSlots && pInput->pSlots->length > 0 && QPT_CORRECT_HIGH_PROB()) { + return taosRand() % pInput->pSlots->length; + } + + return taosRand(); +} + +ENullOrder qptGetRandNullOrder() { + if (QPT_NCORRECT_LOW_PROB()) { + return (ENullOrder)taosRand(); + } + + return (ENullOrder)(taosRand() % NULL_ORDER_LAST + 1); +} + +int8_t qptGetRandTimestampUnit() { + static int8_t units[] = {TIME_UNIT_NANOSECOND, TIME_UNIT_MICROSECOND, TIME_UNIT_MILLISECOND, 
TIME_UNIT_SECOND, + TIME_UNIT_MINUTE, TIME_UNIT_HOUR, TIME_UNIT_DAY, TIME_UNIT_WEEK, TIME_UNIT_MONTH, TIME_UNIT_YEAR}; + + return units[taosRand() % (sizeof(units) / sizeof(units[0]))]; +} + +int32_t qptGetInputPrimaryTsSlotId() { + if (QPT_CORRECT_HIGH_PROB()) { + return qptCtx.buildCtx.primaryTsSlotId; + } + + return taosRand() % QPT_MAX_COLUMN_NUM; +} + +int32_t qptGetRandSubplanMsgType() { + int32_t msgTypeList[] = {TDMT_VND_DELETE, TDMT_SCH_MERGE_QUERY, TDMT_SCH_QUERY, TDMT_VND_SUBMIT}; + + return QPT_CORRECT_HIGH_PROB() ? msgTypeList[taosRand() % (sizeof(msgTypeList)/sizeof(msgTypeList[0]))] : taosRand(); +} + +void qptNodesCalloc(int32_t num, int32_t size, void** pOut) { + void* p = taosMemoryCalloc(num, size); + assert(p); + *(char*)p = 0; + *pOut = (char*)p + 1; +} + +void qptNodesFree(void* pNode) { + void* p = (char*)pNode - 1; + taosMemoryFree(p); +} + +EFillMode qptGetRandFillMode() { + if (QPT_CORRECT_HIGH_PROB()) { + return (EFillMode)(taosRand() % FILL_MODE_NEXT + 1); + } + + return (EFillMode)(taosRand()); +} + + +void qptGetRandTimeWindow(STimeWindow* pWindow) { + if (QPT_CORRECT_HIGH_PROB()) { + pWindow->skey = taosRand(); + pWindow->ekey = pWindow->skey + taosRand(); + return; + } + + pWindow->skey = taosRand(); + pWindow->ekey = taosRand(); +} + +int32_t qptGetSubplanNum(SNodeList* pList) { + if (QPT_NCORRECT_LOW_PROB()) { + return taosRand(); + } + + int32_t subplanNum = 0; + SNode* pNode = NULL; + FOREACH(pNode, pList) { + if (NULL == pNode || QUERY_NODE_NODE_LIST != nodeType(pNode)) { + continue; + } + + SNodeListNode* pNodeListNode = (SNodeListNode*)pNode; + + if (NULL == pNodeListNode->pNodeList) { + continue; + } + + subplanNum += pNodeListNode->pNodeList->length; + } + + return subplanNum; +} + +int32_t qptNodesListAppend(SNodeList* pList, SNode* pNode) { + SListCell* p = NULL; + qptNodesCalloc(1, sizeof(SListCell), (void**)&p); + + p->pNode = pNode; + if (NULL == pList->pHead) { + pList->pHead = p; + } + if (NULL != pList->pTail) { + 
pList->pTail->pNext = p; + } + p->pPrev = pList->pTail; + pList->pTail = p; + ++(pList->length); + return TSDB_CODE_SUCCESS; +} + +int32_t qptNodesListStrictAppend(SNodeList* pList, SNode* pNode) { + int32_t code = qptNodesListAppend(pList, pNode); + if (TSDB_CODE_SUCCESS != code) { + nodesDestroyNode(pNode); + } + return code; +} + +int32_t qptNodesListMakeStrictAppend(SNodeList** pList, SNode* pNode) { + if (NULL == *pList) { + int32_t code = nodesMakeList(pList); + if (NULL == *pList) { + return code; + } + } + return qptNodesListStrictAppend(*pList, pNode); +} + +SNode* qptMakeRandNode(SNode** ppNode) { + SNode* pNode = NULL; + nodesMakeNode((ENodeType)taosRand(), ppNode ? ppNode : &pNode); + return ppNode ? *ppNode : pNode; +} + + + +SNode* qptMakeColumnFromTable(int32_t colIdx) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(NULL); + } + + if (colIdx < 0) { + return NULL; + } + + SColumnNode* pCol = NULL; + assert(0 == nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol)); + assert(pCol); + + SQPTCol fakeCol; + fakeCol.type = QPT_QUERY_NODE_COL; + qptInitSingleTableCol(&fakeCol, taosRand(), (EColumnType)(taosRand() % COLUMN_TYPE_GROUP_KEY + 1)); + + SQPTCol* pTbCol = qptCtx.makeCtx.pInputList ? (SQPTCol*)nodesListGetNode(qptCtx.makeCtx.pInputList, colIdx) : &fakeCol; + + int16_t blkId = QPT_CORRECT_HIGH_PROB() ? 
qptCtx.makeCtx.inputBlockId : taosRand(); + + if (QPT_CORRECT_HIGH_PROB()) { + pCol->node.resType.type = pTbCol->dtype; + pCol->node.resType.bytes = pTbCol->len; + + pCol->tableId = qptCtx.param.tbl.uid; + pCol->tableType = qptCtx.param.tbl.tblType; + pCol->colId = colIdx; + pCol->projIdx = colIdx; + pCol->colType = pTbCol->colType; + pCol->hasIndex = pTbCol->hasIndex; + pCol->isPrimTs = pTbCol->isPrimTs; + strcpy(pCol->dbName, qptCtx.param.db.dbName); + strcpy(pCol->tableName, qptCtx.param.tbl.tblName); + strcpy(pCol->tableAlias, qptCtx.param.tbl.tblAlias); + strcpy(pCol->colName, pTbCol->name); + pCol->dataBlockId = blkId; + pCol->slotId = colIdx; + pCol->numOfPKs = qptCtx.param.tbl.pkNum; + pCol->tableHasPk = qptCtx.param.tbl.pkNum > 0; + pCol->isPk = pTbCol->isPk; + pCol->projRefIdx = 0; + pCol->resIdx = 0; + } else { + qptGetRandValue(&pCol->node.resType.type, &pCol->node.resType.bytes, NULL); + + pCol->tableId = taosRand(); + pCol->tableType = taosRand() % TSDB_TABLE_MAX; + pCol->colId = QPT_RAND_BOOL_V ? taosRand() : colIdx; + pCol->projIdx = taosRand(); + pCol->colType = QPT_RAND_BOOL_V ? pTbCol->colType : (EColumnType)(taosRand() % (COLUMN_TYPE_GROUP_KEY + 1)); + pCol->hasIndex = QPT_RAND_BOOL_V; + pCol->isPrimTs = QPT_RAND_BOOL_V; + if (QPT_RAND_BOOL_V) { + pCol->dbName[0] = 0; + } else { + strcpy(pCol->dbName, qptCtx.param.db.dbName); + } + if (QPT_RAND_BOOL_V) { + pCol->tableName[0] = 0; + } else { + strcpy(pCol->tableName, qptCtx.param.tbl.tblName); + } + if (QPT_RAND_BOOL_V) { + pCol->tableAlias[0] = 0; + } else { + strcpy(pCol->tableAlias, qptCtx.param.tbl.tblAlias); + } + if (QPT_RAND_BOOL_V) { + pCol->colName[0] = 0; + } else { + strcpy(pCol->colName, pTbCol->name); + } + + pCol->dataBlockId = blkId; + pCol->slotId = QPT_RAND_BOOL_V ? taosRand() : colIdx; + pCol->numOfPKs = QPT_RAND_BOOL_V ? taosRand() : qptCtx.param.tbl.pkNum; + pCol->tableHasPk = QPT_RAND_BOOL_V ? QPT_RAND_BOOL_V : (qptCtx.param.tbl.pkNum > 0); + pCol->isPk = QPT_RAND_BOOL_V ? 
QPT_RAND_BOOL_V : pTbCol->isPk; + pCol->projRefIdx = taosRand(); + pCol->resIdx = taosRand(); + } + + return (SNode*)pCol; +} + + +SNode* qptMakeWhenThenNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + assert(0 == nodesMakeNode(QUERY_NODE_WHEN_THEN, ppNode)); + assert(*ppNode); + SWhenThenNode* pWhenThen = (SWhenThenNode*)*ppNode; + + qptMakeExprNode(&pWhenThen->pWhen); + + qptMakeExprNode(&pWhenThen->pThen); + + return *ppNode; +} + + +SNode* qptMakeCaseWhenNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + assert(0 == nodesMakeNode(QUERY_NODE_CASE_WHEN, ppNode)); + assert(*ppNode); + + SCaseWhenNode* pCaseWhen = (SCaseWhenNode*)*ppNode; + + qptCtx.makeCtx.nodeLevel++; + + if (QPT_RAND_BOOL_V) { + qptMakeExprNode(&pCaseWhen->pCase); + } + + if (QPT_RAND_BOOL_V) { + qptMakeExprNode(&pCaseWhen->pElse); + } + + int32_t whenNum = taosRand() % QPT_MAX_WHEN_THEN_NUM + (QPT_CORRECT_HIGH_PROB() ? 1 : 0); + for (int32_t i = 0; i < whenNum; ++i) { + SNode* pNode = NULL; + qptMakeWhenThenNode(&pNode); + qptNodesListMakeStrictAppend(&pCaseWhen->pWhenThenList, pNode); + } + + return *ppNode; +} + + +SNode* qptMakeOperatorNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + EOperatorType opType = OPERATOR_ARRAY[taosRand() % (sizeof(OPERATOR_ARRAY)/sizeof(OPERATOR_ARRAY[0]))]; + assert(0 == nodesMakeNode(QUERY_NODE_OPERATOR, ppNode)); + + SOperatorNode* pOp = (SOperatorNode*)*ppNode; + pOp->opType = QPT_CORRECT_HIGH_PROB() ? 
opType : (EOperatorType)(opType + 1); + + qptCtx.makeCtx.nodeLevel++; + + switch (pOp->opType) { + case OP_TYPE_ADD: + case OP_TYPE_SUB: + case OP_TYPE_MULTI: + case OP_TYPE_DIV: + case OP_TYPE_REM: + case OP_TYPE_BIT_AND: + case OP_TYPE_BIT_OR: + case OP_TYPE_GREATER_THAN: + case OP_TYPE_GREATER_EQUAL: + case OP_TYPE_LOWER_THAN: + case OP_TYPE_LOWER_EQUAL: + case OP_TYPE_EQUAL: + case OP_TYPE_NOT_EQUAL: + case OP_TYPE_LIKE: + case OP_TYPE_NOT_LIKE: + case OP_TYPE_MATCH: + case OP_TYPE_NMATCH: + case OP_TYPE_IN: + case OP_TYPE_NOT_IN: + case OP_TYPE_JSON_GET_VALUE: + case OP_TYPE_JSON_CONTAINS: + case OP_TYPE_ASSIGN: + if (QPT_CORRECT_HIGH_PROB()) { + qptMakeExprNode(&pOp->pLeft); + qptMakeExprNode(&pOp->pRight); + } else { + if (QPT_RAND_BOOL_V) { + qptMakeExprNode(&pOp->pLeft); + } + if (QPT_RAND_BOOL_V) { + qptMakeExprNode(&pOp->pRight); + } + } + break; + + case OP_TYPE_IS_NULL: + case OP_TYPE_IS_NOT_NULL: + case OP_TYPE_IS_TRUE: + case OP_TYPE_IS_FALSE: + case OP_TYPE_IS_UNKNOWN: + case OP_TYPE_IS_NOT_TRUE: + case OP_TYPE_IS_NOT_FALSE: + case OP_TYPE_IS_NOT_UNKNOWN: + case OP_TYPE_MINUS: + if (QPT_CORRECT_HIGH_PROB()) { + qptMakeExprNode(&pOp->pLeft); + } else { + if (QPT_RAND_BOOL_V) { + qptMakeExprNode(&pOp->pLeft); + } + if (QPT_RAND_BOOL_V) { + qptMakeExprNode(&pOp->pRight); + } + } + break; + default: + if (QPT_RAND_BOOL_V) { + qptMakeExprNode(&pOp->pLeft); + } + if (QPT_RAND_BOOL_V) { + qptMakeExprNode(&pOp->pRight); + } + break; + } + + return *ppNode; +} + +SNode* qptMakeColumnNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + SColumnNode* pCol = NULL; + + if (QPT_CORRECT_HIGH_PROB() && qptCtx.makeCtx.pInputList) { + SNodeList* pColList = qptCtx.makeCtx.pInputList; + int32_t colIdx = taosRand() % pColList->length; + SQPTCol* pNode = (SQPTCol*)nodesListGetNode(pColList, colIdx); + if (pNode) { + switch (pNode->type) { + case QUERY_NODE_SLOT_DESC: { + nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + 
SSlotDescNode* pSlot = (SSlotDescNode*)pNode; + pCol->node.resType = pSlot->dataType; + pCol->dataBlockId = qptCtx.makeCtx.inputBlockId; + pCol->slotId = pSlot->slotId; + break; + } + case QPT_QUERY_NODE_COL: { + pCol = (SColumnNode*)qptMakeColumnFromTable(colIdx); + break; + } + default: + break; + } + } + } + + if (NULL == pCol) { + nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + qptGetRandValue(&pCol->node.resType.type, &pCol->node.resType.bytes, NULL); + pCol->dataBlockId = taosRand(); + pCol->slotId = taosRand(); + } + + *ppNode = (SNode*)pCol; + + return *ppNode; +} + +void qptNodesSetValueNodeValue(SValueNode* pNode, void* value) { + switch (pNode->node.resType.type) { + case TSDB_DATA_TYPE_NULL: + taosMemoryFree(value); + break; + case TSDB_DATA_TYPE_BOOL: + pNode->datum.b = *(bool*)value; + taosMemoryFree(value); + *(bool*)&pNode->typeData = qptCtx.param.correctExpected ? pNode->datum.b : QPT_RAND_BOOL_V; + break; + case TSDB_DATA_TYPE_TINYINT: + pNode->datum.i = *(int8_t*)value; + taosMemoryFree(value); + *(int8_t*)&pNode->typeData = qptCtx.param.correctExpected ? pNode->datum.i : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_SMALLINT: + pNode->datum.i = *(int16_t*)value; + taosMemoryFree(value); + *(int16_t*)&pNode->typeData = qptCtx.param.correctExpected ? pNode->datum.i : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_INT: + pNode->datum.i = *(int32_t*)value; + taosMemoryFree(value); + *(int32_t*)&pNode->typeData = qptCtx.param.correctExpected ? pNode->datum.i : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_BIGINT: + pNode->datum.i = *(int64_t*)value; + taosMemoryFree(value); + *(int64_t*)&pNode->typeData = qptCtx.param.correctExpected ? pNode->datum.i : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_TIMESTAMP: + pNode->datum.i = *(int64_t*)value; + taosMemoryFree(value); + *(int64_t*)&pNode->typeData = qptCtx.param.correctExpected ? 
pNode->datum.i : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_UTINYINT: + pNode->datum.u = *(int8_t*)value; + taosMemoryFree(value); + *(int8_t*)&pNode->typeData = qptCtx.param.correctExpected ? pNode->datum.u : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_USMALLINT: + pNode->datum.u = *(int16_t*)value; + taosMemoryFree(value); + *(int16_t*)&pNode->typeData = qptCtx.param.correctExpected ? pNode->datum.u : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_UINT: + pNode->datum.u = *(int32_t*)value; + taosMemoryFree(value); + *(int32_t*)&pNode->typeData = qptCtx.param.correctExpected ? pNode->datum.u : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_UBIGINT: + pNode->datum.u = *(uint64_t*)value; + taosMemoryFree(value); + *(uint64_t*)&pNode->typeData = qptCtx.param.correctExpected ? pNode->datum.u : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_FLOAT: + pNode->datum.d = *(float*)value; + taosMemoryFree(value); + *(float*)&pNode->typeData = qptCtx.param.correctExpected ? pNode->datum.d : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_DOUBLE: + pNode->datum.d = *(double*)value; + taosMemoryFree(value); + *(double*)&pNode->typeData = qptCtx.param.correctExpected ? 
pNode->datum.d : QPT_RAND_INT_V; + break; + case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_VARCHAR: + case TSDB_DATA_TYPE_VARBINARY: + case TSDB_DATA_TYPE_JSON: + case TSDB_DATA_TYPE_GEOMETRY: + if (qptCtx.param.correctExpected || QPT_MID_PROB()) { + pNode->datum.p = (char*)value; + } else { + } + taosMemoryFree(value); + pNode->datum.p = NULL; + break; + case TSDB_DATA_TYPE_DECIMAL: + case TSDB_DATA_TYPE_BLOB: + case TSDB_DATA_TYPE_MEDIUMBLOB: + taosMemoryFree(value); + pNode->datum.p = NULL; + break; + default: + taosMemoryFree(value); + break; + } +} + + +SNode* qptMakeValueNode(uint8_t valType, SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + SValueNode* pVal = NULL; + nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); + + int32_t valBytes; + void* pValue = NULL; + qptGetRandValue(&valType, &valBytes, &pValue); + + pVal->node.resType.type = valType; + pVal->node.resType.bytes = valBytes; + + qptNodesSetValueNodeValue(pVal, pValue); + + *ppNode = (SNode*)pVal; + + return *ppNode; +} + +SNode* qptMakeFunctionNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + SFunctionNode* pFunc = NULL; + nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + + if (QPT_CORRECT_HIGH_PROB()) { + int32_t funcIdx = taosRand() % funcMgtBuiltinsNum; + char* funcName = fmGetFuncName(funcIdx); + strcpy(pFunc->functionName, funcName); + taosMemoryFree(funcName); + fmGetFuncInfo(pFunc, NULL, 0); + } else { + int32_t funcIdx = taosRand(); + if (QPT_RAND_BOOL_V) { + strcpy(pFunc->functionName, "invalidFuncName"); + } else { + pFunc->functionName[0] = 0; + } + fmGetFuncInfo(pFunc, NULL, 0); + } + + qptCtx.makeCtx.nodeLevel++; + + if (QPT_CORRECT_HIGH_PROB()) { + // TODO + } else { + int32_t paramNum = taosRand() % QPT_MAX_FUNC_PARAM; + for (int32_t i = 0; i < paramNum; ++i) { + SNode* pNode = NULL; + qptMakeExprNode(&pNode); + qptNodesListMakeStrictAppend(&pFunc->pParameterList, pNode); + } + } + + 
*ppNode = (SNode*)pFunc; + + return *ppNode; +} + + + + +SNode* qptMakeLogicCondNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + SLogicConditionNode* pLogic = NULL; + nodesMakeNode(QUERY_NODE_LOGIC_CONDITION, (SNode**)&pLogic); + + if (QPT_CORRECT_HIGH_PROB()) { + pLogic->condType = (taosRand() % 3) ? ((taosRand() % 2) ? LOGIC_COND_TYPE_AND : LOGIC_COND_TYPE_OR) : LOGIC_COND_TYPE_NOT; + } else { + pLogic->condType = (ELogicConditionType)taosRand(); + } + + qptCtx.makeCtx.nodeLevel++; + + int32_t paramNum = QPT_CORRECT_HIGH_PROB() ? (taosRand() % QPT_MAX_LOGIC_PARAM + 1) : (taosRand() % QPT_MAX_LOGIC_PARAM); + for (int32_t i = 0; i < paramNum; ++i) { + SNode* pNode = NULL; + qptMakeExprNode(&pNode); + qptNodesListMakeStrictAppend(&pLogic->pParameterList, pNode); + } + + *ppNode = (SNode*)pLogic; + + return *ppNode; +} + +SNode* qptMakeNodeListNode(QPT_NODE_TYPE nodeType, SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + SNode* pTmp = NULL; + if (NULL == ppNode) { + ppNode = &pTmp; + } + + SNodeListNode* pList = NULL; + nodesMakeNode(QUERY_NODE_NODE_LIST, (SNode**)&pList); + + qptCtx.makeCtx.nodeLevel++; + + qptMakeNodeList(nodeType, &pList->pNodeList); + + *ppNode = (SNode*)pList; + + return *ppNode; +} + +SNode* qptMakeTempTableNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + STempTableNode* pTemp = NULL; + assert(0 == nodesMakeNode(QUERY_NODE_TEMP_TABLE, (SNode**)&pTemp)); + + if (QPT_CORRECT_HIGH_PROB()) { + // TODO + } + + *ppNode = (SNode*)pTemp; + + return *ppNode; +} + +SNode* qptMakeJoinTableNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + SJoinTableNode* pJoin = NULL; + assert(0 == nodesMakeNode(QUERY_NODE_JOIN_TABLE, (SNode**)&pJoin)); + + if (QPT_CORRECT_HIGH_PROB()) { + // TODO + } + + *ppNode = (SNode*)pJoin; + + return *ppNode; +} + +SNode* 
qptMakeRealTableNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + SRealTableNode* pReal = NULL; + assert(0 == nodesMakeNode(QUERY_NODE_REAL_TABLE, (SNode**)&pReal)); + + if (QPT_CORRECT_HIGH_PROB()) { + // TODO + } + + *ppNode = (SNode*)pReal; + + return *ppNode; +} + + + +SNode* qptMakeNonRealTableNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + if (QPT_CORRECT_HIGH_PROB()) { + if (QPT_RAND_BOOL_V) { + qptMakeTempTableNode(ppNode); + } else { + qptMakeJoinTableNode(ppNode); + } + } else { + qptMakeRealTableNode(ppNode); + } + + return *ppNode; +} + +SNode* qptMakeExprNode(SNode** ppNode) { + SNode* pNode = NULL; + if (NULL == ppNode) { + ppNode = &pNode; + } + + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + int32_t nodeTypeMaxValue = 9; + if (qptCtx.makeCtx.nodeLevel >= QPT_MAX_NODE_LEVEL) { + nodeTypeMaxValue = 2; + } + + switch (taosRand() % nodeTypeMaxValue) { + case 0: + qptMakeColumnNode(ppNode); + break; + case 1: + qptMakeValueNode(-1, ppNode); + break; + case 2: + qptMakeFunctionNode(ppNode); + break; + case 3: + qptMakeLogicCondNode(ppNode); + break; + case 4: + qptMakeNodeListNode(QPT_NODE_EXPR, ppNode); + break; + case 5: + qptMakeOperatorNode(ppNode); + break; + case 6: + qptMakeNonRealTableNode(ppNode); + break; + case 7: + qptMakeCaseWhenNode(ppNode); + break; + case 8: + qptMakeWhenThenNode(ppNode); + break; + default: + assert(0); + break; + } + + return *ppNode; +} + + +SNode* qptMakeLimitNode(SNode** ppNode) { + SNode* pNode = NULL; + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(&pNode); + } + + assert(0 == nodesMakeNode(QUERY_NODE_LIMIT, &pNode)); + assert(pNode); + + SLimitNode* pLimit = (SLimitNode*)pNode; + + if (!qptCtx.param.correctExpected) { + if (taosRand() % 2) { + pLimit->limit = taosRand() * ((taosRand() % 2) ? 1 : -1); + } + if (taosRand() % 2) { + pLimit->offset = taosRand() * ((taosRand() % 2) ? 
1 : -1); + } + } else { + pLimit->limit = taosRand(); + if (taosRand() % 2) { + pLimit->offset = taosRand(); + } + } + + *ppNode = pNode; + + return pNode; +} + + +SNode* qptMakeWindowOffsetNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + SNode* pNode = NULL; + assert(0 == nodesMakeNode(QUERY_NODE_WINDOW_OFFSET, &pNode)); + assert(pNode); + + SWindowOffsetNode* pWinOffset = (SWindowOffsetNode*)pNode; + qptMakeValueNode(TSDB_DATA_TYPE_BIGINT, &pWinOffset->pStartOffset); + qptMakeValueNode(TSDB_DATA_TYPE_BIGINT, &pWinOffset->pEndOffset); + + *ppNode = pNode; + + return pNode; +} + +void qptSaveMakeNodeCtx() { + qptCtx.makeCtxBak.nodeLevel = qptCtx.makeCtx.nodeLevel; +} + +void qptRestoreMakeNodeCtx() { + qptCtx.makeCtx.nodeLevel = qptCtx.makeCtxBak.nodeLevel; +} + + +void qptResetTableCols() { + SNode* pTmp = NULL; + FOREACH(pTmp, qptCtx.param.tbl.pColList) { + ((SQPTCol*)pTmp)->inUse = 0; + } + FOREACH(pTmp, qptCtx.param.tbl.pTagList) { + ((SQPTCol*)pTmp)->inUse = 0; + } +} + +void qptResetMakeNodeCtx() { + SQPTMakeNodeCtx* pCtx = &qptCtx.makeCtx; + pCtx->nodeLevel = 1; + + if (pCtx->fromTable) { + qptResetTableCols(); + } +} + +void qptInitMakeNodeCtx(bool fromTable, bool onlyTag, bool onlyCol, int16_t inputBlockId, SNodeList* pInputList) { + SQPTMakeNodeCtx* pCtx = &qptCtx.makeCtx; + + pCtx->onlyTag = onlyTag; + pCtx->fromTable = fromTable; + pCtx->onlyCol = onlyCol; + + if (NULL == pInputList) { + if (fromTable) { + inputBlockId = (qptCtx.buildCtx.pCurr && qptCtx.buildCtx.pCurr->pOutputDataBlockDesc) ? qptCtx.buildCtx.pCurr->pOutputDataBlockDesc->dataBlockId : taosRand(); + pInputList = onlyTag ? qptCtx.param.tbl.pTagList : (onlyCol ? 
qptCtx.param.tbl.pColList : qptCtx.param.tbl.pColTagList); + } else if (qptCtx.buildCtx.pChild && qptCtx.buildCtx.pChild->pOutputDataBlockDesc) { + inputBlockId = qptCtx.buildCtx.pChild->pOutputDataBlockDesc->dataBlockId; + pInputList = qptCtx.buildCtx.pChild->pOutputDataBlockDesc->pSlots; + } + } + + pCtx->inputBlockId = inputBlockId; + pCtx->pInputList = pInputList; + + qptResetMakeNodeCtx(); +} + +SNode* qptMakeConditionNode() { + SNode* pNode = NULL; + qptMakeExprNode(&pNode); + + return pNode; +} + + +SNode* qptMakeSlotDescNode(const char* pName, const SNode* pNode, int16_t slotId, bool output, bool reserve) { + SSlotDescNode* pSlot = NULL; + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode((SNode**)&pSlot); + } + + assert(0 == nodesMakeNode(QUERY_NODE_SLOT_DESC, (SNode**)&pSlot)); + + QPT_RAND_BOOL_V ? (pSlot->name[0] = 0) : snprintf(pSlot->name, sizeof(pSlot->name), "%s", pName); + pSlot->slotId = QPT_CORRECT_HIGH_PROB() ? slotId : taosRand(); + if (QPT_CORRECT_HIGH_PROB()) { + pSlot->dataType = ((SExprNode*)pNode)->resType; + } else { + qptGetRandValue(&pSlot->dataType.type, &pSlot->dataType.bytes, NULL); + } + + pSlot->reserve = reserve; + pSlot->output = output; + + return (SNode*)pSlot; +} + + +SNode* qptMakeDataBlockDescNode(bool forSink) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(NULL); + } + + SDataBlockDescNode* pDesc = NULL; + assert(0 == nodesMakeNode(QUERY_NODE_DATABLOCK_DESC, (SNode**)&pDesc)); + + pDesc->dataBlockId = QPT_CORRECT_HIGH_PROB() ? (forSink ? (qptCtx.buildCtx.nextBlockId - 1) : qptCtx.buildCtx.nextBlockId++) : QPT_RAND_INT_V; + pDesc->precision = QPT_CORRECT_HIGH_PROB() ? qptCtx.param.db.precision : QPT_RAND_INT_V; + + return (SNode*)pDesc; +} + +SNode* qptMakeDataBlockDescNodeFromNode(bool forSink) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(NULL); + } + + SDataBlockDescNode* pDesc = NULL; + SDataBlockDescNode* pInput = qptCtx.buildCtx.pCurr ? 
qptCtx.buildCtx.pCurr->pOutputDataBlockDesc : NULL; + SNode* pTmp = NULL, *pTmp2 = NULL; + + if (QPT_VALID_DESC(pInput)) { + if (QPT_CORRECT_HIGH_PROB()) { + nodesCloneNode((SNode*)pInput, (SNode**)&pDesc); + } else { + assert(0 == nodesMakeNode(QUERY_NODE_DATABLOCK_DESC, (SNode**)&pDesc)); + + pDesc->dataBlockId = QPT_CORRECT_HIGH_PROB() ? pInput->dataBlockId : QPT_RAND_INT_V; + pDesc->precision = QPT_CORRECT_HIGH_PROB() ? pInput->precision : QPT_RAND_INT_V; + pDesc->totalRowSize = QPT_CORRECT_HIGH_PROB() ? pInput->totalRowSize : QPT_RAND_INT_V; + pDesc->outputRowSize = QPT_CORRECT_HIGH_PROB() ? pInput->outputRowSize : QPT_RAND_INT_V; + + FOREACH(pTmp, pInput->pSlots) { + if (QPT_RAND_BOOL_V) { + nodesCloneNode(pTmp, &pTmp2); + qptNodesListMakeStrictAppend(&pDesc->pSlots, pTmp2); + } + } + } + } else { + assert(0 == nodesMakeNode(QUERY_NODE_DATABLOCK_DESC, (SNode**)&pDesc)); + + pDesc->dataBlockId = QPT_CORRECT_HIGH_PROB() ? (forSink ? (qptCtx.buildCtx.nextBlockId - 1) : qptCtx.buildCtx.nextBlockId++) : QPT_RAND_INT_V; + pDesc->precision = QPT_CORRECT_HIGH_PROB() ? qptCtx.param.db.precision : QPT_RAND_INT_V; + pDesc->totalRowSize = QPT_RAND_INT_V; + pDesc->outputRowSize = QPT_RAND_INT_V; + + int32_t slotNum = taosRand() % QPT_MAX_COLUMN_NUM; + for (int32_t i = 0; i < slotNum; ++i) { + pTmp2 = qptMakeExprNode(NULL); + if (QPT_CORRECT_HIGH_PROB()) { + pTmp = qptMakeSlotDescNode(NULL, pTmp2, i, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V); + nodesDestroyNode(pTmp2); + } else { + pTmp = pTmp2; + } + + qptNodesListMakeStrictAppend(&pDesc->pSlots, pTmp); + } + } + + return (SNode*)pDesc; +} + + + +SNode* qptMakeTargetNode(SNode* pNode, int16_t dataBlockId, int16_t slotId, SNode** pOutput) { + if (QPT_NCORRECT_LOW_PROB()) { + nodesDestroyNode(pNode); + return qptMakeRandNode(pOutput); + } + + STargetNode* pTarget = NULL; + assert(0 == nodesMakeNode(QUERY_NODE_TARGET, (SNode**)&pTarget)); + + pTarget->dataBlockId = QPT_CORRECT_HIGH_PROB() ? 
dataBlockId : taosRand(); + pTarget->slotId = QPT_CORRECT_HIGH_PROB() ? slotId : taosRand(); + pTarget->pExpr = QPT_CORRECT_HIGH_PROB() ? pNode : qptMakeRandNode(NULL); + if (pTarget->pExpr != pNode) { + nodesDestroyNode(pNode); + } + + *pOutput = (SNode*)pTarget; + + return *pOutput; +} + +SNode* qptMakeDownstreamSrcNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + SDownstreamSourceNode* pDs = NULL; + nodesMakeNode(QUERY_NODE_DOWNSTREAM_SOURCE, (SNode**)&pDs); + + pDs->addr.nodeId = qptCtx.param.vnode.vgId; + memcpy(&pDs->addr.epSet, &qptCtx.param.vnode.epSet, sizeof(pDs->addr.epSet)); + pDs->taskId = (QPT_CORRECT_HIGH_PROB() && qptCtx.buildCtx.pCurrTask) ? qptCtx.buildCtx.pCurrTask->id.taskId : taosRand(); + pDs->schedId = QPT_CORRECT_HIGH_PROB() ? qptCtx.param.schedulerId : taosRand(); + pDs->execId = taosRand(); + pDs->fetchMsgType = QPT_CORRECT_HIGH_PROB() ? (QPT_RAND_BOOL_V ? TDMT_SCH_FETCH : TDMT_SCH_MERGE_FETCH) : taosRand(); + pDs->localExec = QPT_RAND_BOOL_V; + + *ppNode = (SNode*)pDs; + + return *ppNode; +} + +SNode* qptMakeOrderByExprNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + SOrderByExprNode* pOrder = NULL; + nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR, (SNode**)&pOrder); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pOrder->pExpr); + + pOrder->order = (EOrder)(QPT_CORRECT_HIGH_PROB() ? (QPT_RAND_BOOL_V ? 
ORDER_ASC : ORDER_DESC) : taosRand()); + pOrder->nullOrder = qptGetRandNullOrder(); + + *ppNode = (SNode*)pOrder; + + return *ppNode; +} + +SNode* qptMakeSubplanNode(SNode** ppNode) { + if (QPT_NCORRECT_LOW_PROB()) { + return qptMakeRandNode(ppNode); + } + + *ppNode = (SNode*)qptCreateSubplanNode(QUERY_NODE_PHYSICAL_SUBPLAN); + + return *ppNode; +} + + + + +SPhysiNode* qptCreatePhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = NULL; + assert(0 == nodesMakeNode((ENodeType)nodeType, (SNode**)&pPhysiNode)); + assert(pPhysiNode); + + qptCtx.buildCtx.pCurr = pPhysiNode; + + qptMakeLimitNode(&pPhysiNode->pLimit); + qptMakeLimitNode(&pPhysiNode->pSlimit); + pPhysiNode->dynamicOp = qptGetDynamicOp(); + pPhysiNode->inputTsOrder = qptGetCurrTsOrder(); + + pPhysiNode->pOutputDataBlockDesc = (SDataBlockDescNode*)qptMakeDataBlockDescNode(false); + + return pPhysiNode; +} + +void qptPostCreatePhysiNode(SPhysiNode* pPhysiNode) { + pPhysiNode->outputTsOrder = qptGetCurrTsOrder(); + + if (QPT_RAND_BOOL_V) { + qptInitMakeNodeCtx((QPT_CORRECT_HIGH_PROB() && qptCtx.buildCtx.pChild) ? 
false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + pPhysiNode->pConditions = qptMakeConditionNode(); + } + +} + +void qptMarkTableInUseCols(int32_t colNum, int32_t totalColNum) { + if (colNum >= totalColNum) { + for (int32_t i = 0; i < totalColNum; ++i) { + SQPTCol* pNode = (SQPTCol*)nodesListGetNode(qptCtx.makeCtx.pInputList, i); + assert(pNode->type == QPT_QUERY_NODE_COL); + pNode->inUse = 1; + } + return; + } + + int32_t colInUse = 0; + do { + int32_t colIdx = taosRand() % totalColNum; + SQPTCol* pNode = (SQPTCol*)nodesListGetNode(qptCtx.makeCtx.pInputList, colIdx); + assert(pNode->type == QPT_QUERY_NODE_COL); + + if (pNode->inUse) { + continue; + } + + pNode->inUse = 1; + colInUse++; + } while (colInUse < colNum); +} + + +void qptMakeTableScanColList( SNodeList** ppCols) { + if (QPT_NCORRECT_LOW_PROB()) { + if (QPT_RAND_BOOL_V) { + nodesMakeList(ppCols); + } else { + *ppCols = NULL; + } + + return; + } + + int32_t colNum = (QPT_CORRECT_HIGH_PROB() && qptCtx.makeCtx.pInputList) ? (taosRand() % qptCtx.makeCtx.pInputList->length + 1) : (taosRand() % QPT_MAX_COLUMN_NUM); + int32_t colAdded = 0; + + if (qptCtx.makeCtx.pInputList) { + if (QPT_CORRECT_HIGH_PROB()) { + qptMarkTableInUseCols(colNum, qptCtx.makeCtx.pInputList->length); + + for (int32_t i = 0; colAdded < colNum; ++i) { + int32_t idx = (i < qptCtx.makeCtx.pInputList->length) ? i : (taosRand() % qptCtx.makeCtx.pInputList->length); + SQPTCol* pNode = (SQPTCol*)nodesListGetNode(qptCtx.makeCtx.pInputList, idx); + assert(pNode->type == QPT_QUERY_NODE_COL); + + if (0 == pNode->inUse) { + continue; + } + + assert(0 == qptNodesListMakeStrictAppend(ppCols, qptMakeColumnFromTable(idx))); + colAdded++; + } + + return; + } + + for (int32_t i = 0; i < colNum; ++i) { + int32_t colIdx = taosRand(); + colIdx = (colIdx >= qptCtx.makeCtx.pInputList->length) ? 
-1 : colIdx; + + assert(0 == qptNodesListMakeStrictAppend(ppCols, qptMakeColumnFromTable(colIdx))); + } + } else { + for (int32_t i = 0; i < colNum; ++i) { + int32_t colIdx = taosRand(); + assert(0 == qptNodesListMakeStrictAppend(ppCols, qptMakeColumnFromTable(colIdx))); + } + } +} + + +void qptCreateTableScanCols( int16_t blockId, SNodeList** ppList) { + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? true : false, QPT_CORRECT_HIGH_PROB() ? false : true, QPT_CORRECT_HIGH_PROB() ? true : false, 0, NULL); + qptMakeTableScanColList(ppList); +} + +void qptCreateTableScanPseudoCols( int16_t blockId, SNodeList** ppList) { + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? true : false, QPT_CORRECT_HIGH_PROB() ? true : false, QPT_CORRECT_HIGH_PROB() ? false : true, 0, NULL); + qptMakeTableScanColList(ppList); +} + + +void qptAddDataBlockSlots(SNodeList* pList, SDataBlockDescNode* pDataBlockDesc) { + if (NULL == pDataBlockDesc || QUERY_NODE_DATABLOCK_DESC != nodeType(pDataBlockDesc)) { + return; + } + + int16_t nextSlotId = LIST_LENGTH(pDataBlockDesc->pSlots), slotId = 0; + SNode* pNode = NULL; + bool output = QPT_RAND_BOOL_V; + + FOREACH(pNode, pList) { + if (NULL == pNode) { + continue; + } + + SNode* pExpr = QUERY_NODE_ORDER_BY_EXPR == nodeType(pNode) ? ((SOrderByExprNode*)pNode)->pExpr : pNode; + if (QPT_CORRECT_HIGH_PROB()) { + SNode* pDesc = QPT_CORRECT_HIGH_PROB() ? qptMakeSlotDescNode(NULL, pExpr, nextSlotId, output, QPT_RAND_BOOL_V) : qptMakeExprNode(NULL); + assert(0 == qptNodesListMakeStrictAppend(&pDataBlockDesc->pSlots, pDesc)); + pDataBlockDesc->totalRowSize += QPT_CORRECT_HIGH_PROB() ? ((SExprNode*)pExpr)->resType.bytes : taosRand(); + if (output && QPT_RAND_BOOL_V) { + pDataBlockDesc->outputRowSize += QPT_CORRECT_HIGH_PROB() ? 
((SExprNode*)pExpr)->resType.bytes : taosRand(); + } + } + + slotId = nextSlotId; + ++nextSlotId; + + if (QPT_CORRECT_HIGH_PROB()) { + SNode* pTarget = NULL; + qptMakeTargetNode(pNode, pDataBlockDesc->dataBlockId, slotId, &pTarget); + REPLACE_NODE(pTarget); + } + } +} + +SNode* qptMakeSpecTypeNode(QPT_NODE_TYPE nodeType, SNode** ppNode) { + switch (nodeType) { + case QPT_NODE_COLUMN: + return qptMakeColumnNode(ppNode); + case QPT_NODE_FUNCTION: + return qptMakeFunctionNode(ppNode); + case QPT_NODE_EXPR: + return qptMakeExprNode(ppNode); + case QPT_NODE_VALUE: + return qptMakeValueNode(-1, ppNode); + default: + break; + } + + return qptMakeRandNode(ppNode); +} + + +void qptMakeRandNodeList(SNodeList** ppList) { + qptSaveMakeNodeCtx(); + + int32_t exprNum = taosRand() % QPT_MAX_COLUMN_NUM + (QPT_CORRECT_HIGH_PROB() ? 1 : 0); + for (int32_t i = 0; i < exprNum; ++i) { + SNode* pNode = NULL; + qptRestoreMakeNodeCtx(); + qptMakeSpecTypeNode((QPT_NODE_TYPE)(taosRand() % (QPT_NODE_MAX_VALUE + 1)), &pNode); + qptNodesListMakeStrictAppend(ppList, pNode); + } +} + + +void qptMakeExprList(SNodeList** ppList) { + qptSaveMakeNodeCtx(); + + int32_t exprNum = taosRand() % QPT_MAX_COLUMN_NUM + (QPT_CORRECT_HIGH_PROB() ? 1 : 0); + for (int32_t i = 0; i < exprNum; ++i) { + SNode* pNode = NULL; + qptRestoreMakeNodeCtx(); + qptMakeExprNode(&pNode); + qptNodesListMakeStrictAppend(ppList, pNode); + } +} + +void qptMakeValueList(SNodeList** ppList) { + qptSaveMakeNodeCtx(); + + int32_t colNum = taosRand() % QPT_MAX_COLUMN_NUM + (QPT_CORRECT_HIGH_PROB() ? 1 : 0); + for (int32_t i = 0; i < colNum; ++i) { + SNode* pNode = NULL; + qptRestoreMakeNodeCtx(); + qptMakeValueNode(-1, &pNode); + qptNodesListMakeStrictAppend(ppList, pNode); + } +} + +void qptMakeColumnList(SNodeList** ppList) { + qptSaveMakeNodeCtx(); + + int32_t colNum = taosRand() % QPT_MAX_COLUMN_NUM + (QPT_CORRECT_HIGH_PROB() ? 
1 : 0); + for (int32_t i = 0; i < colNum; ++i) { + SNode* pNode = NULL; + qptRestoreMakeNodeCtx(); + qptMakeColumnNode(&pNode); + qptNodesListMakeStrictAppend(ppList, pNode); + } +} + +void qptMakeTargetList(QPT_NODE_TYPE nodeType, int16_t datablockId, SNodeList** ppList) { + qptSaveMakeNodeCtx(); + + int32_t tarNum = taosRand() % QPT_MAX_COLUMN_NUM + (QPT_CORRECT_HIGH_PROB() ? 1 : 0); + for (int32_t i = 0; i < tarNum; ++i) { + SNode* pNode = NULL, *pExpr = NULL; + qptRestoreMakeNodeCtx(); + qptMakeSpecTypeNode(nodeType, &pExpr); + qptMakeTargetNode(pExpr, datablockId, i, &pNode); + qptNodesListMakeStrictAppend(ppList, pNode); + } +} + +void qptMakeFunctionList(SNodeList** ppList) { + qptSaveMakeNodeCtx(); + + int32_t funcNum = taosRand() % QPT_MAX_COLUMN_NUM + (QPT_CORRECT_HIGH_PROB() ? 1 : 0); + for (int32_t i = 0; i < funcNum; ++i) { + SNode* pNode = NULL; + qptRestoreMakeNodeCtx(); + qptMakeFunctionNode(&pNode); + qptNodesListMakeStrictAppend(ppList, pNode); + } +} + + +void qptMakeDownstreamSrcList(SNodeList** ppList) { + qptSaveMakeNodeCtx(); + + int32_t dsNum = taosRand() % QPT_MAX_DS_SRC_NUM + (QPT_CORRECT_HIGH_PROB() ? 1 : 0); + for (int32_t i = 0; i < dsNum; ++i) { + SNode* pNode = NULL; + qptRestoreMakeNodeCtx(); + qptMakeDownstreamSrcNode(&pNode); + qptNodesListMakeStrictAppend(ppList, pNode); + } +} + + +void qptMakeOrerByExprList(SNodeList** ppList) { + qptSaveMakeNodeCtx(); + + int32_t orderNum = taosRand() % QPT_MAX_ORDER_BY_NUM + (QPT_CORRECT_HIGH_PROB() ? 1 : 0); + for (int32_t i = 0; i < orderNum; ++i) { + SNode* pNode = NULL; + qptRestoreMakeNodeCtx(); + qptMakeOrderByExprNode(&pNode); + qptNodesListMakeStrictAppend(ppList, pNode); + } +} + + +void qptMakeSubplanList(SNodeList** ppList) { + qptSaveMakeNodeCtx(); + + int32_t planNum = taosRand() % QPT_MAX_LEVEL_SUBPLAN_NUM + (QPT_CORRECT_HIGH_PROB() ? 
1 : 0); + for (int32_t i = 0; i < planNum; ++i) { + SNode* pNode = NULL; + qptRestoreMakeNodeCtx(); + qptMakeSubplanNode(&pNode); + qptNodesListMakeStrictAppend(ppList, pNode); + } +} + +void qptMakeSpecTypeNodeList(QPT_NODE_TYPE nodeType, SNodeList** ppList) { + switch (nodeType) { + case QPT_NODE_COLUMN: + return qptMakeColumnList(ppList); + case QPT_NODE_FUNCTION: + return qptMakeFunctionList(ppList); + case QPT_NODE_EXPR: + return qptMakeExprList(ppList); + case QPT_NODE_VALUE: + return qptMakeValueList(ppList); + case QPT_NODE_SUBPLAN: + return qptMakeSubplanList(ppList); + default: + break; + } + + return qptMakeRandNodeList(ppList); +} + + +void qptMakeNodeList(QPT_NODE_TYPE nodeType, SNodeList** ppList) { + qptMakeSpecTypeNodeList(nodeType, ppList); +} + + + +void qptMakeAppendToTargetList(SNodeList* pInputList, int16_t blockId, SNodeList** ppOutList) { + SNode* pNode = NULL; + FOREACH(pNode, pInputList) { + if (QPT_CORRECT_HIGH_PROB()) { + SNode* pTarget = NULL; + int16_t slotId = ((*ppOutList) && (*ppOutList)->length) ? (*ppOutList)->length : 0; + qptMakeTargetNode(pNode, blockId, slotId, &pTarget); + qptNodesListMakeStrictAppend(ppOutList, pTarget); + } + } +} + +void qptCreateScanPhysiNodeImpl( SScanPhysiNode* pScan) { + SDataBlockDescNode* pDesc = pScan->node.pOutputDataBlockDesc; + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && QPT_VALID_DESC(pDesc)) ? pDesc->dataBlockId : taosRand(); + qptCreateTableScanCols(blockId, &pScan->pScanCols); + + qptAddDataBlockSlots(pScan->pScanCols, pDesc); + + if (taosRand() % 2) { + blockId = (QPT_CORRECT_HIGH_PROB() && QPT_VALID_DESC(pDesc)) ? pDesc->dataBlockId : taosRand(); + qptCreateTableScanPseudoCols(blockId, &pScan->pScanPseudoCols); + } + + qptAddDataBlockSlots(pScan->pScanPseudoCols, pDesc); + + pScan->uid = QPT_CORRECT_HIGH_PROB() ? qptCtx.param.tbl.uid : taosRand(); + pScan->suid = QPT_CORRECT_HIGH_PROB() ? qptCtx.param.tbl.suid : taosRand(); + pScan->tableType = QPT_CORRECT_HIGH_PROB() ? 
qptCtx.param.tbl.tblType : taosRand(); + pScan->groupOrderScan = (taosRand() % 2) ? true : false; + + SName tblName = {0}; + toName(1, qptCtx.param.db.dbName, qptCtx.param.tbl.tblName, &tblName); + if (QPT_CORRECT_HIGH_PROB()) { + memcpy(&pScan->tableName, &tblName, sizeof(SName)); + } else { + pScan->tableName.acctId = 0; + pScan->tableName.dbname[0] = 0; + pScan->tableName.tname[0] = 0; + } + + qptCtx.buildCtx.currTsOrder = QPT_CORRECT_HIGH_PROB() ? qptCtx.buildCtx.currTsOrder : QPT_RAND_ORDER_V; +} + + + +SNode* qptCreateTagScanPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + STagScanPhysiNode* pTagScanNode = (STagScanPhysiNode*)pPhysiNode; + pTagScanNode->onlyMetaCtbIdx = (taosRand() % 2) ? true : false; + + qptCreateScanPhysiNodeImpl(&pTagScanNode->scan); + + qptPostCreatePhysiNode(pPhysiNode); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateTableScanPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhysiNode; + pTableScanNode->scanSeq[0] = taosRand() % 4; + pTableScanNode->scanSeq[1] = taosRand() % 4; + pTableScanNode->scanRange.skey = taosRand(); + pTableScanNode->scanRange.ekey = taosRand(); + pTableScanNode->ratio = taosRand(); + pTableScanNode->dataRequired = taosRand(); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? true : false, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeFunctionList(&pTableScanNode->pDynamicScanFuncs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? true : false, QPT_CORRECT_HIGH_PROB() ? true : false, QPT_CORRECT_HIGH_PROB() ? false : true, 0, NULL); + qptMakeColumnList(&pTableScanNode->pGroupTags); + + pTableScanNode->groupSort = QPT_RAND_BOOL_V; + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? true : false, QPT_CORRECT_HIGH_PROB() ? true : false, QPT_CORRECT_HIGH_PROB() ? 
false : true, 0, NULL); + qptMakeExprList(&pTableScanNode->pTags); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? true : false, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pTableScanNode->pSubtable); + pTableScanNode->interval = taosRand(); + pTableScanNode->offset = taosRand(); + pTableScanNode->sliding = taosRand(); + pTableScanNode->intervalUnit = taosRand(); + pTableScanNode->slidingUnit = taosRand(); + pTableScanNode->triggerType = taosRand(); + pTableScanNode->watermark = taosRand(); + pTableScanNode->igExpired = taosRand(); + pTableScanNode->assignBlockUid = QPT_RAND_BOOL_V; + pTableScanNode->igCheckUpdate = taosRand(); + pTableScanNode->filesetDelimited = QPT_RAND_BOOL_V; + pTableScanNode->needCountEmptyTable = QPT_RAND_BOOL_V; + pTableScanNode->paraTablesSort = QPT_RAND_BOOL_V; + pTableScanNode->smallDataTsSort = QPT_RAND_BOOL_V; + + qptCreateScanPhysiNodeImpl(&pTableScanNode->scan); + + qptPostCreatePhysiNode(pPhysiNode); + + return (SNode*)pPhysiNode; +} + + +SNode* qptCreateTableSeqScanPhysiNode(int32_t nodeType) { + return qptCreateTableScanPhysiNode(nodeType); +} + +SNode* qptCreateTableMergeScanPhysiNode(int32_t nodeType) { + return qptCreateTableScanPhysiNode(nodeType); +} + +SNode* qptCreateStreamScanPhysiNode(int32_t nodeType) { + return qptCreateTableScanPhysiNode(nodeType); +} + +SNode* qptCreateSysTableScanPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SSystemTableScanPhysiNode* pSysScanNode = (SSystemTableScanPhysiNode*)pPhysiNode; + + memcpy(&pSysScanNode->mgmtEpSet, &qptCtx.param.vnode.epSet, sizeof(pSysScanNode->mgmtEpSet)); + pSysScanNode->showRewrite = QPT_RAND_BOOL_V; + pSysScanNode->accountId = QPT_CORRECT_HIGH_PROB() ? 
1 : taosRand(); + pSysScanNode->sysInfo = QPT_RAND_BOOL_V; + + qptCreateScanPhysiNodeImpl(&pSysScanNode->scan); + + qptPostCreatePhysiNode(pPhysiNode); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateBlockDistScanPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SBlockDistScanPhysiNode* pBlkScanNode = (SBlockDistScanPhysiNode*)pPhysiNode; + + qptCreateScanPhysiNodeImpl((SScanPhysiNode*)pBlkScanNode); + + qptPostCreatePhysiNode(pPhysiNode); + + return (SNode*)pPhysiNode; +} + + +SNode* qptCreateLastRowScanPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SLastRowScanPhysiNode* pLRScanNode = (SLastRowScanPhysiNode*)pPhysiNode; + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? true : false, QPT_CORRECT_HIGH_PROB() ? true : false, QPT_CORRECT_HIGH_PROB() ? false : true, 0, NULL); + qptMakeColumnList(&pLRScanNode->pGroupTags); + + pLRScanNode->groupSort = QPT_RAND_BOOL_V; + pLRScanNode->ignoreNull = QPT_RAND_BOOL_V; + + if (QPT_CORRECT_HIGH_PROB()) { + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeAppendToTargetList(pLRScanNode->scan.pScanCols, blockId, &pLRScanNode->pTargets); + } + + if (QPT_CORRECT_HIGH_PROB()) { + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? 
pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeAppendToTargetList(pLRScanNode->scan.pScanPseudoCols, blockId, &pLRScanNode->pTargets); + } + + if (QPT_RAND_BOOL_V) { + int32_t funcNum = taosRand() % QPT_MAX_COLUMN_NUM; + pLRScanNode->pFuncTypes = taosArrayInit(funcNum, sizeof(int32_t)); + assert(pLRScanNode->pFuncTypes); + for (int32_t i = 0; i < funcNum; ++i) { + int32_t funcType = taosRand(); + taosArrayPush(pLRScanNode->pFuncTypes, &funcType); + } + } + + qptCreateScanPhysiNodeImpl(&pLRScanNode->scan); + + qptPostCreatePhysiNode(pPhysiNode); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateTableCountScanPhysiNode(int32_t nodeType) { + return qptCreateLastRowScanPhysiNode(nodeType); +} + + +SNode* qptCreateProjectPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SProjectPhysiNode* pProject = (SProjectPhysiNode*)pPhysiNode; + + pProject->mergeDataBlock = QPT_RAND_BOOL_V; + pProject->ignoreGroupId = QPT_RAND_BOOL_V; + pProject->inputIgnoreGroup = QPT_RAND_BOOL_V; + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprList(&pProject->pProjections); + + qptAddDataBlockSlots(pProject->pProjections, pProject->node.pOutputDataBlockDesc); + + qptPostCreatePhysiNode(pPhysiNode); + + return (SNode*)pPhysiNode; +} + + +SNode* qptCreateMergeJoinPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SSortMergeJoinPhysiNode* pJoin = (SSortMergeJoinPhysiNode*)pPhysiNode; + + pJoin->joinType = (EJoinType)(taosRand() % JOIN_TYPE_MAX_VALUE + (QPT_CORRECT_HIGH_PROB() ? 0 : 1)); + pJoin->subType = (EJoinSubType)(taosRand() % JOIN_STYPE_MAX_VALUE + (QPT_CORRECT_HIGH_PROB() ? 0 : 1)); + qptMakeWindowOffsetNode(&pJoin->pWindowOffset); + qptMakeLimitNode(&pJoin->pJLimit); + pJoin->asofOpType = OPERATOR_ARRAY[taosRand() % (sizeof(OPERATOR_ARRAY)/sizeof(OPERATOR_ARRAY[0]))] + (QPT_CORRECT_HIGH_PROB() ? 
0 : 1); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->leftPrimExpr); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->rightPrimExpr); + + pJoin->leftPrimSlotId = qptGetInputSlotId(qptCtx.buildCtx.pChild ? qptCtx.buildCtx.pChild->pOutputDataBlockDesc : NULL); + pJoin->rightPrimSlotId = qptGetInputSlotId(qptCtx.buildCtx.pChild ? qptCtx.buildCtx.pChild->pOutputDataBlockDesc : NULL); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnList(&pJoin->pEqLeft); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnList(&pJoin->pEqRight); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->pPrimKeyCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->pColEqCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->pColOnCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->pFullOnCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? 
pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_EXPR, blockId, &pJoin->pTargets); + + for (int32_t i = 0; i < 2; i++) { + pJoin->inputStat[i].inputRowNum = taosRand(); + pJoin->inputStat[i].inputRowSize = taosRand(); + } + + pJoin->seqWinGroup = QPT_RAND_BOOL_V; + pJoin->grpJoin = QPT_RAND_BOOL_V; + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateHashAggPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SAggPhysiNode* pAgg = (SAggPhysiNode*)pPhysiNode; + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_EXPR, blockId, &pAgg->pExprs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_EXPR, blockId, &pAgg->pGroupKeys); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_FUNCTION, blockId, &pAgg->pAggFuncs); + + pAgg->mergeDataBlock = QPT_RAND_BOOL_V; + pAgg->groupKeyOptimized = QPT_RAND_BOOL_V; + pAgg->hasCountLikeFunc = QPT_RAND_BOOL_V; + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateExchangePhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SExchangePhysiNode* pExc = (SExchangePhysiNode*)pPhysiNode; + + pExc->srcStartGroupId = taosRand(); + pExc->srcEndGroupId = taosRand(); + pExc->singleChannel = QPT_RAND_BOOL_V; + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? 
false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeDownstreamSrcList(&pExc->pSrcEndPoints); + + pExc->seqRecvData = QPT_RAND_BOOL_V; + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateMergePhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SMergePhysiNode* pMerge = (SMergePhysiNode*)pPhysiNode; + + pMerge->type = (EMergeType)(QPT_CORRECT_HIGH_PROB() ? (taosRand() % (MERGE_TYPE_MAX_VALUE - 1) + 1) : taosRand()); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeOrerByExprList(&pMerge->pMergeKeys); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_EXPR, blockId, &pMerge->pTargets); + + pMerge->numOfChannels = taosRand(); + pMerge->numOfSubplans = taosRand(); + pMerge->srcGroupId = taosRand(); + pMerge->srcEndGroupId = taosRand(); + pMerge->groupSort = QPT_RAND_BOOL_V; + pMerge->ignoreGroupId = QPT_RAND_BOOL_V; + pMerge->inputWithGroupId = QPT_RAND_BOOL_V; + + return (SNode*)pPhysiNode; +} + + +SNode* qptCreateSortPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SSortPhysiNode* pSort = (SSortPhysiNode*)pPhysiNode; + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprList(&pSort->pExprs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeOrerByExprList(&pSort->pSortKeys); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? 
pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_EXPR, blockId, &pSort->pTargets); + + pSort->calcGroupId = QPT_RAND_BOOL_V; + pSort->excludePkCol = QPT_RAND_BOOL_V; + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateGroupSortPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SGroupSortPhysiNode* pSort = (SGroupSortPhysiNode*)pPhysiNode; + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprList(&pSort->pExprs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeOrerByExprList(&pSort->pSortKeys); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_EXPR, blockId, &pSort->pTargets); + + pSort->calcGroupId = QPT_RAND_BOOL_V; + pSort->excludePkCol = QPT_RAND_BOOL_V; + + return (SNode*)pPhysiNode; +} + +void qptCreateWindowPhysiNode(SWindowPhysiNode* pWindow) { + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprList(&pWindow->pExprs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeFunctionList(&pWindow->pFuncs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnNode(&pWindow->pTspk); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? 
false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnNode(&pWindow->pTsEnd); + + pWindow->triggerType = taosRand(); + pWindow->watermark = taosRand(); + pWindow->deleteMark = taosRand(); + pWindow->igExpired = taosRand(); + pWindow->destHasPrimaryKey = taosRand(); + pWindow->mergeDataBlock = QPT_RAND_BOOL_V; +} + +SNode* qptCreateIntervalPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SIntervalPhysiNode* pInterval = (SIntervalPhysiNode*)pPhysiNode; + + qptCreateWindowPhysiNode(&pInterval->window); + + pInterval->interval = taosRand(); + pInterval->offset = taosRand(); + pInterval->sliding = taosRand(); + pInterval->intervalUnit = qptGetRandTimestampUnit(); + pInterval->slidingUnit = qptGetRandTimestampUnit(); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateMergeIntervalPhysiNode(int32_t nodeType) { + return qptCreateIntervalPhysiNode(nodeType); +} + +SNode* qptCreateMergeAlignedIntervalPhysiNode(int32_t nodeType) { + return qptCreateIntervalPhysiNode(nodeType); +} + +SNode* qptCreateStreamIntervalPhysiNode(int32_t nodeType) { + return qptCreateIntervalPhysiNode(nodeType); +} + +SNode* qptCreateStreamFinalIntervalPhysiNode(int32_t nodeType) { + return qptCreateIntervalPhysiNode(nodeType); +} + +SNode* qptCreateStreamSemiIntervalPhysiNode(int32_t nodeType) { + return qptCreateIntervalPhysiNode(nodeType); +} + + +SNode* qptCreateStreamMidIntervalPhysiNode(int32_t nodeType) { + return qptCreateIntervalPhysiNode(nodeType); +} + + +SNode* qptCreateFillPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SFillPhysiNode* pFill = (SFillPhysiNode*)pPhysiNode; + + pFill->mode = qptGetRandFillMode(); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprList(&pFill->pFillExprs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? 
false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprList(&pFill->pNotFillExprs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnNode(&pFill->pWStartTs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeNodeListNode(QPT_NODE_VALUE, &pFill->pValues); + + qptGetRandTimeWindow(&pFill->timeRange); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateStreamFillPhysiNode(int32_t nodeType) { + return qptCreateFillPhysiNode(nodeType); +} + + +SNode* qptCreateSessionPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SSessionWinodwPhysiNode* pSession = (SSessionWinodwPhysiNode*)pPhysiNode; + + qptCreateWindowPhysiNode(&pSession->window); + + pSession->gap = taosRand(); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateStreamSessionPhysiNode(int32_t nodeType) { + return qptCreateSessionPhysiNode(nodeType); +} + +SNode* qptCreateStreamSemiSessionPhysiNode(int32_t nodeType) { + return qptCreateSessionPhysiNode(nodeType); +} + +SNode* qptCreateStreamFinalSessionPhysiNode(int32_t nodeType) { + return qptCreateSessionPhysiNode(nodeType); +} + +SNode* qptCreateStateWindowPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SStateWinodwPhysiNode* pState = (SStateWinodwPhysiNode*)pPhysiNode; + + qptCreateWindowPhysiNode(&pState->window); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnNode(&pState->pStateKey); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateStreamStatePhysiNode(int32_t nodeType) { + return qptCreateStateWindowPhysiNode(nodeType); +} + +void qptCreatePartitionPhysiNodeImpl(SPartitionPhysiNode* pPartition) { + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? 
false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprList(&pPartition->pExprs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, pPartition->node.pOutputDataBlockDesc ? pPartition->node.pOutputDataBlockDesc->dataBlockId : taosRand(), pPartition->pExprs); + qptMakeColumnList(&pPartition->pPartitionKeys); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnList(&pPartition->pPartitionKeys); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPartition->node.pOutputDataBlockDesc) ? pPartition->node.pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_EXPR, blockId, &pPartition->pTargets); + + pPartition->needBlockOutputTsOrder = QPT_RAND_BOOL_V; + pPartition->tsSlotId = qptGetInputPrimaryTsSlotId(); +} + +SNode* qptCreatePartitionPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SPartitionPhysiNode* pPartition = (SPartitionPhysiNode*)pPhysiNode; + + qptCreatePartitionPhysiNodeImpl(pPartition); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateStreamPartitionPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SStreamPartitionPhysiNode* pPartition = (SStreamPartitionPhysiNode*)pPhysiNode; + + qptCreatePartitionPhysiNodeImpl(&pPartition->part); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_CORRECT_HIGH_PROB() ? true : false, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnList(&pPartition->pTags); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? 
false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pPartition->pSubtable); + + return (SNode*)pPhysiNode; +} + + +SNode* qptCreateIndefRowsFuncPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SIndefRowsFuncPhysiNode* pFunc = (SIndefRowsFuncPhysiNode*)pPhysiNode; + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_EXPR, blockId, &pFunc->pExprs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_FUNCTION, blockId, &pFunc->pFuncs); + + return (SNode*)pPhysiNode; +} + + +SNode* qptCreateInterpFuncPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SInterpFuncPhysiNode* pFunc = (SInterpFuncPhysiNode*)pPhysiNode; + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_EXPR, blockId, &pFunc->pExprs); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? 
pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_FUNCTION, blockId, &pFunc->pFuncs); + + qptGetRandTimeWindow(&pFunc->timeRange); + + pFunc->interval = taosRand(); + pFunc->intervalUnit = qptGetRandTimestampUnit(); + + pFunc->fillMode = qptGetRandFillMode(); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeNodeListNode(QPT_NODE_VALUE, &pFunc->pFillValues); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnNode(&pFunc->pTimeSeries); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateMergeEventPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SEventWinodwPhysiNode* pEvent = (SEventWinodwPhysiNode*)pPhysiNode; + + qptCreateWindowPhysiNode(&pEvent->window); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pEvent->pStartCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? 
false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pEvent->pEndCond); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateStreamEventPhysiNode(int32_t nodeType) { + return qptCreateMergeEventPhysiNode(nodeType); +} + +SNode* qptCreateCountWindowPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SCountWinodwPhysiNode* pCount = (SCountWinodwPhysiNode*)pPhysiNode; + + qptCreateWindowPhysiNode(&pCount->window); + + pCount->windowCount = taosRand(); + pCount->windowSliding = taosRand(); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateStreamCountWindowPhysiNode(int32_t nodeType) { + return qptCreateCountWindowPhysiNode(nodeType); +} + +SNode* qptCreateHashJoinPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SHashJoinPhysiNode* pJoin = (SHashJoinPhysiNode*)pPhysiNode; + + pJoin->joinType = (EJoinType)(taosRand() % JOIN_TYPE_MAX_VALUE + (QPT_CORRECT_HIGH_PROB() ? 0 : 1)); + pJoin->subType = (EJoinSubType)(taosRand() % JOIN_STYPE_MAX_VALUE + (QPT_CORRECT_HIGH_PROB() ? 0 : 1)); + qptMakeWindowOffsetNode(&pJoin->pWindowOffset); + qptMakeLimitNode(&pJoin->pJLimit); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnList(&pJoin->pOnLeft); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnList(&pJoin->pOnRight); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->leftPrimExpr); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->rightPrimExpr); + + pJoin->leftPrimSlotId = qptGetInputSlotId(qptCtx.buildCtx.pChild ? qptCtx.buildCtx.pChild->pOutputDataBlockDesc : NULL); + pJoin->rightPrimSlotId = qptGetInputSlotId(qptCtx.buildCtx.pChild ? 
qptCtx.buildCtx.pChild->pOutputDataBlockDesc : NULL); + + pJoin->timeRangeTarget = QPT_CORRECT_HIGH_PROB() ? (taosRand() % 3) : taosRand(); + qptGetRandTimeWindow(&pJoin->timeRange); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->pLeftOnCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->pRightOnCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->pFullOnCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + int16_t blockId = (QPT_CORRECT_HIGH_PROB() && pPhysiNode->pOutputDataBlockDesc) ? pPhysiNode->pOutputDataBlockDesc->dataBlockId : taosRand(); + qptMakeTargetList(QPT_NODE_EXPR, blockId, &pJoin->pTargets); + + for (int32_t i = 0; i < 2; i++) { + pJoin->inputStat[i].inputRowNum = taosRand(); + pJoin->inputStat[i].inputRowSize = taosRand(); + } + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->pPrimKeyCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->pColEqCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_CORRECT_HIGH_PROB() ? true : false, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pJoin->pTagEqCond); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateGroupCachePhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SGroupCachePhysiNode* pGroup = (SGroupCachePhysiNode*)pPhysiNode; + + pGroup->grpColsMayBeNull = QPT_RAND_BOOL_V; + pGroup->grpByUid = QPT_RAND_BOOL_V; + pGroup->globalGrp = QPT_RAND_BOOL_V; + pGroup->batchFetch = QPT_RAND_BOOL_V; + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? 
false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnList(&pGroup->pGroupCols); + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateDynQueryCtrlPhysiNode(int32_t nodeType) { + SPhysiNode* pPhysiNode = qptCreatePhysiNode(nodeType); + + SDynQueryCtrlPhysiNode* pDyn = (SDynQueryCtrlPhysiNode*)pPhysiNode; + + pDyn->qType = QPT_CORRECT_HIGH_PROB() ? DYN_QTYPE_STB_HASH : (EDynQueryType)taosRand(); + + SStbJoinDynCtrlBasic* pJoin = &pDyn->stbJoin; + pJoin->batchFetch = QPT_RAND_BOOL_V; + pJoin->vgSlot[0] = taosRand(); + pJoin->vgSlot[1] = taosRand(); + pJoin->uidSlot[0] = taosRand(); + pJoin->uidSlot[1] = taosRand(); + pJoin->srcScan[0] = QPT_RAND_BOOL_V; + pJoin->srcScan[1] = QPT_RAND_BOOL_V; + + return (SNode*)pPhysiNode; +} + +SNode* qptCreateDataSinkNode(int32_t nodeType) { + SDataSinkNode* pSinkNode = NULL; + assert(0 == nodesMakeNode((ENodeType)nodeType, (SNode**)&pSinkNode)); + assert(pSinkNode); + + if (QPT_CORRECT_HIGH_PROB() && qptCtx.buildCtx.pCurr && qptCtx.buildCtx.pCurr->pOutputDataBlockDesc) { + pSinkNode->pInputDataBlockDesc = (SDataBlockDescNode*)qptMakeDataBlockDescNodeFromNode(true); + } else { + pSinkNode->pInputDataBlockDesc = (SDataBlockDescNode*)qptMakeDataBlockDescNode(true); + } + + return (SNode*)pSinkNode; +} + +SNode* qptCreateDataDispatchPhysiNode(int32_t nodeType) { + return (SNode*)qptCreateDataSinkNode(nodeType); +} + +SNode* qptCreateDataInsertPhysiNode(int32_t nodeType) { + SDataInserterNode* pInserter = (SDataInserterNode*)qptCreateDataSinkNode(nodeType); + + pInserter->numOfTables = taosRand(); + pInserter->size = taosRand(); + pInserter->pData = QPT_RAND_BOOL_V ? taosMemoryMalloc(1) : NULL; + + return (SNode*)pInserter; +} + +SNode* qptCreateDataQueryInsertPhysiNode(int32_t nodeType) { + SQueryInserterNode* pInserter = (SQueryInserterNode*)qptCreateDataSinkNode(nodeType); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? 
false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptMakeColumnList(&pInserter->pCols); + + pInserter->tableId = QPT_CORRECT_HIGH_PROB() ? qptCtx.param.tbl.uid : taosRand(); + pInserter->stableId = QPT_CORRECT_HIGH_PROB() ? qptCtx.param.tbl.suid : taosRand(); + pInserter->tableType = QPT_CORRECT_HIGH_PROB() ? (QPT_RAND_BOOL_V ? TSDB_CHILD_TABLE : TSDB_NORMAL_TABLE) : (taosRand() % TSDB_TABLE_MAX); + if (QPT_CORRECT_HIGH_PROB()) { + strcpy(pInserter->tableName, qptCtx.param.tbl.tblName); + } else { + pInserter->tableName[0] = QPT_RAND_BOOL_V ? 'a' : 0; + } + pInserter->vgId = qptCtx.param.vnode.vgId; + memcpy(&pInserter->epSet, &qptCtx.param.vnode.epSet, sizeof(pInserter->epSet)); + pInserter->explain = QPT_RAND_BOOL_V; + + return (SNode*)pInserter; +} + + +SNode* qptCreateDataDeletePhysiNode(int32_t nodeType) { + SDataDeleterNode* pDeleter = (SDataDeleterNode*)qptCreateDataSinkNode(nodeType); + + pDeleter->tableId = QPT_CORRECT_HIGH_PROB() ? qptCtx.param.tbl.uid : taosRand(); + pDeleter->tableType = QPT_CORRECT_HIGH_PROB() ? (QPT_RAND_BOOL_V ? TSDB_CHILD_TABLE : TSDB_NORMAL_TABLE) : (taosRand() % TSDB_TABLE_MAX); + if (QPT_CORRECT_HIGH_PROB()) { + sprintf(pDeleter->tableFName, "1.%s.%s", qptCtx.param.db.dbName, qptCtx.param.tbl.tblName); + } else { + pDeleter->tableFName[0] = QPT_RAND_BOOL_V ? 'a' : 0; + } + + SQPTCol* pCol = (SQPTCol*)nodesListGetNode(qptCtx.param.tbl.pColList, 0); + if (QPT_CORRECT_HIGH_PROB() && pCol) { + strcpy(pDeleter->tsColName, pCol->name); + } else { + pDeleter->tsColName[0] = QPT_RAND_BOOL_V ? 
't' : 0; + } + + qptGetRandTimeWindow(&pDeleter->deleteTimeRange); + + return (SNode*)pDeleter; +} + +void qptBuildSinkIdx(int32_t* pSinkIdx) { + +} + +void qptCreateSubplanDataSink(SDataSinkNode** ppOutput) { + static int32_t sinkIdx[sizeof(qptSink) / sizeof(qptSink[0])] = {-1}; + int32_t nodeIdx = 0; + + if (sinkIdx[0] < 0) { + qptBuildSinkIdx(sinkIdx); + } + + nodeIdx = taosRand() % (sizeof(sinkIdx)/sizeof(sinkIdx[0])); + + *ppOutput = (SDataSinkNode*)qptCreatePhysicalPlanNode(nodeIdx); +} + + +SNode* qptCreateSubplanNode(int32_t nodeType) { + SSubplan* pSubplan = NULL; + assert(0 == nodesMakeNode((ENodeType)nodeType, (SNode**)&pSubplan)); + + pSubplan->id.queryId = qptCtx.param.plan.queryId; + pSubplan->id.groupId = taosRand() % QPT_MAX_SUBPLAN_GROUP; + pSubplan->id.subplanId = qptCtx.buildCtx.nextSubplanId++; + + pSubplan->subplanType = QPT_CORRECT_HIGH_PROB() ? (ESubplanType)(taosRand() % SUBPLAN_TYPE_COMPUTE + 1) : (ESubplanType)taosRand(); + pSubplan->msgType = qptGetRandSubplanMsgType(); + pSubplan->level = taosRand() % QPT_MAX_SUBPLAN_LEVEL; + sprintf(pSubplan->dbFName, "1.%s", qptCtx.param.db.dbName); + strcpy(pSubplan->user, qptCtx.param.userName); + pSubplan->execNode.nodeId = qptCtx.param.vnode.vgId; + memcpy(&pSubplan->execNode.epSet, &qptCtx.param.vnode.epSet, sizeof(pSubplan->execNode.epSet)); + pSubplan->execNodeStat.tableNum = taosRand(); + + qptCreatePhysiNodesTree(&pSubplan->pNode, NULL, 0); + + qptCreateSubplanDataSink(&pSubplan->pDataSink); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_CORRECT_HIGH_PROB() ? true : false, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pSubplan->pTagCond); + + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_CORRECT_HIGH_PROB() ? 
true : false, QPT_RAND_BOOL_V, 0, NULL); + qptMakeExprNode(&pSubplan->pTagIndexCond); + + pSubplan->showRewrite = QPT_RAND_BOOL_V; + pSubplan->isView = QPT_RAND_BOOL_V; + pSubplan->isAudit = QPT_RAND_BOOL_V; + pSubplan->dynamicRowThreshold = QPT_RAND_BOOL_V; + pSubplan->rowsThreshold = taosRand(); + + return (SNode*)pSubplan; +} + + +SNode* qptCreatePhysicalPlanNode(int32_t nodeIdx) { + if (qptPlans[nodeIdx].buildFunc) { + return (*qptPlans[nodeIdx].buildFunc)(qptPlans[nodeIdx].type); + } + + return NULL; +} + +SNode* qptCreateRealPhysicalPlanNode() { + int32_t nodeIdx = 0; + + do { + nodeIdx = taosRand() % (sizeof(qptPlans) / sizeof(qptPlans[0])); + if (QPT_PLAN_PHYSIC != qptPlans[nodeIdx].classify) { + continue; + } + + return qptCreatePhysicalPlanNode(nodeIdx); + } while (true); +} + +void qptCreatePhysiNodesTree(SPhysiNode** ppRes, SPhysiNode* pParent, int32_t level) { + SPhysiNode* pNew = NULL; + if (level < QPT_MAX_SUBPLAN_LEVEL && (NULL == pParent || QPT_RAND_BOOL_V)) { + pNew = (SPhysiNode*)qptCreateRealPhysicalPlanNode(); + pNew->pParent = pParent; + int32_t childrenNum = taosRand() % QPT_MAX_LEVEL_SUBPLAN_NUM; + for (int32_t i = 0; i < childrenNum; ++i) { + qptCreatePhysiNodesTree(NULL, pNew, level + 1); + } + } else if (QPT_RAND_BOOL_V) { + return; + } + + if (pParent) { + qptNodesListMakeStrictAppend(&pParent->pChildren, (SNode*)pNew); + } else { + *ppRes = pNew; + } +} + +void qptAppendParentsSubplan(SNodeList* pParents, SNodeList* pNew) { + SNode* pNode = NULL; + FOREACH(pNode, pNew) { + if (NULL == pNode || QUERY_NODE_PHYSICAL_SUBPLAN != nodeType(pNode)) { + continue; + } + + qptNodesListMakeStrictAppend(&pParents, pNode); + } +} + +void qptSetSubplansRelation(SNodeList* pParents, SNodeList* pNew) { + int32_t parentIdx = 0; + SNode* pNode = NULL; + SSubplan* pChild = NULL; + SSubplan* pParent = NULL; + FOREACH(pNode, pNew) { + if (QPT_CORRECT_HIGH_PROB()) { + pChild = (SSubplan*)pNode; + parentIdx = taosRand() % pParents->length; + pParent = 
(SSubplan*)nodesListGetNode(pParents, parentIdx); + qptNodesListMakeStrictAppend(&pParent->pChildren, pNode); + qptNodesListMakeStrictAppend(&pChild->pParents, (SNode*)pParent); + } + } +} + +void qptBuildSubplansRelation(SNodeList* pList) { + SNode* pNode = NULL; + SNodeList* pParents = NULL; + FOREACH(pNode, pList) { + if (NULL == pNode || QUERY_NODE_NODE_LIST != nodeType(pNode)) { + continue; + } + + SNodeListNode* pNodeList = (SNodeListNode*)pNode; + + if (NULL == pParents) { + qptAppendParentsSubplan(pParents, pNodeList->pNodeList); + continue; + } + + qptSetSubplansRelation(pParents, pNodeList->pNodeList); + qptAppendParentsSubplan(pParents, pNodeList->pNodeList); + } +} + +SNode* qptCreateQueryPlanNode(int32_t nodeType) { + SQueryPlan* pPlan = NULL; + assert(0 == nodesMakeNode((ENodeType)nodeType, (SNode**)&pPlan)); + + int32_t subplanNum = 0, subplanLevelNum = taosRand() % QPT_MAX_SUBPLAN_LEVEL; + pPlan->queryId = QPT_CORRECT_HIGH_PROB() ? qptCtx.param.plan.queryId : taosRand(); + + for (int32_t l = 0; l < subplanLevelNum; ++l) { + qptInitMakeNodeCtx(QPT_CORRECT_HIGH_PROB() ? false : true, QPT_RAND_BOOL_V, QPT_RAND_BOOL_V, 0, NULL); + qptNodesListMakeStrictAppend(&pPlan->pSubplans, qptMakeNodeListNode(QPT_NODE_SUBPLAN, NULL)); + } + + pPlan->numOfSubplans = qptGetSubplanNum(pPlan->pSubplans); + qptBuildSubplansRelation(pPlan->pSubplans); + + pPlan->explainInfo.mode = (EExplainMode)(taosRand() % EXPLAIN_MODE_ANALYZE + 1); + pPlan->explainInfo.verbose = QPT_RAND_BOOL_V; + pPlan->explainInfo.ratio = taosRand(); + + pPlan->pPostPlan = QPT_RAND_BOOL_V ? 
NULL : (void*)0x1; + + return (SNode*)pPlan; +} + + +void qptRerunBlockedHere() { + while (qptInRerun) { + taosSsleep(1); + } +} + +void qptResetForReRun() { + qptCtx.param.plan.taskId = 1; + qptCtx.param.vnode.vgId = 1; + + qptResetTableCols(); + + qptCtx.buildCtx.pCurr = NULL; + qptCtx.buildCtx.pCurrTask = NULL; + + qptCtx.result.code = 0; +} + +void qptSingleTestDone(bool* contLoop) { +/* + if (jtRes.succeed) { + *contLoop = false; + return; + } +*/ + + if (qptErrorRerun) { + *contLoop = false; + return; + } + + qptInRerun = true; +} + + +void qptInitLogFile() { + const char *defaultLogFileNamePrefix = "queryPlanTestlog"; + const int32_t maxLogFileNum = 10; + + tsAsyncLog = 0; + qDebugFlag = 159; + TAOS_STRCPY(tsLogDir, TD_LOG_DIR_PATH); + + if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum, false) < 0) { + printf("failed to open log file in directory:%s\n", tsLogDir); + } +} + + + +void qptInitTest() { + qptInitLogFile(); +} + +void qptHandleTestEnd() { + +} + +void qptExecPlan(SReadHandle* pReadHandle, SNode* pNode, SExecTaskInfo* pTaskInfo, SOperatorInfo** ppOperaotr) { + switch (nodeType(pNode)) { + case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + qptCtx.result.code = createTagScanOperatorInfo(pReadHandle, (STagScanPhysiNode*)pNode, NULL, NULL, NULL, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: + qptCtx.result.code = createTableScanOperatorInfo((STableScanPhysiNode*)pNode, pReadHandle, NULL, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN: + qptCtx.result.code = createTableSeqScanOperatorInfo(pReadHandle, pTaskInfo, ppOperaotr); // usless + break; + case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN: + qptCtx.result.code = createTableMergeScanOperatorInfo((STableScanPhysiNode*)pNode, pReadHandle, NULL, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN: + qptCtx.result.code = createStreamScanOperatorInfo(pReadHandle, (STableScanPhysiNode*)pNode, NULL, NULL, pTaskInfo, 
ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN: + qptCtx.result.code = createSysTableScanOperatorInfo(pReadHandle, (SSystemTableScanPhysiNode*)pNode, NULL, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: + qptCtx.result.code = createDataBlockInfoScanOperator(pReadHandle, (SBlockDistScanPhysiNode*)pNode, NULL, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: + qptCtx.result.code = createCacherowsScanOperator((SLastRowScanPhysiNode*)pNode, pReadHandle, NULL, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_PROJECT: + qptCtx.result.code = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: + qptCtx.result.code = createMergeJoinOperatorInfo(NULL, 0, (SSortMergeJoinPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: { + SAggPhysiNode* pAggNode = (SAggPhysiNode*)pNode; + if (pAggNode->pGroupKeys != NULL) { + qptCtx.result.code = createGroupOperatorInfo(NULL, pAggNode, pTaskInfo, ppOperaotr); + } else { + qptCtx.result.code = createAggregateOperatorInfo(NULL, pAggNode, pTaskInfo, ppOperaotr); + } + break; + } + case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: + qptCtx.result.code = createExchangeOperatorInfo(NULL, (SExchangePhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_MERGE: + qptCtx.result.code = createMultiwayMergeOperatorInfo(NULL, 0, (SMergePhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_SORT: + qptCtx.result.code = createSortOperatorInfo(NULL, (SSortPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT: + qptCtx.result.code = createGroupSortOperatorInfo(NULL, (SGroupSortPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL: + qptCtx.result.code = createIntervalOperatorInfo(NULL, 
(SIntervalPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL: + qptCtx.result.code = createMergeIntervalOperatorInfo(NULL, (SMergeIntervalPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL: + qptCtx.result.code = createMergeAlignedIntervalOperatorInfo(NULL, (SMergeAlignedIntervalPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: + qptCtx.result.code = createStreamIntervalOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, pReadHandle, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL: + qptCtx.result.code = createStreamFinalIntervalOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, 0, pReadHandle, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_FILL: + qptCtx.result.code = createFillOperatorInfo(NULL, (SFillPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL: + qptCtx.result.code = createStreamFillOperatorInfo(NULL, (SStreamFillPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: + qptCtx.result.code = createSessionAggOperatorInfo(NULL, (SSessionWinodwPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: + qptCtx.result.code = createStreamSessionAggOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, pReadHandle, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION: + qptCtx.result.code = createStreamFinalSessionAggOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, 0, pReadHandle, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION: + qptCtx.result.code = createStreamFinalSessionAggOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, 0, pReadHandle, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: + 
qptCtx.result.code = createStatewindowOperatorInfo(NULL, (SStateWinodwPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: + qptCtx.result.code = createStreamStateAggOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, pReadHandle, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_PARTITION: + qptCtx.result.code = createPartitionOperatorInfo(NULL, (SPartitionPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION: + qptCtx.result.code = createStreamPartitionOperatorInfo(NULL, (SStreamPartitionPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC: + qptCtx.result.code = createIndefinitOutputOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: + qptCtx.result.code = createTimeSliceOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_INSERT: + qptCtx.result.code = 0; + break; + case QUERY_NODE_PHYSICAL_PLAN_DISPATCH: + case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT: + case QUERY_NODE_PHYSICAL_PLAN_DELETE: { + DataSinkHandle handle = NULL; + qptCtx.result.code = dsCreateDataSinker(NULL, (SDataSinkNode*)pNode, &handle, NULL, NULL); + dsDestroyDataSinker(handle); + break; + } + case QUERY_NODE_PHYSICAL_SUBPLAN: { + DataSinkHandle handle = NULL; + qptCtx.result.code = qCreateExecTask(pReadHandle, qptCtx.param.vnode.vgId, pTaskInfo->id.taskId, (SSubplan*)pNode, (qTaskInfo_t*)&pTaskInfo, &handle, + QPT_RAND_BOOL_V ? 
0 : 1, taosStrdup("sql string"), OPTR_EXEC_MODEL_BATCH); + break; + } + case QUERY_NODE_PHYSICAL_PLAN: { + qptCtx.result.code = schedulerValidatePlan((SQueryPlan*)pNode); + break; + } + case QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN: + qptCtx.result.code = createTableCountScanOperatorInfo(pReadHandle, (STableCountScanPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT: + qptCtx.result.code = createEventwindowOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT: + qptCtx.result.code = createStreamEventAggOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, pReadHandle, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN: + qptCtx.result.code = createHashJoinOperatorInfo(NULL, 0, (SHashJoinPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE: + qptCtx.result.code = createGroupCacheOperatorInfo(NULL, 0, (SGroupCachePhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL: + qptCtx.result.code = createDynQueryCtrlOperatorInfo(NULL, 0, (SDynQueryCtrlPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT: + qptCtx.result.code = createCountwindowOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, ppOperaotr); + break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT: + qptCtx.result.code = createStreamCountAggOperatorInfo(NULL, (SPhysiNode*)pNode, pTaskInfo, pReadHandle, ppOperaotr); + break; + default: + assert(0); + } + + if (qptCtx.result.code) { + qptCtx.result.failedTimes++; + } else { + qptCtx.result.succeedTimes++; + } + +} + +void qptRunSingleOpTest() { + SNode* pNode = NULL; + SReadHandle readHandle = {0}; + SOperatorInfo* pOperator = NULL; + SExecTaskInfo* pTaskInfo = NULL; + SStorageAPI storageAPI = {0}; + + qptResetForReRun(); + + doCreateTask(qptCtx.param.plan.queryId, qptCtx.param.plan.taskId, qptCtx.param.vnode.vgId, 
OPTR_EXEC_MODEL_BATCH, &storageAPI, &pTaskInfo); + qptCtx.buildCtx.pCurrTask = pTaskInfo; + + pNode = (SNode*)qptCreatePhysicalPlanNode(qptCtx.param.plan.subplanIdx[0]); + + qptPrintBeginInfo(); + + qptCtx.result.startTsUs = taosGetTimestampUs(); + + qptExecPlan(&readHandle, pNode, pTaskInfo, &pOperator); + + doDestroyTask(pTaskInfo); + destroyOperator(pOperator); + nodesDestroyNode((SNode*)pNode); + + qptPrintEndInfo(); + + qptHandleTestEnd(); +} + +void qptRunSubplanTest() { + SNode* pNode = NULL; + SReadHandle readHandle = {0}; + SOperatorInfo* pOperator = NULL; + + if (qptCtx.loopIdx > 0) { + qptResetForReRun(); + } + + //pNode = (SNode*)qptCreatePhysicalPlanNode(qptCtx.param.plan.subplanType[0]); + + qptPrintBeginInfo(); + + qptCtx.result.startTsUs = taosGetTimestampUs(); + //qptCtx.result.code = createTagScanOperatorInfo(&readHandle, (STagScanPhysiNode*)pNode, NULL, NULL, NULL, NULL, &pOperator); + //qptCtx.result.code = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pNode, NULL, &pOperator); + + destroyOperator(pOperator); + nodesDestroyNode((SNode*)pNode); + + qptPrintEndInfo(); + + qptHandleTestEnd(); +} + + +void qptRunPlanTest() { + if (qptCtx.param.plan.singlePhysiNode) { + qptRunSingleOpTest(); + } else { + qptRunSubplanTest(); + } +} + +SQPTNodeParam* qptInitNodeParam(int32_t nodeType) { + return NULL; +} + +void qptInitTableCols(SNodeList** ppList, int32_t colNum, EColumnType colType) { + SQPTCol* pCol = NULL; + int32_t tbnameIdx = -1; + if (QPT_RAND_BOOL_V && COLUMN_TYPE_TAG == colType) { + tbnameIdx = taosRand() % colNum; + } + + for (int32_t i = 0; i < colNum; ++i) { + qptNodesCalloc(1, sizeof(SQPTCol), (void**)&pCol); + pCol->type = QPT_QUERY_NODE_COL; + + if (tbnameIdx >= 0 && i == tbnameIdx) { + strcpy(pCol->name, "tbname"); + pCol->dtype = TSDB_DATA_TYPE_VARCHAR; + pCol->len = qptGetColumnRandLen(pCol->dtype); + pCol->inUse = 0; + pCol->hasIndex = QPT_RAND_BOOL_V; + pCol->isPrimTs = QPT_RAND_BOOL_V; + pCol->isPk = QPT_RAND_BOOL_V; + 
pCol->colType = COLUMN_TYPE_TBNAME; + + qptNodesListMakeStrictAppend(ppList, (SNode *)pCol); + continue; + } + + qptInitSingleTableCol(pCol, i, colType); + + qptNodesListMakeStrictAppend(ppList, (SNode *)pCol); + } +} + +void qptInitTestCtx(bool correctExpected, bool singleNode, int32_t nodeType, int32_t nodeIdx, int32_t paramNum, SQPTNodeParam* nodeParam) { + qptCtx.param.correctExpected = correctExpected; + qptCtx.param.schedulerId = taosRand(); + strcpy(qptCtx.param.userName, "user1"); + qptCtx.param.plan.singlePhysiNode = singleNode; + + if (singleNode) { + qptCtx.param.plan.subplanMaxLevel = 1; + qptCtx.param.plan.subplanType[0] = nodeType; + qptCtx.param.plan.subplanIdx[0] = nodeIdx; + } else { + qptCtx.param.plan.subplanMaxLevel = taosRand() % QPT_MAX_SUBPLAN_LEVEL + 1; + for (int32_t i = 0; i < qptCtx.param.plan.subplanMaxLevel; ++i) { + nodeIdx = taosRand() % QPT_PHYSIC_NODE_NUM(); + qptCtx.param.plan.subplanType[i] = qptPlans[nodeIdx].type; + qptCtx.param.plan.subplanIdx[i] = nodeIdx; + } + } + + if (paramNum > 0) { + qptCtx.param.plan.physiNodeParamNum = paramNum; + qptCtx.param.plan.physicNodeParam = nodeParam; + } + + qptCtx.param.plan.queryId++; + qptCtx.param.plan.taskId++; + + qptCtx.param.db.precision = TSDB_TIME_PRECISION_MILLI; + strcpy(qptCtx.param.db.dbName, "qptdb1"); + + qptCtx.param.vnode.vnodeNum = QPT_DEFAULT_VNODE_NUM; + qptCtx.param.vnode.vgId = 1; + qptCtx.param.vnode.epSet.numOfEps = 1; + qptCtx.param.vnode.epSet.inUse = 0; + strcpy(qptCtx.param.vnode.epSet.eps[0].fqdn, "127.0.0.1"); + qptCtx.param.vnode.epSet.eps[0].port = 6030; + + qptCtx.param.tbl.uid = 100; + qptCtx.param.tbl.suid = 1; + qptGetRandRealTableType(&qptCtx.param.tbl.tblType); + qptCtx.param.tbl.colNum = taosRand() % 4096 + 1; + qptCtx.param.tbl.tagNum = taosRand() % 128 + 1; + qptCtx.param.tbl.pkNum = taosRand() % 2; + strcpy(qptCtx.param.tbl.tblName, "qpttbl1"); + strcpy(qptCtx.param.tbl.tblName, "tbl1"); + + qptInitTableCols(&qptCtx.param.tbl.pColList, 
qptCtx.param.tbl.colNum, COLUMN_TYPE_COLUMN); + qptInitTableCols(&qptCtx.param.tbl.pTagList, qptCtx.param.tbl.tagNum, COLUMN_TYPE_TAG); + + SNode* pTmp = NULL; + FOREACH(pTmp, qptCtx.param.tbl.pColList) { + qptNodesListMakeStrictAppend(&qptCtx.param.tbl.pColTagList, pTmp); + } + FOREACH(pTmp, qptCtx.param.tbl.pTagList) { + qptNodesListMakeStrictAppend(&qptCtx.param.tbl.pColTagList, pTmp); + } + + qptCtx.buildCtx.nextBlockId++; + qptCtx.buildCtx.nextSubplanId++; +} + +void qptDestroyTestCtx() { + SNode* pTmp = NULL; + FOREACH(pTmp, qptCtx.param.tbl.pColList) { + qptNodesFree(pTmp); + } + FOREACH(pTmp, qptCtx.param.tbl.pTagList) { + qptNodesFree(pTmp); + } + nodesClearList(qptCtx.param.tbl.pColList); + nodesClearList(qptCtx.param.tbl.pTagList); + nodesClearList(qptCtx.param.tbl.pColTagList); + + qptCtx.param.tbl.pColList = NULL; + qptCtx.param.tbl.pTagList = NULL; + qptCtx.param.tbl.pColTagList = NULL; +} + +} // namespace + +#if 1 +#if 0 +TEST(singleRandNodeTest, loopPlans) { + char* caseType = "singleRandNodeTest:loopPlans"; + + for (qptCtx.loopIdx = 0; qptCtx.loopIdx < QPT_MAX_LOOP; ++qptCtx.loopIdx) { + for (int32_t i = 0; i < sizeof(qptPlans)/sizeof(qptPlans[0]); ++i) { + sprintf(qptCtx.caseName, "%s:%s", caseType, qptPlans[i].name); + qptInitTestCtx(false, true, qptPlans[i].type, i, 0, NULL); + + qptRunPlanTest(); + + qptDestroyTestCtx(); + } + } + + qptPrintStatInfo(); +} +#endif +#if 1 +TEST(singleRandNodeTest, specificPlan) { + char* caseType = "singleRandNodeTest:specificPlan"; + + int32_t idx = qptGetSpecificPlanIndex(QUERY_NODE_PHYSICAL_PLAN); + for (qptCtx.loopIdx = 0; qptCtx.loopIdx < QPT_MAX_LOOP; ++qptCtx.loopIdx) { + sprintf(qptCtx.caseName, "%s:%s", caseType, qptPlans[idx].name); + qptInitTestCtx(false, true, qptPlans[idx].type, idx, 0, NULL); + + qptRunPlanTest(); + + qptDestroyTestCtx(); + } + + qptPrintStatInfo(); +} +#endif + + +#endif + + +int main(int argc, char** argv) { + taosSeedRand(taosGetTimestampSec()); + qptInitTest(); + 
testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + + + +#pragma GCC diagnosti diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index 4164852111c..6891653981d 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -1,6 +1,10 @@ aux_source_directory(src FUNCTION_SRC) aux_source_directory(src/detail FUNCTION_SRC_DETAIL) list(REMOVE_ITEM FUNCTION_SRC src/udfd.c) +IF(COMPILER_SUPPORT_AVX2) + MESSAGE(STATUS "AVX2 instructions is ACTIVATED") + set_source_files_properties(src/detail/tminmaxavx.c PROPERTIES COMPILE_FLAGS -mavx2) +ENDIF() add_library(function STATIC ${FUNCTION_SRC} ${FUNCTION_SRC_DETAIL}) target_include_directories( function @@ -140,6 +144,11 @@ target_link_libraries( # SET(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/build/bin) add_executable(udfd src/udfd.c) + +if(${TD_DARWIN}) + target_compile_options(udfd PRIVATE -Wno-error=deprecated-non-prototype) +endif() + target_include_directories( udfd PUBLIC diff --git a/source/libs/function/inc/builtins.h b/source/libs/function/inc/builtins.h index 5707ee76f4e..fb0db58f1c7 100644 --- a/source/libs/function/inc/builtins.h +++ b/source/libs/function/inc/builtins.h @@ -22,16 +22,48 @@ extern "C" { #include "functionMgtInt.h" +struct SFunctionParaInfo; + typedef int32_t (*FTranslateFunc)(SFunctionNode* pFunc, char* pErrBuf, int32_t len); typedef EFuncDataRequired (*FFuncDataRequired)(SFunctionNode* pFunc, STimeWindow* pTimeWindow); typedef int32_t (*FCreateMergeFuncParameters)(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters); typedef EFuncDataRequired (*FFuncDynDataRequired)(void* pRes, SDataBlockInfo* pBlocInfo); typedef EFuncReturnRows (*FEstimateReturnRows)(SFunctionNode* pFunc); +#define MAX_FUNC_PARA_NUM 16 +#define MAX_FUNC_PARA_FIXED_VALUE_NUM 16 +typedef struct SParamRange { + int64_t iMinVal; + int64_t iMaxVal; +} SParamRange; + +typedef struct SParamInfo { + bool isLastParam; + int8_t 
startParam; + int8_t endParam; + uint64_t validDataType; + uint64_t validNodeType; + uint64_t paramAttribute; + uint8_t valueRangeFlag; // 0 for no range and no fixed value, 1 for value has range, 2 for fixed value + uint8_t fixedValueSize; + char* fixedStrValue[MAX_FUNC_PARA_FIXED_VALUE_NUM]; // used for input parameter + int64_t fixedNumValue[MAX_FUNC_PARA_FIXED_VALUE_NUM]; // used for input parameter + SParamRange range; +} SParamInfo; + +typedef struct SFunctionParaInfo { + int8_t minParamNum; + int8_t maxParamNum; + uint8_t paramInfoPattern; + SParamInfo inputParaInfo[MAX_FUNC_PARA_NUM][MAX_FUNC_PARA_NUM]; + SParamInfo outputParaInfo; +} SFunctionParaInfo; + typedef struct SBuiltinFuncDefinition { const char* name; EFunctionType type; uint64_t classification; + SFunctionParaInfo parameters; FTranslateFunc translateFunc; FFuncDataRequired dataRequiredFunc; FFuncDynDataRequired dynDataRequiredFunc; diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index 36e53d0a80e..a1c82dc58b7 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -25,6 +25,11 @@ extern "C" { #include "functionResInfoInt.h" int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems); +int32_t i8VectorCmpAVX2(const void* pData, int32_t numOfRows, bool isMinFunc, bool signVal, int64_t* res); +int32_t i16VectorCmpAVX2(const void* pData, int32_t numOfRows, bool isMinFunc, bool signVal, int64_t* res); +int32_t i32VectorCmpAVX2(const void* pData, int32_t numOfRows, bool isMinFunc, bool signVal, int64_t* res); +int32_t floatVectorCmpAVX2(const float* pData, int32_t numOfRows, bool isMinFunc, float* res); +int32_t doubleVectorCmpAVX2(const double* pData, int32_t numOfRows, bool isMinFunc, double* res); int32_t saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos); int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* 
pSrcBlock, STuplePos* pPos); diff --git a/source/libs/function/inc/functionMgtInt.h b/source/libs/function/inc/functionMgtInt.h index 3112245de9c..e10581beb63 100644 --- a/source/libs/function/inc/functionMgtInt.h +++ b/source/libs/function/inc/functionMgtInt.h @@ -64,6 +64,80 @@ extern "C" { #define FUNC_UDF_ID_START 5000 +#define FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(n) ((uint64_t)1 << n) +#define FUNC_PARAM_SUPPORT_ALL_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(0) +#define FUNC_PARAM_SUPPORT_NUMERIC_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(1) +#define FUNC_PARAM_SUPPORT_VAR_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(2) +#define FUNC_PARAM_SUPPORT_STRING_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(3) +#define FUNC_PARAM_SUPPORT_BOOL_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(4) +#define FUNC_PARAM_SUPPORT_TINYINT_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(5) +#define FUNC_PARAM_SUPPORT_SMALLINT_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(6) +#define FUNC_PARAM_SUPPORT_INT_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(7) +#define FUNC_PARAM_SUPPORT_BIGINT_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(8) +#define FUNC_PARAM_SUPPORT_FLOAT_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(9) +#define FUNC_PARAM_SUPPORT_DOUBLE_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(10) +#define FUNC_PARAM_SUPPORT_VARCHAR_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(11) +#define FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(12) +#define FUNC_PARAM_SUPPORT_NCHAR_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(13) +#define FUNC_PARAM_SUPPORT_UTINYINT_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(14) +#define FUNC_PARAM_SUPPORT_USMALLINT_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(15) +#define FUNC_PARAM_SUPPORT_UINT_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(16) +#define FUNC_PARAM_SUPPORT_UBIGINT_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(17) +#define FUNC_PARAM_SUPPORT_JSON_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(18) +#define FUNC_PARAM_SUPPORT_VARB_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(19) +#define FUNC_PARAM_SUPPORT_GEOMETRY_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(20) 
+#define FUNC_PARAM_SUPPORT_INTEGER_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(21) +#define FUNC_PARAM_SUPPORT_NULL_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(22) +#define FUNC_PARAM_SUPPORT_UNIX_TS_TYPE FUNC_MGT_FUNC_PARAM_SUPPORT_TYPE(23) + + + +#define FUNC_MGT_FUNC_PARAM_SUPPORT_NODE(n) ((uint64_t)1 << n) +#define FUNC_PARAM_SUPPORT_EXPR_NODE FUNC_MGT_FUNC_PARAM_SUPPORT_NODE(0) +#define FUNC_PARAM_SUPPORT_VALUE_NODE FUNC_MGT_FUNC_PARAM_SUPPORT_NODE(1) +#define FUNC_PARAM_SUPPORT_OPERATOR_NODE FUNC_MGT_FUNC_PARAM_SUPPORT_NODE(2) +#define FUNC_PARAM_SUPPORT_FUNCTION_NODE FUNC_MGT_FUNC_PARAM_SUPPORT_NODE(3) +#define FUNC_PARAM_SUPPORT_LOGIC_CONDITION_NODE FUNC_MGT_FUNC_PARAM_SUPPORT_NODE(4) +#define FUNC_PARAM_SUPPORT_CASE_WHEN_NODE FUNC_MGT_FUNC_PARAM_SUPPORT_NODE(5) +#define FUNC_PARAM_SUPPORT_COLUMN_NODE FUNC_MGT_FUNC_PARAM_SUPPORT_NODE(6) +#define FUNC_PARAM_SUPPORT_NOT_VALUE_NODE FUNC_MGT_FUNC_PARAM_SUPPORT_NODE(7) + +#define FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE 0 +#define FUNC_PARAM_MUST_BE_PRIMTS 1 +#define FUNC_PARAM_MUST_BE_PK 2 +#define FUNC_PARAM_MUST_HAVE_COLUMN 3 +#define FUNC_PARAM_MUST_BE_TIME_UNIT 4 +#define FUNC_PARAM_VALUE_NODE_NOT_NULL 5 + +#define FUNC_PARAM_NO_SPECIFIC_VALUE 0 +#define FUNC_PARAM_HAS_RANGE 1 +#define FUNC_PARAM_HAS_FIXED_VALUE 2 + +#define FUNC_ERR_RET(c) \ + do { \ + int32_t _code = c; \ + if (_code != TSDB_CODE_SUCCESS) { \ + terrno = _code; \ + return _code; \ + } \ + } while (0) +#define FUNC_RET(c) \ + do { \ + int32_t _code = c; \ + if (_code != TSDB_CODE_SUCCESS) { \ + terrno = _code; \ + } \ + return _code; \ + } while (0) +#define FUNC_ERR_JRET(c) \ + do { \ + code = c; \ + if (code != TSDB_CODE_SUCCESS) { \ + terrno = code; \ + goto _return; \ + } \ + } while (0) + #ifdef __cplusplus } #endif diff --git a/source/libs/function/inc/functionResInfoInt.h b/source/libs/function/inc/functionResInfoInt.h index 9ee1e884b33..f97d2e80248 100644 --- a/source/libs/function/inc/functionResInfoInt.h +++ 
b/source/libs/function/inc/functionResInfoInt.h @@ -237,6 +237,7 @@ typedef struct SElapsedInfo { } SElapsedInfo; typedef struct STwaInfo { + double dTwaRes; double dOutput; int64_t numOfElems; SPoint1 p; diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 1fd99125a03..2d68eb9d51c 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -19,7 +19,7 @@ #include "geomFunc.h" #include "querynodes.h" #include "scalar.h" -#include "tanal.h" +#include "tanalytics.h" #include "taoserror.h" #include "ttime.h" @@ -90,28 +90,6 @@ static bool validateMinuteRange(int8_t hour, int8_t minute, char sign) { return false; } -static bool validateTimestampDigits(const SValueNode* pVal) { - if (!IS_INTEGER_TYPE(pVal->node.resType.type)) { - return false; - } - - int64_t tsVal = pVal->datum.i; - char fraction[20] = {0}; - NUM_TO_STRING(pVal->node.resType.type, &tsVal, sizeof(fraction), fraction); - int32_t tsDigits = (int32_t)strlen(fraction); - - if (tsDigits > TSDB_TIME_PRECISION_SEC_DIGITS) { - if (tsDigits == TSDB_TIME_PRECISION_MILLI_DIGITS || tsDigits == TSDB_TIME_PRECISION_MICRO_DIGITS || - tsDigits == TSDB_TIME_PRECISION_NANO_DIGITS) { - return true; - } else { - return false; - } - } - - return true; -} - static bool validateTimezoneFormat(const SValueNode* pVal) { if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) { return false; @@ -298,944 +276,293 @@ static SDataType* getSDataTypeFromNode(SNode* pNode) { } } -// There is only one parameter of numeric type, and the return type is parameter type -static int32_t translateInOutNum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return 
invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } else if (IS_NULL_TYPE(paraType)) { - paraType = TSDB_DATA_TYPE_BIGINT; - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType}; - return TSDB_CODE_SUCCESS; +static bool paramSupportNull(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NULL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE); } -// There is only one parameter of numeric type, and the return type is parameter type -static int32_t translateMinMax(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - SDataType* dataType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); - uint8_t paraType = dataType->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType) && !IS_STR_DATA_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } else if (IS_NULL_TYPE(paraType)) { - paraType = TSDB_DATA_TYPE_BIGINT; - } - int32_t bytes = IS_STR_DATA_TYPE(paraType) ? 
dataType->bytes : tDataTypes[paraType].bytes; - pFunc->node.resType = (SDataType){.bytes = bytes, .type = paraType}; - return TSDB_CODE_SUCCESS; +static bool paramSupportBool(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_BOOL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE); } -// There is only one parameter of numeric type, and the return type is double type -static int32_t translateInNumOutDou(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - return TSDB_CODE_SUCCESS; +static bool paramSupportTinyint(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_TINYINT_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NUMERIC_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_INTEGER_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); } -// There are two parameters of numeric type, and the return type is double type -static int32_t translateIn2NumOutDou(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if ((!IS_NUMERIC_TYPE(para1Type) && !IS_NULL_TYPE(para1Type)) || - (!IS_NUMERIC_TYPE(para2Type) && 
!IS_NULL_TYPE(para2Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - return TSDB_CODE_SUCCESS; +static bool paramSupportSmallint(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_SMALLINT_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NUMERIC_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_INTEGER_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); } -// There is only one parameter of string type, and the return type is parameter type -static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - SDataType* pRestType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); - if (TSDB_DATA_TYPE_VARBINARY == pRestType->type || !IS_STR_DATA_TYPE(pRestType->type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = pRestType->bytes, .type = pRestType->type}; - return TSDB_CODE_SUCCESS; +static bool paramSupportInt(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_INT_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NUMERIC_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_INTEGER_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); } -static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isLtrim) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - SDataType* pRestType1 = 
getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); - if (TSDB_DATA_TYPE_VARBINARY == pRestType1->type || !IS_STR_DATA_TYPE(pRestType1->type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - int32_t numOfSpaces = 0; - SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 0); - // for select trim functions with constant value from table, - // need to set the proper result result schema bytes to avoid - // trailing garbage characters - if (nodeType(pParamNode1) == QUERY_NODE_VALUE) { - SValueNode* pValue = (SValueNode*)pParamNode1; - numOfSpaces = countTrailingSpaces(pValue, isLtrim); - } - - int32_t resBytes = pRestType1->bytes - numOfSpaces; - pFunc->node.resType = (SDataType){.bytes = resBytes, .type = pRestType1->type}; - return TSDB_CODE_SUCCESS; +static bool paramSupportBigint(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_BIGINT_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NUMERIC_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_INTEGER_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); } -static int32_t translateLtrim(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateTrimStr(pFunc, pErrBuf, len, true); +static bool paramSupportFloat(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_FLOAT_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NUMERIC_TYPE); } -static int32_t translateRtrim(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateTrimStr(pFunc, pErrBuf, len, false); +static bool paramSupportDouble(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_DOUBLE_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NUMERIC_TYPE); } -static 
int32_t translateLogarithm(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (1 != numOfParams && 2 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(para1Type) && !IS_NULL_TYPE(para1Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (2 == numOfParams) { - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_NUMERIC_TYPE(para2Type) && !IS_NULL_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - return TSDB_CODE_SUCCESS; +static bool paramSupportVarchar(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_VARCHAR_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_STRING_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_VAR_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); } -static int32_t translateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; - return TSDB_CODE_SUCCESS; +static bool paramSupportTimestamp(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); } -static int32_t translateSum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 
!= LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t resType = 0; - if (IS_SIGNED_NUMERIC_TYPE(paraType) || TSDB_DATA_TYPE_BOOL == paraType || IS_NULL_TYPE(paraType)) { - resType = TSDB_DATA_TYPE_BIGINT; - } else if (IS_UNSIGNED_NUMERIC_TYPE(paraType)) { - resType = TSDB_DATA_TYPE_UBIGINT; - } else if (IS_FLOAT_TYPE(paraType)) { - resType = TSDB_DATA_TYPE_DOUBLE; - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType}; - return TSDB_CODE_SUCCESS; +static bool paramSupportNchar(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NCHAR_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_STRING_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_VAR_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); } -static int32_t translateAvgPartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = getAvgInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; +static bool paramSupportUTinyInt(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UTINYINT_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, 
FUNC_PARAM_SUPPORT_NUMERIC_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_INTEGER_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); } -static int32_t translateAvgMiddle(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - if (TSDB_DATA_TYPE_BINARY != paraType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = getAvgInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; +static bool paramSupportUSmallInt(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_USMALLINT_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NUMERIC_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_INTEGER_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); } -static int32_t translateAvgMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (TSDB_DATA_TYPE_BINARY != paraType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } +static bool paramSupportUInt(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UINT_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NUMERIC_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_INTEGER_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); +} - pFunc->node.resType = (SDataType){.bytes = 
tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; +static bool paramSupportUBigInt(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UBIGINT_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NUMERIC_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_INTEGER_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_UNIX_TS_TYPE); +} - return TSDB_CODE_SUCCESS; +static bool paramSupportJSON(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_JSON_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_VAR_TYPE); } -static int32_t translateAvgState(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); +static bool paramSupportVarBinary(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_VARB_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_VAR_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_STRING_TYPE); +} - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); +static bool paramSupportGeometry(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_GEOMETRY_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_ALL_TYPE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_VAR_TYPE); +} - pFunc->node.resType = (SDataType){.bytes = getAvgInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; +static bool paramSupportValueNode(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, 
FUNC_PARAM_SUPPORT_VALUE_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_EXPR_NODE); } -static int32_t translateAvgStateMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); +static bool paramSupportOperatorNode(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_EXPR_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_OPERATOR_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NOT_VALUE_NODE); +} - pFunc->node.resType = (SDataType){.bytes = getAvgInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; +static bool paramSupportFunctionNode(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_EXPR_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_FUNCTION_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NOT_VALUE_NODE); } -static int32_t translateStdPartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } +static bool paramSupportLogicConNode(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_EXPR_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_LOGIC_CONDITION_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NOT_VALUE_NODE); +} - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } +static bool paramSupportCaseWhenNode(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_EXPR_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_CASE_WHEN_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NOT_VALUE_NODE); +} - pFunc->node.resType = 
(SDataType){.bytes = getStdInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; +static bool paramSupportColumnNode(uint64_t typeFlag) { + return FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_EXPR_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_COLUMN_NODE) || + FUNC_MGT_TEST_MASK(typeFlag, FUNC_PARAM_SUPPORT_NOT_VALUE_NODE); } -static int32_t translateStdMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); +static bool paramSupportNodeType(SNode* pNode, uint64_t typeFlag) { + switch (pNode->type) { + case QUERY_NODE_VALUE: + return paramSupportValueNode(typeFlag); + case QUERY_NODE_OPERATOR: + return paramSupportOperatorNode(typeFlag); + case QUERY_NODE_FUNCTION: + return paramSupportFunctionNode(typeFlag); + case QUERY_NODE_LOGIC_CONDITION: + return paramSupportLogicConNode(typeFlag); + case QUERY_NODE_CASE_WHEN: + return paramSupportCaseWhenNode(typeFlag); + case QUERY_NODE_COLUMN: + return paramSupportColumnNode(typeFlag); + default: + return false; } +} - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (TSDB_DATA_TYPE_BINARY != paraType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); +static bool paramSupportDataType(SDataType* pDataType, uint64_t typeFlag) { + switch (pDataType->type) { + case TSDB_DATA_TYPE_NULL: + return paramSupportNull(typeFlag); + case TSDB_DATA_TYPE_BOOL: + return paramSupportBool(typeFlag); + case TSDB_DATA_TYPE_TINYINT: + return paramSupportTinyint(typeFlag); + case TSDB_DATA_TYPE_SMALLINT: + return paramSupportSmallint(typeFlag); + case TSDB_DATA_TYPE_INT: + return paramSupportInt(typeFlag); + case TSDB_DATA_TYPE_BIGINT: + return paramSupportBigint(typeFlag); + case TSDB_DATA_TYPE_FLOAT: + return paramSupportFloat(typeFlag); + case TSDB_DATA_TYPE_DOUBLE: + return 
paramSupportDouble(typeFlag); + case TSDB_DATA_TYPE_VARCHAR: + return paramSupportVarchar(typeFlag); + case TSDB_DATA_TYPE_TIMESTAMP: + return paramSupportTimestamp(typeFlag); + case TSDB_DATA_TYPE_NCHAR: + return paramSupportNchar(typeFlag); + case TSDB_DATA_TYPE_UTINYINT: + return paramSupportUTinyInt(typeFlag); + case TSDB_DATA_TYPE_USMALLINT: + return paramSupportUSmallInt(typeFlag); + case TSDB_DATA_TYPE_UINT: + return paramSupportUInt(typeFlag); + case TSDB_DATA_TYPE_UBIGINT: + return paramSupportUBigInt(typeFlag); + case TSDB_DATA_TYPE_JSON: + return paramSupportJSON(typeFlag); + case TSDB_DATA_TYPE_VARBINARY: + return paramSupportVarBinary(typeFlag); + case TSDB_DATA_TYPE_GEOMETRY: + return paramSupportGeometry(typeFlag); + default: + return false; } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - - return TSDB_CODE_SUCCESS; } -static int32_t translateStdState(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } +typedef enum { UNKNOWN_BIN = 0, USER_INPUT_BIN, LINEAR_BIN, LOG_BIN } EHistoBinType; - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); +static int8_t validateHistogramBinType(char* binTypeStr) { + int8_t binType; + if (strcasecmp(binTypeStr, "user_input") == 0) { + binType = USER_INPUT_BIN; + } else if (strcasecmp(binTypeStr, "linear_bin") == 0) { + binType = LINEAR_BIN; + } else if (strcasecmp(binTypeStr, "log_bin") == 0) { + binType = LOG_BIN; + } else { + binType = UNKNOWN_BIN; } - pFunc->node.resType = (SDataType){.bytes = getStdInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; + return binType; } -static int32_t 
translateStdStateMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } +static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* errMsg, int32_t msgLen) { + const char* msg1 = "HISTOGRAM function requires four parameters"; + const char* msg3 = "HISTOGRAM function invalid format for binDesc parameter"; + const char* msg4 = "HISTOGRAM function binDesc parameter \"count\" should be in range [1, 1000]"; + const char* msg5 = "HISTOGRAM function bin/parameter should be in range [-DBL_MAX, DBL_MAX]"; + const char* msg6 = "HISTOGRAM function binDesc parameter \"width\" cannot be 0"; + const char* msg7 = "HISTOGRAM function binDesc parameter \"start\" cannot be 0 with \"log_bin\" type"; + const char* msg8 = "HISTOGRAM function binDesc parameter \"factor\" cannot be negative or equal to 0/1"; + const char* msg9 = "HISTOGRAM function out of memory"; - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (TSDB_DATA_TYPE_BINARY != paraType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } + cJSON* binDesc = cJSON_Parse(binDescStr); + int32_t numOfBins; + double* intervals; + if (cJSON_IsObject(binDesc)) { /* linaer/log bins */ + int32_t numOfParams = cJSON_GetArraySize(binDesc); + int32_t startIndex; + if (numOfParams != 4) { + (void)snprintf(errMsg, msgLen, "%s", msg1); + cJSON_Delete(binDesc); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; + } - pFunc->node.resType = (SDataType){.bytes = getStdInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; + cJSON* start = cJSON_GetObjectItem(binDesc, "start"); + cJSON* factor = cJSON_GetObjectItem(binDesc, "factor"); + cJSON* width = cJSON_GetObjectItem(binDesc, "width"); + cJSON* count = cJSON_GetObjectItem(binDesc, "count"); + cJSON* infinity = cJSON_GetObjectItem(binDesc, "infinity"); - return 
TSDB_CODE_SUCCESS; -} + if (!cJSON_IsNumber(start) || !cJSON_IsNumber(count) || !cJSON_IsBool(infinity)) { + (void)snprintf(errMsg, msgLen, "%s", msg3); + cJSON_Delete(binDesc); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; + } -static int32_t translateWduration(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - // pseudo column do not need to check parameters - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT, - .precision = pFunc->node.resType.precision}; - return TSDB_CODE_SUCCESS; -} + if (count->valueint <= 0 || count->valueint > 1000) { // limit count to 1000 + (void)snprintf(errMsg, msgLen, "%s", msg4); + cJSON_Delete(binDesc); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; + } -static int32_t translateNowToday(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - // pseudo column do not need to check parameters - - // add database precision as param - uint8_t dbPrec = pFunc->node.resType.precision; - int32_t code = addUint8Param(&pFunc->pParameterList, dbPrec); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - pFunc->node.resType = - (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translatePi(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = - (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateRand(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (0 != LIST_LENGTH(pFunc->pParameterList) && 1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (1 == LIST_LENGTH(pFunc->pParameterList)) { - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_INTEGER_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, 
pFunc->functionName); - } - } - - if (!pFunc->dual) { - int32_t code = addPseudoParam(&pFunc->pParameterList); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - - pFunc->node.resType = - (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateRound(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList) && 1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } else if (IS_NULL_TYPE(paraType)) { - paraType = TSDB_DATA_TYPE_BIGINT; - } - - if (2 == LIST_LENGTH(pFunc->pParameterList)) { - uint8_t paraType2 = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_NUMERIC_TYPE(paraType2) && !IS_NULL_TYPE(paraType2)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - } - pFunc->node.resType = (SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateTrunc(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType2 = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_NUMERIC_TYPE(paraType2) && !IS_NULL_TYPE(paraType2)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = 
(SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateTimePseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - // pseudo column do not need to check parameters - - pFunc->node.resType = - (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP, - .precision = pFunc->node.resType.precision}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateIsFilledPseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - // pseudo column do not need to check parameters - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes, .type = TSDB_DATA_TYPE_BOOL}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateTimezone(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = (SDataType){.bytes = TD_TIMEZONE_LEN, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (numOfParams < 2 || numOfParams > 11) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(para1Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - for (int32_t i = 1; i < numOfParams; ++i) { - SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, i); - if (QUERY_NODE_VALUE != nodeType(pValue)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - pValue->notReserved = true; - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; - if (!IS_NUMERIC_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - double v = 0; - if (IS_INTEGER_TYPE(paraType)) { - v = 
(double)pValue->datum.i; - } else { - v = pValue->datum.d; - } - - if (v < 0 || v > 100) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } - } - - // set result type - if (numOfParams > 2) { - pFunc->node.resType = (SDataType){.bytes = 3200, .type = TSDB_DATA_TYPE_VARCHAR}; - } else { - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - } - return TSDB_CODE_SUCCESS; -} - -static bool validateApercentileAlgo(const SValueNode* pVal) { - if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) { - return false; - } - return (0 == strcasecmp(varDataVal(pVal->datum.p), "default") || - 0 == strcasecmp(varDataVal(pVal->datum.p), "t-digest")); -} - -static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (2 != numOfParams && 3 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param1 - SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); - if (nodeType(pParamNode1) != QUERY_NODE_VALUE) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode1; - if (pValue->datum.i < 0 || pValue->datum.i > 100) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } - - pValue->notReserved = true; - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param2 - if (3 == numOfParams) { - uint8_t para3Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; - if (!IS_STR_DATA_TYPE(para3Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, 
pFunc->functionName); - } - - SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2); - if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validateApercentileAlgo((SValueNode*)pParamNode2)) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "Third parameter algorithm of apercentile must be 'default' or 't-digest'"); - } - - pValue = (SValueNode*)pParamNode2; - pValue->notReserved = true; - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - - if (isPartial) { - if (2 != numOfParams && 3 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - // param1 - SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); - if (nodeType(pParamNode1) != QUERY_NODE_VALUE) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode1; - if (pValue->datum.i < 0 || pValue->datum.i > 100) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } - - pValue->notReserved = true; - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param2 - if (3 == numOfParams) { - uint8_t para3Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; - if (!IS_STR_DATA_TYPE(para3Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2); - if (QUERY_NODE_VALUE != 
nodeType(pParamNode2) || !validateApercentileAlgo((SValueNode*)pParamNode2)) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "Third parameter algorithm of apercentile must be 'default' or 't-digest'"); - } - - pValue = (SValueNode*)pParamNode2; - pValue->notReserved = true; - } - - pFunc->node.resType = - (SDataType){.bytes = getApercentileMaxSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - } else { - // original percent param is reserved - if (3 != numOfParams && 2 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (TSDB_DATA_TYPE_BINARY != para1Type || !IS_INTEGER_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (3 == numOfParams) { - uint8_t para3Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; - if (!IS_STR_DATA_TYPE(para3Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2); - if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validateApercentileAlgo((SValueNode*)pParamNode2)) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "Third parameter algorithm of apercentile must be 'default' or 't-digest'"); - } - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - } - - return TSDB_CODE_SUCCESS; -} - -static int32_t translateApercentilePartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateApercentileImpl(pFunc, pErrBuf, len, true); -} - -static int32_t translateApercentileMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateApercentileImpl(pFunc, pErrBuf, len, false); -} - -static 
int32_t translateTbnameColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - // pseudo column do not need to check parameters - pFunc->node.resType = - (SDataType){.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateTbUidColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - // pseudo column do not need to check parameters - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateVgIdColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - // pseudo column do not need to check parameters - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_INT].bytes, .type = TSDB_DATA_TYPE_INT}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateVgVerColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateTopBot(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (2 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_NUMERIC_TYPE(para1Type) || !IS_INTEGER_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param1 - SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); - if (nodeType(pParamNode1) != QUERY_NODE_VALUE) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode1; - if (!IS_INTEGER_TYPE(pValue->node.resType.type)) { - 
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (pValue->datum.i < 1 || pValue->datum.i > TOP_BOTTOM_QUERY_LIMIT) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } - - pValue->notReserved = true; - - // set result type - SDataType* pType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); - pFunc->node.resType = (SDataType){.bytes = pType->bytes, .type = pType->type}; - return TSDB_CODE_SUCCESS; -} - -static int32_t reserveFirstMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters) { - int32_t code = nodesListMakeAppend(pParameters, pPartialRes); - if (TSDB_CODE_SUCCESS == code) { - SNode* pNew = NULL; - code = nodesCloneNode(nodesListGetNode(pRawParameters, 1), &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListStrictAppend(*pParameters, pNew); - } - } - return code; -} - -int32_t topBotCreateMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters) { - return reserveFirstMergeParam(pRawParameters, pPartialRes, pParameters); -} - -int32_t apercentileCreateMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters) { - int32_t code = reserveFirstMergeParam(pRawParameters, pPartialRes, pParameters); - if (TSDB_CODE_SUCCESS == code && pRawParameters->length >= 3) { - SNode* pNew = NULL; - code = nodesCloneNode(nodesListGetNode(pRawParameters, 2), &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListStrictAppend(*pParameters, pNew); - } - } - return code; -} - -static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - 
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateSpreadImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (isPartial) { - if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - pFunc->node.resType = (SDataType){.bytes = getSpreadInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - } else { - if (TSDB_DATA_TYPE_BINARY != paraType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - } - - return TSDB_CODE_SUCCESS; -} - -static int32_t translateSpreadPartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateSpreadImpl(pFunc, pErrBuf, len, true); -} - -static int32_t translateSpreadMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateSpreadImpl(pFunc, pErrBuf, len, false); -} - -static int32_t translateSpreadState(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - pFunc->node.resType = (SDataType){.bytes = getSpreadInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; -} - -static int32_t 
translateSpreadStateMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (paraType != TSDB_DATA_TYPE_BINARY) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - pFunc->node.resType = (SDataType){.bytes = getSpreadInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (1 != numOfParams && 2 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - SNode* pPara1 = nodesListGetNode(pFunc->pParameterList, 0); - if (QUERY_NODE_COLUMN != nodeType(pPara1) || PRIMARYKEY_TIMESTAMP_COL_ID != ((SColumnNode*)pPara1)->colId) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "The first parameter of the ELAPSED function can only be the timestamp primary key"); - } - - // param1 - if (2 == numOfParams) { - SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); - if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode1; - - pValue->notReserved = true; - - if (!IS_INTEGER_TYPE(pValue->node.resType.type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t dbPrec = pFunc->node.resType.precision; - - int32_t code = validateTimeUnitParam(dbPrec, (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1)); - if (code == TSDB_CODE_FUNC_TIME_UNIT_TOO_SMALL) { - return buildFuncErrMsg(pErrBuf, len, code, - "ELAPSED function time unit parameter should be greater than db precision"); - } else if (code == 
TSDB_CODE_FUNC_TIME_UNIT_INVALID) { - return buildFuncErrMsg( - pErrBuf, len, code, - "ELAPSED function time unit parameter should be one of the following: [1b, 1u, 1a, 1s, 1m, 1h, 1d, 1w]"); - } - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateElapsedImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - - if (isPartial) { - if (1 != numOfParams && 2 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_TIMESTAMP_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param1 - if (2 == numOfParams) { - SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); - - if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode1; - - pValue->notReserved = true; - - paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_INTEGER_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (pValue->datum.i == 0) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "ELAPSED function time unit parameter should be greater than db precision"); - } - } - - pFunc->node.resType = - (SDataType){.bytes = getElapsedInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - } else { - if (1 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (TSDB_DATA_TYPE_BINARY != paraType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, 
pFunc->functionName); - } - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - } - return TSDB_CODE_SUCCESS; -} - -static int32_t translateElapsedPartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { -#if 0 - return translateElapsedImpl(pFunc, pErrBuf, len, true); -#endif - return 0; -} - -static int32_t translateElapsedMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { -#if 0 - return translateElapsedImpl(pFunc, pErrBuf, len, false); -#endif - return 0; -} - -static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (3 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - for (int32_t i = 0; i < numOfParams; ++i) { - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); - if (i > 0) { // param1 & param2 - if (QUERY_NODE_VALUE != nodeType(pParamNode)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode; - - pValue->notReserved = true; - } - - uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; - if (!IS_NUMERIC_TYPE(colType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - } - - pFunc->node.resType = (SDataType){.bytes = LEASTSQUARES_BUFF_LENGTH, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; -} - -typedef enum { UNKNOWN_BIN = 0, USER_INPUT_BIN, LINEAR_BIN, LOG_BIN } EHistoBinType; - -static int8_t validateHistogramBinType(char* binTypeStr) { - int8_t binType; - if (strcasecmp(binTypeStr, "user_input") == 0) { - binType = USER_INPUT_BIN; - } else if (strcasecmp(binTypeStr, "linear_bin") == 0) { - binType = LINEAR_BIN; - } else if (strcasecmp(binTypeStr, "log_bin") == 0) { - binType = LOG_BIN; - } else { - binType = UNKNOWN_BIN; - } - - return binType; -} - -static int32_t 
validateHistogramBinDesc(char* binDescStr, int8_t binType, char* errMsg, int32_t msgLen) { - const char* msg1 = "HISTOGRAM function requires four parameters"; - const char* msg3 = "HISTOGRAM function invalid format for binDesc parameter"; - const char* msg4 = "HISTOGRAM function binDesc parameter \"count\" should be in range [1, 1000]"; - const char* msg5 = "HISTOGRAM function bin/parameter should be in range [-DBL_MAX, DBL_MAX]"; - const char* msg6 = "HISTOGRAM function binDesc parameter \"width\" cannot be 0"; - const char* msg7 = "HISTOGRAM function binDesc parameter \"start\" cannot be 0 with \"log_bin\" type"; - const char* msg8 = "HISTOGRAM function binDesc parameter \"factor\" cannot be negative or equal to 0/1"; - const char* msg9 = "HISTOGRAM function out of memory"; - - cJSON* binDesc = cJSON_Parse(binDescStr); - int32_t numOfBins; - double* intervals; - if (cJSON_IsObject(binDesc)) { /* linaer/log bins */ - int32_t numOfParams = cJSON_GetArraySize(binDesc); - int32_t startIndex; - if (numOfParams != 4) { - (void)snprintf(errMsg, msgLen, "%s", msg1); - cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; - } - - cJSON* start = cJSON_GetObjectItem(binDesc, "start"); - cJSON* factor = cJSON_GetObjectItem(binDesc, "factor"); - cJSON* width = cJSON_GetObjectItem(binDesc, "width"); - cJSON* count = cJSON_GetObjectItem(binDesc, "count"); - cJSON* infinity = cJSON_GetObjectItem(binDesc, "infinity"); - - if (!cJSON_IsNumber(start) || !cJSON_IsNumber(count) || !cJSON_IsBool(infinity)) { - (void)snprintf(errMsg, msgLen, "%s", msg3); - cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; - } - - if (count->valueint <= 0 || count->valueint > 1000) { // limit count to 1000 - (void)snprintf(errMsg, msgLen, "%s", msg4); - cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; - } - - if (isinf(start->valuedouble) || (width != NULL && isinf(width->valuedouble)) || - (factor != NULL && isinf(factor->valuedouble)) || (count != NULL && isinf(count->valuedouble))) { - 
(void)snprintf(errMsg, msgLen, "%s", msg5); - cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; - } + if (isinf(start->valuedouble) || (width != NULL && isinf(width->valuedouble)) || + (factor != NULL && isinf(factor->valuedouble)) || (count != NULL && isinf(count->valuedouble))) { + (void)snprintf(errMsg, msgLen, "%s", msg5); + cJSON_Delete(binDesc); + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; + } int32_t counter = (int32_t)count->valueint; if (infinity->valueint == false) { @@ -1250,7 +577,7 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* if (intervals == NULL) { (void)snprintf(errMsg, msgLen, "%s", msg9); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } if (cJSON_IsNumber(width) && factor == NULL && binType == LINEAR_BIN) { // linear bin process @@ -1258,7 +585,7 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* (void)snprintf(errMsg, msgLen, "%s", msg6); taosMemoryFree(intervals); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } for (int i = 0; i < counter + 1; ++i) { intervals[startIndex] = start->valuedouble + i * width->valuedouble; @@ -1266,7 +593,7 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* (void)snprintf(errMsg, msgLen, "%s", msg5); taosMemoryFree(intervals); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } startIndex++; } @@ -1276,13 +603,13 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* (void)snprintf(errMsg, msgLen, "%s", msg7); taosMemoryFree(intervals); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } if (factor->valuedouble < 0 || factor->valuedouble == 0 || factor->valuedouble == 1) { (void)snprintf(errMsg, msgLen, "%s", msg8); taosMemoryFree(intervals); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return 
TSDB_CODE_FUNC_HISTOGRAM_ERROR; } for (int i = 0; i < counter + 1; ++i) { intervals[startIndex] = start->valuedouble * pow(factor->valuedouble, i * 1.0); @@ -1290,7 +617,7 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* (void)snprintf(errMsg, msgLen, "%s", msg5); taosMemoryFree(intervals); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } startIndex++; } @@ -1298,7 +625,7 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* (void)snprintf(errMsg, msgLen, "%s", msg3); taosMemoryFree(intervals); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } if (infinity->valueint == true) { @@ -1306,7 +633,7 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* intervals[numOfBins - 1] = INFINITY; // in case of desc bin orders, -inf/inf should be swapped if (numOfBins < 4) { - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } if (intervals[1] > intervals[numOfBins - 2]) { @@ -1317,7 +644,7 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* if (binType != USER_INPUT_BIN) { (void)snprintf(errMsg, msgLen, "%s", msg3); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } numOfBins = cJSON_GetArraySize(binDesc); intervals = taosMemoryCalloc(numOfBins, sizeof(double)); @@ -1331,7 +658,7 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* (void)snprintf(errMsg, msgLen, "%s", msg3); taosMemoryFree(intervals); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } int i = 0; while (bin) { @@ -1340,13 +667,13 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* (void)snprintf(errMsg, msgLen, "%s", msg3); taosMemoryFree(intervals); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } if (i 
!= 0 && intervals[i] <= intervals[i - 1]) { (void)snprintf(errMsg, msgLen, "%s", msg3); taosMemoryFree(intervals); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } bin = bin->next; i++; @@ -1354,7 +681,7 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* } else { (void)snprintf(errMsg, msgLen, "%s", msg3); cJSON_Delete(binDesc); - return TSDB_CODE_FAILED; + return TSDB_CODE_FUNC_HISTOGRAM_ERROR; } cJSON_Delete(binDesc); @@ -1362,491 +689,344 @@ static int32_t validateHistogramBinDesc(char* binDescStr, int8_t binType, char* return TSDB_CODE_SUCCESS; } -static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (4 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(colType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param1 ~ param3 - if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type != TSDB_DATA_TYPE_BINARY || - getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_BINARY || - !IS_INTEGER_TYPE(getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 3))->type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - int8_t binType; - char* binDesc; - for (int32_t i = 1; i < numOfParams; ++i) { - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); - if (QUERY_NODE_VALUE != nodeType(pParamNode)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode; - - pValue->notReserved = true; - - if (i == 1) { - binType = validateHistogramBinType(varDataVal(pValue->datum.p)); - if (binType == UNKNOWN_BIN) { - return buildFuncErrMsg(pErrBuf, len, 
TSDB_CODE_FUNC_FUNTION_ERROR, - "HISTOGRAM function binType parameter should be " - "\"user_input\", \"log_bin\" or \"linear_bin\""); +static int32_t checkRangeValue(SNode *pNode, SParamRange range, bool *isMatch) { + int32_t code = TSDB_CODE_SUCCESS; + if (pNode->type == QUERY_NODE_VALUE) { + SValueNode* pVal = (SValueNode*)pNode; + if (IS_INTEGER_TYPE(getSDataTypeFromNode(pNode)->type)) { + if (pVal->datum.i < range.iMinVal || + pVal->datum.i > range.iMaxVal) { + code = TSDB_CODE_FUNC_FUNTION_PARA_RANGE; + *isMatch = false; } - } - - if (i == 2) { - char errMsg[128] = {0}; - binDesc = varDataVal(pValue->datum.p); - if (TSDB_CODE_SUCCESS != validateHistogramBinDesc(binDesc, binType, errMsg, (int32_t)sizeof(errMsg))) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, errMsg); + } else { + if ((int64_t)pVal->datum.d < range.iMinVal || + (int64_t)pVal->datum.d > range.iMaxVal) { + code = TSDB_CODE_FUNC_FUNTION_PARA_RANGE; + *isMatch = false; } } - - if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "HISTOGRAM function normalized parameter should be 0/1"); - } + } else { + // for other node type, range check should be done in process function } - - pFunc->node.resType = (SDataType){.bytes = 512, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; + return code; } -static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (isPartial) { - if (4 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(colType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param1 ~ param3 - if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type != 
TSDB_DATA_TYPE_BINARY || - getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_BINARY || - !IS_INTEGER_TYPE(getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 3))->type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - int8_t binType; - char* binDesc; - for (int32_t i = 1; i < numOfParams; ++i) { - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); - if (QUERY_NODE_VALUE != nodeType(pParamNode)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode; - - pValue->notReserved = true; - - if (i == 1) { - binType = validateHistogramBinType(varDataVal(pValue->datum.p)); - if (binType == UNKNOWN_BIN) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "HISTOGRAM function binType parameter should be " - "\"user_input\", \"log_bin\" or \"linear_bin\""); +static int32_t checkFixedValue(SNode *pNode, const SParamInfo *paramPattern, int32_t paramIdx, bool *isMatch) { + int32_t code = TSDB_CODE_SUCCESS; + bool checkStr = paramSupportVarBinary(paramPattern->validDataType) || + paramSupportVarchar(paramPattern->validDataType) || + paramSupportNchar(paramPattern->validDataType); + if (pNode->type == QUERY_NODE_VALUE) { + SValueNode* pVal = (SValueNode*)pNode; + if (!checkStr) { + for (int32_t k = 0; k < paramPattern->fixedValueSize; k++) { + if (pVal->datum.i == paramPattern->fixedNumValue[k]) { + code = TSDB_CODE_SUCCESS; + *isMatch = true; + break; + } else { + code = TSDB_CODE_FUNC_FUNTION_PARA_VALUE; + *isMatch = false; } } - - if (i == 2) { - char errMsg[128] = {0}; - binDesc = varDataVal(pValue->datum.p); - if (TSDB_CODE_SUCCESS != validateHistogramBinDesc(binDesc, binType, errMsg, (int32_t)sizeof(errMsg))) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, errMsg); + } else { + for (int32_t k = 0; k < paramPattern->fixedValueSize; k++) { + if (strcasecmp(pVal->literal, 
paramPattern->fixedStrValue[k]) == 0) { + code = TSDB_CODE_SUCCESS; + *isMatch = true; + break; + } else { + code = TSDB_CODE_FUNC_FUNTION_PARA_VALUE; + *isMatch = false; } } - - if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "HISTOGRAM function normalized parameter should be 0/1"); - } } - - pFunc->node.resType = - (SDataType){.bytes = getHistogramInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; } else { - if (1 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type != TSDB_DATA_TYPE_BINARY) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = 512, .type = TSDB_DATA_TYPE_BINARY}; - } - return TSDB_CODE_SUCCESS; -} - -static int32_t translateHistogramPartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateHistogramImpl(pFunc, pErrBuf, len, true); -} - -static int32_t translateHistogramMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateHistogramImpl(pFunc, pErrBuf, len, false); -} - -static int32_t translateHLL(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + // for other node type, fixed value check should be done in process function } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; - return TSDB_CODE_SUCCESS; + return code; } -static int32_t translateHLLImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (isPartial) { - pFunc->node.resType = - (SDataType){.bytes = getHistogramInfoSize() + 
VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - } else { - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; +static int32_t checkPrimTS(SNode *pNode, bool *isMatch) { + int32_t code = TSDB_CODE_SUCCESS; + if (nodeType(pNode) != QUERY_NODE_COLUMN || !IS_TIMESTAMP_TYPE(getSDataTypeFromNode(pNode)->type) || + !((SColumnNode*)pNode)->isPrimTs) { + code = TSDB_CODE_FUNC_FUNTION_PARA_PRIMTS; + *isMatch = false; } - - return TSDB_CODE_SUCCESS; -} - -static int32_t translateHLLPartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateHLLImpl(pFunc, pErrBuf, len, true); -} - -static int32_t translateHLLMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateHLLImpl(pFunc, pErrBuf, len, false); -} - -static int32_t translateHLLState(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateHLLPartial(pFunc, pErrBuf, len); + return code; } -static int32_t translateHLLStateMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type != TSDB_DATA_TYPE_BINARY) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); +static int32_t checkPrimaryKey(SNode *pNode, bool *isMatch) { + int32_t code = TSDB_CODE_SUCCESS; + if (nodeType(pNode) != QUERY_NODE_COLUMN || !IS_INTEGER_TYPE(getSDataTypeFromNode(pNode)->type) || + !((SColumnNode*)pNode)->isPk) { + code = TSDB_CODE_FUNC_FUNTION_PARA_PK; + *isMatch = false; } - - pFunc->node.resType = - (SDataType){.bytes = getHistogramInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; + return code; } -static bool validateStateOper(const SValueNode* pVal) { - if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) { - return false; +static int32_t checkHasColumn(SNode 
*pNode, bool *isMatch) { + int32_t code = TSDB_CODE_SUCCESS; + if (!nodesExprHasColumn(pNode)) { + code = TSDB_CODE_FUNC_FUNTION_PARA_HAS_COL; + *isMatch = false; } - if (strlen(varDataVal(pVal->datum.p)) == 2) { - return ( - 0 == strncasecmp(varDataVal(pVal->datum.p), "GT", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "GE", 2) || - 0 == strncasecmp(varDataVal(pVal->datum.p), "LT", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "LE", 2) || - 0 == strncasecmp(varDataVal(pVal->datum.p), "EQ", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "NE", 2)); - } - return false; + return code; } -static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (3 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(colType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param1 & param2 - for (int32_t i = 1; i < numOfParams; ++i) { - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); - if (QUERY_NODE_VALUE != nodeType(pParamNode)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode; - - if (i == 1 && !validateStateOper(pValue)) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "Second parameter of STATECOUNT function" - "must be one of the following: 'GE', 'GT', 'LE', 'LT', 'EQ', 'NE'"); - } - - pValue->notReserved = true; - } - - if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type != TSDB_DATA_TYPE_BINARY || - (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_BIGINT && - getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_DOUBLE)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, 
pFunc->functionName); +static int32_t checkValueNodeNotNull(SNode *pNode, bool *isMatch) { + int32_t code = TSDB_CODE_SUCCESS; + if (IS_NULL_TYPE(getSDataTypeFromNode(pNode)->type) && QUERY_NODE_VALUE == nodeType(pNode)) { + code = TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + *isMatch = false; } - - // set result type - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; - return TSDB_CODE_SUCCESS; + return code; } -static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (3 != numOfParams && 4 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_NUMERIC_TYPE(colType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param1, param2 & param3 - for (int32_t i = 1; i < numOfParams; ++i) { - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); - if (QUERY_NODE_VALUE != nodeType(pParamNode)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode; - - if (i == 1 && !validateStateOper(pValue)) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "Second parameter of STATEDURATION function" - "must be one of the following: 'GE', 'GT', 'LE', 'LT', 'EQ', 'NE'"); - } else if (i == 3 && pValue->datum.i == 0) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "STATEDURATION function time unit parameter should be greater than db precision"); - } - - pValue->notReserved = true; - } - - if (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type != TSDB_DATA_TYPE_BINARY || - (getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_BIGINT && - 
getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type != TSDB_DATA_TYPE_DOUBLE)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (numOfParams == 4 && - getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 3))->type != TSDB_DATA_TYPE_BIGINT) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (numOfParams == 4) { - uint8_t dbPrec = pFunc->node.resType.precision; - - int32_t code = validateTimeUnitParam(dbPrec, (SValueNode*)nodesListGetNode(pFunc->pParameterList, 3)); - if (code == TSDB_CODE_FUNC_TIME_UNIT_TOO_SMALL) { - return buildFuncErrMsg(pErrBuf, len, code, - "STATEDURATION function time unit parameter should be greater than db precision"); - } else if (code == TSDB_CODE_FUNC_TIME_UNIT_INVALID) { - return buildFuncErrMsg(pErrBuf, len, code, - "STATEDURATION function time unit parameter should be one of the following: [1b, 1u, 1a, " - "1s, 1m, 1h, 1d, 1w]"); - } +static int32_t checkTimeUnit(SNode *pNode, int32_t precision, bool *isMatch) { + if (nodeType(pNode) != QUERY_NODE_VALUE || !IS_INTEGER_TYPE(getSDataTypeFromNode(pNode)->type)) { + *isMatch = false; + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; } - // set result type - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + if (IS_NULL_TYPE(getSDataTypeFromNode(pNode)->type)) { + *isMatch = true; + return TSDB_CODE_SUCCESS; } - uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t resType; - if (!IS_NUMERIC_TYPE(colType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } else { - if (IS_SIGNED_NUMERIC_TYPE(colType)) { - resType = TSDB_DATA_TYPE_BIGINT; 
- } else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) { - resType = TSDB_DATA_TYPE_UBIGINT; - } else if (IS_FLOAT_TYPE(colType)) { - resType = TSDB_DATA_TYPE_DOUBLE; - } else { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } + int32_t code = validateTimeUnitParam(precision, (SValueNode*)pNode); + if (TSDB_CODE_SUCCESS != code) { + *isMatch = false; } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType}; - return TSDB_CODE_SUCCESS; + return code; } +static int32_t validateParam(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + int32_t code = TSDB_CODE_SUCCESS; + SNodeList* paramList = pFunc->pParameterList; + char errMsg[128] = {0}; -static EFuncReturnRows csumEstReturnRows(SFunctionNode* pFunc) { return FUNC_RETURN_ROWS_N; } - -static int32_t translateMavg(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - // param1 - SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); - if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode1; - if (pValue->datum.i < 1 || pValue->datum.i > 1000) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } - - pValue->notReserved = true; - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_NUMERIC_TYPE(colType) || !IS_INTEGER_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + // no need to check + if (funcMgtBuiltins[pFunc->funcId].parameters.paramInfoPattern == 0) { + return TSDB_CODE_SUCCESS; } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; - 
return TSDB_CODE_SUCCESS; -} - -static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList)) { + + // check param num + if ((funcMgtBuiltins[pFunc->funcId].parameters.maxParamNum != -1 && + LIST_LENGTH(paramList) > funcMgtBuiltins[pFunc->funcId].parameters.maxParamNum) || + LIST_LENGTH(paramList) < funcMgtBuiltins[pFunc->funcId].parameters.minParamNum) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - SDataType* pSDataType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); - uint8_t colType = pSDataType->type; + // check each param + for (int32_t i = 0; i < funcMgtBuiltins[pFunc->funcId].parameters.paramInfoPattern; i++) { + bool isMatch = true; + int32_t paramIdx = 0; + const SParamInfo* paramPattern = funcMgtBuiltins[pFunc->funcId].parameters.inputParaInfo[i]; - // param1 - SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); - if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } + while (1) { + for (int8_t j = paramPattern[paramIdx].startParam; j <= (paramPattern[paramIdx].endParam == -1 ? 
INT8_MAX : paramPattern[paramIdx].endParam); j++) { + if (j > LIST_LENGTH(paramList)) { + code = TSDB_CODE_SUCCESS; + isMatch = true; + break; + } + SNode* pNode = nodesListGetNode(paramList, j - 1); + // check node type + if (!paramSupportNodeType(pNode, paramPattern[paramIdx].validNodeType)) { + code = TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + isMatch = false; + break; + } + // check data type + if (!paramSupportDataType(getSDataTypeFromNode(pNode), paramPattern[paramIdx].validDataType)) { + code = TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + isMatch = false; + break; + } + if (paramPattern[paramIdx].validNodeType == FUNC_PARAM_SUPPORT_VALUE_NODE) { + SValueNode* pVal = (SValueNode*)pNode; + pVal->notReserved = true; + } + switch (paramPattern[paramIdx].valueRangeFlag) { + case FUNC_PARAM_NO_SPECIFIC_VALUE: + break; + case FUNC_PARAM_HAS_RANGE: + code = checkRangeValue(pNode, paramPattern[paramIdx].range, &isMatch); + break; + case FUNC_PARAM_HAS_FIXED_VALUE: + code = checkFixedValue(pNode, ¶mPattern[paramIdx], paramIdx, &isMatch); + break; + default: + break; + } + if (!isMatch) { + break; + } + switch (paramPattern[paramIdx].paramAttribute) { + case FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE: + break; + case FUNC_PARAM_MUST_BE_PRIMTS: + code = checkPrimTS(pNode, &isMatch); + break; + case FUNC_PARAM_MUST_BE_PK: + code = checkPrimaryKey(pNode, &isMatch); + break; + case FUNC_PARAM_MUST_HAVE_COLUMN: + code = checkHasColumn(pNode, &isMatch); + break; + case FUNC_PARAM_VALUE_NODE_NOT_NULL: + code = checkValueNodeNotNull(pNode, &isMatch); + break; + case FUNC_PARAM_MUST_BE_TIME_UNIT: + code = checkTimeUnit(pNode, pFunc->node.resType.precision, &isMatch); + break; + default: + break; + } + if (!isMatch) { + break; + } + } - SValueNode* pValue = (SValueNode*)pParamNode1; - if (pValue->datum.i < 1 || pValue->datum.i > 1000) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + if (paramPattern[paramIdx].isLastParam || !isMatch) { + break; + } + paramIdx++; + } + if 
(isMatch) { + return TSDB_CODE_SUCCESS; + } } - - pValue->notReserved = true; - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_INTEGER_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + switch (code) { + case TSDB_CODE_FUNC_FUNTION_PARA_NUM: + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + case TSDB_CODE_FUNC_FUNTION_PARA_TYPE: + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + case TSDB_CODE_FUNC_FUNTION_PARA_VALUE: + return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + case TSDB_CODE_FUNC_FUNTION_PARA_RANGE: + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_PARA_RANGE, "Invalid parameter range : %s", + pFunc->functionName); + case TSDB_CODE_FUNC_FUNTION_PARA_PRIMTS: + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_PARA_PRIMTS, "Parameter should be primary timestamp : %s", + pFunc->functionName); + case TSDB_CODE_FUNC_FUNTION_PARA_PK: + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_PARA_PK, "Parameter should be primary key : %s", + pFunc->functionName); + case TSDB_CODE_FUNC_FUNTION_PARA_HAS_COL: + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_PARA_HAS_COL, "Parameter should have column : %s", + pFunc->functionName); + case TSDB_CODE_FUNC_TIME_UNIT_INVALID: + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_TIME_UNIT_INVALID, "Invalid timzone format : %s", + pFunc->functionName); + case TSDB_CODE_FUNC_TIME_UNIT_TOO_SMALL: + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_TIME_UNIT_TOO_SMALL, "Time unit is too small : %s", + pFunc->functionName); + case TSDB_CODE_FUNC_FUNCTION_HISTO_TYPE: + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNCTION_HISTO_TYPE, "Invalid histogram bin type : %s", + pFunc->functionName); + case TSDB_CODE_FUNC_HISTOGRAM_ERROR: + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_HISTOGRAM_ERROR, errMsg, 
pFunc->functionName); + default: + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "Function check parameter failed : %s", + pFunc->functionName); } +} - // set result type - if (IS_STR_DATA_TYPE(colType)) { - pFunc->node.resType = (SDataType){.bytes = pSDataType->bytes, .type = colType}; - } else { - pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType}; +// There is only one parameter of numeric type, and the return type is parameter type +static int32_t translateOutNum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + if (IS_NULL_TYPE(paraType)) { + paraType = TSDB_DATA_TYPE_BIGINT; } - + pFunc->node.resType = (SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType}; return TSDB_CODE_SUCCESS; } -static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (2 != numOfParams && 3 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } +// There is only one parameter, and the return type is parameter type +static int32_t translateMinMax(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); - SDataType* pSDataType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); - uint8_t colType = pSDataType->type; + SDataType* dataType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + uint8_t paraType = IS_NULL_TYPE(dataType->type) ? TSDB_DATA_TYPE_BIGINT : dataType->type; + int32_t bytes = IS_STR_DATA_TYPE(paraType) ? 
dataType->bytes : tDataTypes[paraType].bytes; + pFunc->node.resType = (SDataType){.bytes = bytes, .type = paraType}; + return TSDB_CODE_SUCCESS; +} - // param1 & param2 - for (int32_t i = 1; i < numOfParams; ++i) { - SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i); - if (QUERY_NODE_VALUE != nodeType(pParamNode)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } +// The return type is DOUBLE type +static int32_t translateOutDouble(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; + return TSDB_CODE_SUCCESS; +} - SValueNode* pValue = (SValueNode*)pParamNode; - if ((IS_SIGNED_NUMERIC_TYPE(pValue->node.resType.type) ? pValue->datum.i : pValue->datum.u) < ((i > 1) ? 0 : 1) || - (IS_SIGNED_NUMERIC_TYPE(pValue->node.resType.type) ? pValue->datum.i : pValue->datum.u) > 100) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "TAIL function second parameter should be in range [1, 100], " - "third parameter should be in range [0, 100]"); - } +static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isLtrim) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); - pValue->notReserved = true; + SDataType* pRestType1 = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; - if (!IS_INTEGER_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } + int32_t numOfSpaces = 0; + SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 0); + // for select trim functions with constant value from table, + // need to set the proper result result schema bytes to avoid + // trailing garbage characters + if (nodeType(pParamNode1) == QUERY_NODE_VALUE) { + SValueNode* pValue = 
(SValueNode*)pParamNode1; + numOfSpaces = countTrailingSpaces(pValue, isLtrim); } - // set result type - if (IS_STR_DATA_TYPE(colType)) { - pFunc->node.resType = (SDataType){.bytes = pSDataType->bytes, .type = colType}; - } else { - pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType}; - } + int32_t resBytes = pRestType1->bytes - numOfSpaces; + pFunc->node.resType = (SDataType){.bytes = resBytes, .type = pRestType1->type}; return TSDB_CODE_SUCCESS; } -static int32_t translateDerivative(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (3 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - - // param1 - SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); - SValueNode* pValue1 = (SValueNode*)pParamNode1; - if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (pValue1->datum.i <= 0) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } +static int32_t translateLtrim(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + return translateTrimStr(pFunc, pErrBuf, len, true); +} - SValueNode* pValue = (SValueNode*)pParamNode1; - pValue->notReserved = true; +static int32_t translateRtrim(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + return translateTrimStr(pFunc, pErrBuf, len, false); +} - if (!IS_NUMERIC_TYPE(colType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } +// The return type is BIGINT type +static int32_t translateOutBigInt(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; + return TSDB_CODE_SUCCESS; +} - SNode* pParamNode2 = 
nodesListGetNode(pFunc->pParameterList, 2); - SValueNode* pValue2 = (SValueNode*)pParamNode2; - pValue2->notReserved = true; +static int32_t translateSum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); - if (pValue2->datum.i != 0 && pValue2->datum.i != 1) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t resType = 0; + if (IS_SIGNED_NUMERIC_TYPE(paraType) || TSDB_DATA_TYPE_BOOL == paraType || IS_NULL_TYPE(paraType)) { + resType = TSDB_DATA_TYPE_BIGINT; + } else if (IS_UNSIGNED_NUMERIC_TYPE(paraType)) { + resType = TSDB_DATA_TYPE_UBIGINT; + } else if (IS_FLOAT_TYPE(paraType)) { + resType = TSDB_DATA_TYPE_DOUBLE; } - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; + pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType}; return TSDB_CODE_SUCCESS; } -static EFuncReturnRows derivativeEstReturnRows(SFunctionNode* pFunc) { - return 1 == ((SValueNode*)nodesListGetNode(pFunc->pParameterList, 2))->datum.i ? 
FUNC_RETURN_ROWS_INDEFINITE - : FUNC_RETURN_ROWS_N_MINUS_1; +static int32_t translateWduration(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + // pseudo column do not need to check parameters + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT, + .precision = pFunc->node.resType.precision}; + return TSDB_CODE_SUCCESS; } -static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - - if (!IS_NUMERIC_TYPE(colType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } +static int32_t translateNowToday(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + // pseudo column do not need to check parameters // add database precision as param uint8_t dbPrec = pFunc->node.resType.precision; @@ -1855,228 +1035,167 @@ static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return code; } - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; + pFunc->node.resType = + (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP}; return TSDB_CODE_SUCCESS; } -static int32_t translateIrateImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { - uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (isPartial) { - if (3 != LIST_LENGTH(pFunc->pParameterList) && 4 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - if (!IS_NUMERIC_TYPE(colType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - int32_t pkBytes = (pFunc->hasPk) ? 
pFunc->pkBytes : 0; - pFunc->node.resType = (SDataType){.bytes = getIrateInfoSize(pkBytes) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; - } else { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - if (TSDB_DATA_TYPE_BINARY != colType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; +static int32_t translateRand(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); - // add database precision as param - uint8_t dbPrec = pFunc->node.resType.precision; - int32_t code = addUint8Param(&pFunc->pParameterList, dbPrec); + if (!pFunc->dual) { + int32_t code = addPseudoParam(&pFunc->pParameterList); if (code != TSDB_CODE_SUCCESS) { return code; } } - + pFunc->node.resType = + (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; return TSDB_CODE_SUCCESS; } -static int32_t translateIratePartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateIrateImpl(pFunc, pErrBuf, len, true); -} - -static int32_t translateIrateMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateIrateImpl(pFunc, pErrBuf, len, false); +// return type is same as first input parameter's type +static int32_t translateOutFirstIn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + pFunc->node.resType = *getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + return TSDB_CODE_SUCCESS; } -static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - uint8_t dbPrec = pFunc->node.resType.precision; +static int32_t translateTimePseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + // pseudo column 
do not need to check parameters - if (2 < numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } + pFunc->node.resType = + (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP, + .precision = pFunc->node.resType.precision}; + return TSDB_CODE_SUCCESS; +} - uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 0)); - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if ((!IS_NUMERIC_TYPE(paraType) && !IS_BOOLEAN_TYPE(paraType)) || QUERY_NODE_VALUE == nodeType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } +static int32_t translateIsFilledPseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + // pseudo column do not need to check parameters - if (2 == numOfParams) { - nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 1)); - paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_INTEGER_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes, .type = TSDB_DATA_TYPE_BOOL}; + return TSDB_CODE_SUCCESS; +} - SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); - if (pValue->datum.i != 0 && pValue->datum.i != 1) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "INTERP function second parameter should be 0/1"); - } +static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - pValue->notReserved = true; + // set result type + if (numOfParams > 2) { + pFunc->node.resType = (SDataType){.bytes = 3200, .type = TSDB_DATA_TYPE_VARCHAR}; + } else { + pFunc->node.resType = (SDataType){.bytes = 
tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; } + return TSDB_CODE_SUCCESS; +} -#if 0 - if (3 <= numOfParams) { - int64_t timeVal[2] = {0}; - for (int32_t i = 1; i < 3; ++i) { - nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i)); - paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; - if (!IS_STR_DATA_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } +static int32_t translateVgIdColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + // pseudo column do not need to check parameters + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_INT].bytes, .type = TSDB_DATA_TYPE_INT}; + return TSDB_CODE_SUCCESS; +} - SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, i); - int32_t ret = convertStringToTimestamp(paraType, pValue->datum.p, dbPrec, &timeVal[i - 1]); - if (ret != TSDB_CODE_SUCCESS) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } - } - if (timeVal[0] > timeVal[1]) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "INTERP function invalid time range"); +static int32_t reserveFirstMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters) { + int32_t code = nodesListMakeAppend(pParameters, pPartialRes); + if (TSDB_CODE_SUCCESS == code) { + SNode* pNew = NULL; + code = nodesCloneNode(nodesListGetNode(pRawParameters, 1), &pNew); + if (TSDB_CODE_SUCCESS == code) { + code = nodesListStrictAppend(*pParameters, pNew); } } + return code; +} - if (4 == numOfParams) { - nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 3)); - paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type; - if (!IS_INTEGER_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } +int32_t topBotCreateMergeParam(SNodeList* 
pRawParameters, SNode* pPartialRes, SNodeList** pParameters) { + return reserveFirstMergeParam(pRawParameters, pPartialRes, pParameters); +} - int32_t ret = validateTimeUnitParam(dbPrec, (SValueNode*)nodesListGetNode(pFunc->pParameterList, 3)); - if (ret == TIME_UNIT_TOO_SMALL) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "INTERP function time interval parameter should be greater than db precision"); - } else if (ret == TIME_UNIT_INVALID) { - return buildFuncErrMsg( - pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "INTERP function time interval parameter should be one of the following: [1b, 1u, 1a, 1s, 1m, 1h, 1d, 1w]"); +int32_t apercentileCreateMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters) { + int32_t code = reserveFirstMergeParam(pRawParameters, pPartialRes, pParameters); + if (TSDB_CODE_SUCCESS == code && pRawParameters->length >= 3) { + SNode* pNew = NULL; + code = nodesCloneNode(nodesListGetNode(pRawParameters, 2), &pNew); + if (TSDB_CODE_SUCCESS == code) { + code = nodesListStrictAppend(*pParameters, pNew); } } -#endif - - pFunc->node.resType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType; - return TSDB_CODE_SUCCESS; + return code; } -static EFuncReturnRows interpEstReturnRows(SFunctionNode* pFunc) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (1 < numOfParams && 1 == ((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i) { - return FUNC_RETURN_ROWS_INDEFINITE; - } else { - return FUNC_RETURN_ROWS_N; - } +static int32_t translateElapsedPartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + return 0; } -static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - // forbid null as first/last input, since first(c0, null, 1) may have different number of input - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - - for (int32_t i = 0; i < numOfParams; ++i) { - uint8_t nodeType = 
nodeType(nodesListGetNode(pFunc->pParameterList, i)); - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; - if (IS_NULL_TYPE(paraType) && QUERY_NODE_VALUE == nodeType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - } - - pFunc->node.resType = *getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); - return TSDB_CODE_SUCCESS; +static int32_t translateElapsedMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + return 0; } -static int32_t translateFirstLastImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { - // first(col_list) will be rewritten as first(col) - SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); - uint8_t paraType = getSDataTypeFromNode(pPara)->type; - int32_t paraBytes = getSDataTypeFromNode(pPara)->bytes; - if (isPartial) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - for (int32_t i = 0; i < numOfParams; ++i) { - uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i)); - uint8_t pType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; - if (IS_NULL_TYPE(pType) && QUERY_NODE_VALUE == nodeType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - } - int32_t pkBytes = (pFunc->hasPk) ? 
pFunc->pkBytes : 0; - pFunc->node.resType = - (SDataType){.bytes = getFirstLastInfoSize(paraBytes, pkBytes) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; +static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; + uint8_t resType; + if (IS_SIGNED_NUMERIC_TYPE(colType)) { + resType = TSDB_DATA_TYPE_BIGINT; + } else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) { + resType = TSDB_DATA_TYPE_UBIGINT; + } else if (IS_FLOAT_TYPE(colType)) { + resType = TSDB_DATA_TYPE_DOUBLE; } else { - if (TSDB_DATA_TYPE_BINARY != paraType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = ((SExprNode*)pPara)->resType; + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType}; return TSDB_CODE_SUCCESS; } -static int32_t translateFirstLastPartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateFirstLastImpl(pFunc, pErrBuf, len, true); -} +static EFuncReturnRows csumEstReturnRows(SFunctionNode* pFunc) { return FUNC_RETURN_ROWS_N; } -static int32_t translateFirstLastMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateFirstLastImpl(pFunc, pErrBuf, len, false); -} +static int32_t translateSampleTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); -static int32_t translateFirstLastState(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); - int32_t paraBytes = getSDataTypeFromNode(pPara)->bytes; + SDataType* pSDataType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + uint8_t colType = pSDataType->type; - int32_t pkBytes = (pFunc->hasPk) ? 
pFunc->pkBytes : 0; - pFunc->node.resType = - (SDataType){.bytes = getFirstLastInfoSize(paraBytes, pkBytes) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; + // set result type + pFunc->node.resType = (SDataType){.bytes = IS_STR_DATA_TYPE(colType) ? pSDataType->bytes : tDataTypes[colType].bytes, + .type = colType}; return TSDB_CODE_SUCCESS; } -static int32_t translateFirstLastStateMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); - int32_t paraBytes = getSDataTypeFromNode(pPara)->bytes; - uint8_t paraType = getSDataTypeFromNode(pPara)->type; - if (paraType != TSDB_DATA_TYPE_BINARY) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = paraBytes, .type = TSDB_DATA_TYPE_BINARY}; - return TSDB_CODE_SUCCESS; +static EFuncReturnRows derivativeEstReturnRows(SFunctionNode* pFunc) { + return 1 == ((SValueNode*)nodesListGetNode(pFunc->pParameterList, 2))->datum.i ? FUNC_RETURN_ROWS_INDEFINITE + : FUNC_RETURN_ROWS_N_MINUS_1; } -static int32_t translateUniqueMode(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isUnique) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); - if (!nodesExprHasColumn(pPara)) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "The parameters of %s must contain columns", - isUnique ? 
"UNIQUE" : "MODE"); +static int32_t translateAddPrecOutDouble(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + // add database precision as param + uint8_t dbPrec = pFunc->node.resType.precision; + int32_t code = addUint8Param(&pFunc->pParameterList, dbPrec); + if (code != TSDB_CODE_SUCCESS) { + return code; } - pFunc->node.resType = ((SExprNode*)pPara)->resType; + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; return TSDB_CODE_SUCCESS; } -static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateUniqueMode(pFunc, pErrBuf, len, true); +static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + pFunc->node.resType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType; + return TSDB_CODE_SUCCESS; } -static int32_t translateMode(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - return translateUniqueMode(pFunc, pErrBuf, len, false); +static EFuncReturnRows interpEstReturnRows(SFunctionNode* pFunc) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (1 < numOfParams && 1 == ((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i) { + return FUNC_RETURN_ROWS_INDEFINITE; + } else { + return FUNC_RETURN_ROWS_N; + } } static int32_t translateForecast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { @@ -2121,37 +1240,8 @@ static int32_t translateForecastConf(SFunctionNode* pFunc, char* pErrBuf, int32_ static EFuncReturnRows forecastEstReturnRows(SFunctionNode* pFunc) { return FUNC_RETURN_ROWS_N; } static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (numOfParams > 2) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); 
uint8_t colType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_INTEGER_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType && - !IS_TIMESTAMP_TYPE(colType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param1 - if (numOfParams == 2) { - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_INTEGER_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1); - if (QUERY_NODE_VALUE != nodeType(pParamNode1)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - SValueNode* pValue = (SValueNode*)pParamNode1; - if (pValue->datum.i < 0 || pValue->datum.i > 3) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "Second parameter of DIFF function should be a number between 0 and 3."); - } - - pValue->notReserved = true; - } uint8_t resType; if (IS_SIGNED_NUMERIC_TYPE(colType) || IS_TIMESTAMP_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) { @@ -2173,65 +1263,19 @@ static EFuncReturnRows diffEstReturnRows(SFunctionNode* pFunc) { : FUNC_RETURN_ROWS_N_MINUS_1; } -static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_STR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateCharLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - 
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (paraType == TSDB_DATA_TYPE_VARBINARY || (!IS_STR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; - return TSDB_CODE_SUCCESS; -} - static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, int32_t minParaNum, int32_t maxParaNum, bool hasSep) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (numOfParams < minParaNum || numOfParams > maxParaNum) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } uint8_t resultType = TSDB_DATA_TYPE_BINARY; int32_t resultBytes = 0; int32_t sepBytes = 0; - // concat_ws separator should be constant string - if (hasSep) { - SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); - if (nodeType(pPara) != QUERY_NODE_VALUE) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, - "The first parameter of CONCAT_WS function can only be constant string"); - } - } - /* For concat/concat_ws function, if params have NCHAR type, promote the final result to NCHAR */ for (int32_t i = 0; i < numOfParams; ++i) { SNode* pPara = nodesListGetNode(pFunc->pParameterList, i); uint8_t paraType = getSDataTypeFromNode(pPara)->type; - if (TSDB_DATA_TYPE_VARBINARY == paraType) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (!IS_STR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } if (TSDB_DATA_TYPE_NCHAR == paraType) { resultType = paraType; } @@ -2274,130 +1318,29 @@ static int32_t translateConcatWs(SFunctionNode* pFunc, char* pErrBuf, int32_t le 
return translateConcatImpl(pFunc, pErrBuf, len, 3, 9, true); } -static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (2 != numOfParams && 3 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - SExprNode* pPara0 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 1); - - uint8_t para0Type = pPara0->resType.type; - uint8_t para1Type = pPara1->resType.type; - if (TSDB_DATA_TYPE_VARBINARY == para0Type || - (!IS_STR_DATA_TYPE(para0Type) && !IS_NULL_TYPE(para0Type)) || - (!IS_INTEGER_TYPE(para1Type) && !IS_NULL_TYPE(para1Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (3 == numOfParams) { - SExprNode* pPara2 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 2); - uint8_t para2Type = pPara2->resType.type; - if (!IS_INTEGER_TYPE(para2Type) && !IS_NULL_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - int64_t v = ((SValueNode*)pPara2)->datum.i; - } - - pFunc->node.resType = (SDataType){.bytes = pPara0->resType.bytes, .type = pPara0->resType.type}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateSubstrIdx(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (3 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - SExprNode* pPara0 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 1); - SExprNode* pPara2 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 2); - - uint8_t para0Type = pPara0->resType.type; - uint8_t para1Type = pPara1->resType.type; - uint8_t para2Type = pPara2->resType.type; - if (TSDB_DATA_TYPE_VARBINARY == para0Type || (!IS_STR_DATA_TYPE(para0Type) && !IS_NULL_TYPE(para0Type)) || - 
TSDB_DATA_TYPE_VARBINARY == para1Type || (!IS_STR_DATA_TYPE(para1Type) && !IS_NULL_TYPE(para1Type)) || - (!IS_INTEGER_TYPE(para2Type) && !IS_NULL_TYPE(para2Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = pPara0->resType.bytes, .type = pPara0->resType.type}; - return TSDB_CODE_SUCCESS; -} - static int32_t translateChar(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - SNode *node; - FOREACH(node, pFunc->pParameterList) { - uint8_t paraType = getSDataTypeFromNode(node)->type; - if (paraType == TSDB_DATA_TYPE_VARBINARY || - (!IS_STR_DATA_TYPE(paraType) && !IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - } - + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); pFunc->node.resType = (SDataType){.bytes = 4 * numOfParams + 2, .type = TSDB_DATA_TYPE_VARCHAR}; return TSDB_CODE_SUCCESS; } static int32_t translateAscii(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (paraType == TSDB_DATA_TYPE_VARBINARY || - (!IS_STR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_UTINYINT].bytes, .type = TSDB_DATA_TYPE_UTINYINT}; return TSDB_CODE_SUCCESS; } -static int32_t translatePosition(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para0Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 
0))->type; - if (para0Type == TSDB_DATA_TYPE_VARBINARY || - (!IS_STR_DATA_TYPE(para0Type) && !IS_NULL_TYPE(para0Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (para1Type == TSDB_DATA_TYPE_VARBINARY || - (!IS_STR_DATA_TYPE(para1Type) && !IS_NULL_TYPE(para1Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; - return TSDB_CODE_SUCCESS; -} - static int32_t translateTrim(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList) && 1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); - uint8_t para0Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (para0Type == TSDB_DATA_TYPE_VARBINARY || - (!IS_STR_DATA_TYPE(para0Type) && !IS_NULL_TYPE(para0Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } + uint8_t para0Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; int32_t resLen = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->bytes; uint8_t type = para0Type; if (2 == LIST_LENGTH(pFunc->pParameterList)) { uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (para1Type == TSDB_DATA_TYPE_VARBINARY || - (!IS_STR_DATA_TYPE(para1Type) && !IS_NULL_TYPE(para1Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } resLen = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->bytes; type = para1Type; } @@ -2414,17 +1357,7 @@ static int32_t translateTrim(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { } static int32_t 
translateReplace(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (3 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - for (int32_t i = 0; i < 3; ++i) { - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; - if (paraType == TSDB_DATA_TYPE_VARBINARY || - (!IS_STR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - } + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); uint8_t orgType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; uint8_t fromType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; @@ -2448,31 +1381,26 @@ static int32_t translateReplace(SFunctionNode* pFunc, char* pErrBuf, int32_t len } static int32_t translateRepeat(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para0Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (para0Type == TSDB_DATA_TYPE_VARBINARY || - (!IS_STR_DATA_TYPE(para0Type) && !IS_NULL_TYPE(para0Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_INTEGER_TYPE(para1Type) && !IS_NULL_TYPE(para1Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); uint8_t type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; int32_t orgLen = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->bytes; - int32_t count = TMAX((int32_t)((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i, 1); - - int32_t resLen = orgLen * count; + int32_t resLen; + if 
(nodeType(nodesListGetNode(pFunc->pParameterList, 1)) == QUERY_NODE_VALUE) { + resLen = orgLen * TMAX((int32_t)((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i, 1); + } else { + resLen = TSDB_MAX_BINARY_LEN; + } pFunc->node.resType = (SDataType){.bytes = resLen, .type = type}; return TSDB_CODE_SUCCESS; } static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (numOfParams <= 0) { + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + } + // The number of parameters has been limited by the syntax definition SExprNode* pPara0 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); @@ -2512,25 +1440,11 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { } static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (1 != numOfParams && 2 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - // param0 - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - // param1 if (numOfParams == 2) { - SNode* pNode = (SNode*)nodesListGetNode(pFunc->pParameterList, 1); - if (QUERY_NODE_VALUE != nodeType(pNode)) { - return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "Not supported timzone format"); - } - - SValueNode* pValue = (SValueNode*)pNode; + SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); if (!validateTimezoneFormat(pValue)) { return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, "Invalid timzone format"); } @@ -2547,24 +1461,10 @@ static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, 
int32_t l } static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); int16_t resType = TSDB_DATA_TYPE_BIGINT; - - if (1 != numOfParams && 2 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (para1Type == TSDB_DATA_TYPE_VARBINARY || !IS_STR_DATA_TYPE(para1Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - if (2 == numOfParams) { - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_INTEGER_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); if (pValue->datum.i == 1) { resType = TSDB_DATA_TYPE_TIMESTAMP; @@ -2576,213 +1476,46 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int } } - // add database precision as param - uint8_t dbPrec = pFunc->node.resType.precision; - int32_t code = addUint8Param(&pFunc->pParameterList, dbPrec); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateToTimestamp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (LIST_LENGTH(pFunc->pParameterList) != 2) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_STR_DATA_TYPE(para1Type) || !IS_STR_DATA_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - 
pFunc->node.resType = - (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateToChar(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (LIST_LENGTH(pFunc->pParameterList) != 2) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - // currently only support to_char(timestamp, str) - if (!IS_STR_DATA_TYPE(para2Type) || !IS_TIMESTAMP_TYPE(para1Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - pFunc->node.resType = (SDataType){.bytes = 4096, .type = TSDB_DATA_TYPE_VARCHAR}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (2 != numOfParams && 3 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && !IS_TIMESTAMP_TYPE(para1Type)) || - !IS_INTEGER_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t dbPrec = pFunc->node.resType.precision; - int32_t code = validateTimeUnitParam(dbPrec, (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1)); - if (code == TSDB_CODE_FUNC_TIME_UNIT_TOO_SMALL) { - return buildFuncErrMsg(pErrBuf, len, code, - "TIMETRUNCATE function time unit parameter should be greater than db precision"); - } else if (code == TSDB_CODE_FUNC_TIME_UNIT_INVALID) { - return buildFuncErrMsg( - pErrBuf, len, code, - "TIMETRUNCATE 
function time unit parameter should be one of the following: [1b, 1u, 1a, 1s, 1m, 1h, 1d, 1w]"); - } - - if (3 == numOfParams) { - uint8_t para3Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; - if (!IS_INTEGER_TYPE(para3Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 2); - if (pValue->datum.i != 0 && pValue->datum.i != 1) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } - } - - // add database precision as param - - code = addUint8Param(&pFunc->pParameterList, dbPrec); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - // add client timezone as param - code = addTimezoneParam(pFunc->pParameterList); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - pFunc->node.resType = - (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); - if (2 != numOfParams && 3 != numOfParams) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - for (int32_t i = 0; i < 2; ++i) { - uint8_t paraType = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i))->type; - if (!IS_STR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType) && !IS_NULL_TYPE(paraType)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - } - uint8_t para2Type; - if (3 == numOfParams) { - para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 2))->type; - if (!IS_INTEGER_TYPE(para2Type) && !IS_NULL_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - } - - // add database precision as param - uint8_t dbPrec = pFunc->node.resType.precision; - - if (3 == numOfParams && 
!IS_NULL_TYPE(para2Type)) { - int32_t code = validateTimeUnitParam(dbPrec, (SValueNode*)nodesListGetNode(pFunc->pParameterList, 2)); - if (code == TSDB_CODE_FUNC_TIME_UNIT_TOO_SMALL) { - return buildFuncErrMsg(pErrBuf, len, code, - "TIMEDIFF function time unit parameter should be greater than db precision"); - } else if (code == TSDB_CODE_FUNC_TIME_UNIT_INVALID) { - return buildFuncErrMsg( - pErrBuf, len, code, - "TIMEDIFF function time unit parameter should be one of the following: [1b, 1u, 1a, 1s, 1m, 1h, 1d, 1w]"); - } - } - + // add database precision as param + uint8_t dbPrec = pFunc->node.resType.precision; int32_t code = addUint8Param(&pFunc->pParameterList, dbPrec); if (code != TSDB_CODE_SUCCESS) { return code; } - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; + pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType}; return TSDB_CODE_SUCCESS; } -static int32_t translateWeekday(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && - !IS_TIMESTAMP_TYPE(para1Type) && !IS_NULL_TYPE(para1Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } +static int32_t translateToTimestamp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + pFunc->node.resType = + (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP}; + return TSDB_CODE_SUCCESS; +} - // add database precision as param +static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); uint8_t dbPrec = 
pFunc->node.resType.precision; - + // add database precision as param int32_t code = addUint8Param(&pFunc->pParameterList, dbPrec); if (code != TSDB_CODE_SUCCESS) { return code; } - pFunc->node.resType = - (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateWeek(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList) && 2 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && - !IS_TIMESTAMP_TYPE(para1Type)) && !IS_NULL_TYPE(para1Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - if (2 == LIST_LENGTH(pFunc->pParameterList)) { - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if (!IS_INTEGER_TYPE(para2Type) && !IS_NULL_TYPE(para2Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - if (IS_INTEGER_TYPE(para2Type)) { - SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); - if (pValue->datum.i < 0 || pValue->datum.i > 7) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } - } - } - - // add database precision as param - uint8_t dbPrec = pFunc->node.resType.precision; - - int32_t code = addUint8Param(&pFunc->pParameterList, dbPrec); + // add client timezone as param + code = addTimezoneParam(pFunc->pParameterList); if (code != TSDB_CODE_SUCCESS) { return code; } pFunc->node.resType = - (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; + (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP}; return TSDB_CODE_SUCCESS; } -static int32_t 
translateWeekofyear(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && - !IS_TIMESTAMP_TYPE(para1Type)) && !IS_NULL_TYPE(para1Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } +static int32_t translateAddPrecOutBigint(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); // add database precision as param uint8_t dbPrec = pFunc->node.resType.precision; @@ -2798,84 +1531,40 @@ static int32_t translateWeekofyear(SFunctionNode* pFunc, char* pErrBuf, int32_t } static int32_t translateToJson(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - SExprNode* pPara = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0); - if (QUERY_NODE_VALUE != nodeType(pPara) || TSDB_DATA_TYPE_VARBINARY == pPara->resType.type || (!IS_VAR_DATA_TYPE(pPara->resType.type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_JSON].bytes, .type = TSDB_DATA_TYPE_JSON}; return TSDB_CODE_SUCCESS; } -static int32_t translateInStrOutGeom(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (!IS_STR_DATA_TYPE(para1Type) && !IS_NULL_TYPE(para1Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - 
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_GEOMETRY].bytes, .type = TSDB_DATA_TYPE_GEOMETRY}; +static int32_t translateOutGeom(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + SDataType dt = *getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + pFunc->node.resType = (SDataType){.bytes = dt.bytes, .type = TSDB_DATA_TYPE_GEOMETRY}; return TSDB_CODE_SUCCESS; } static int32_t translateInGeomOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (para1Type != TSDB_DATA_TYPE_GEOMETRY && !IS_NULL_TYPE(para1Type)) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_VARCHAR].bytes, .type = TSDB_DATA_TYPE_VARCHAR}; - - return TSDB_CODE_SUCCESS; -} - -static int32_t translateIn2NumOutGeom(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if ((!IS_NUMERIC_TYPE(para1Type) && !IS_NULL_TYPE(para1Type)) || - (!IS_NUMERIC_TYPE(para2Type) && !IS_NULL_TYPE(para2Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_GEOMETRY].bytes, .type = TSDB_DATA_TYPE_GEOMETRY}; + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + SDataType dt = *getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + pFunc->node.resType = (SDataType){.bytes = dt.bytes, 
.type = TSDB_DATA_TYPE_VARCHAR}; return TSDB_CODE_SUCCESS; } static int32_t translateIn2GeomOutBool(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - uint8_t para2Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 1))->type; - if ((para1Type != TSDB_DATA_TYPE_GEOMETRY && !IS_NULL_TYPE(para1Type)) || - (para2Type != TSDB_DATA_TYPE_GEOMETRY && !IS_NULL_TYPE(para2Type))) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); - } - + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BOOL].bytes, .type = TSDB_DATA_TYPE_BOOL}; return TSDB_CODE_SUCCESS; } static int32_t translateSelectValue(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (numOfParams <= 0) { + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + } + pFunc->node.resType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType; return TSDB_CODE_SUCCESS; } @@ -2885,11 +1574,6 @@ static int32_t translateBlockDistFunc(SFunctionNode* pFunc, char* pErrBuf, int32 return TSDB_CODE_SUCCESS; } -static int32_t translateBlockDistInfoFunc(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = (SDataType){.bytes = 128, .type = TSDB_DATA_TYPE_VARCHAR}; - return TSDB_CODE_SUCCESS; -} - static bool getBlockDistFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { pEnv->calcMemSize = sizeof(STableBlockDistInfo); return true; @@ -2899,63 +1583,143 @@ static int32_t translateGroupKey(SFunctionNode* pFunc, char* pErrBuf, int32_t le if (1 != LIST_LENGTH(pFunc->pParameterList)) { return TSDB_CODE_SUCCESS; } - - SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0); - 
pFunc->node.resType = ((SExprNode*)pPara)->resType; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateDatabaseFunc(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = (SDataType){.bytes = TSDB_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateClientVersionFunc(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = (SDataType){.bytes = TSDB_VERSION_LEN, .type = TSDB_DATA_TYPE_VARCHAR}; + pFunc->node.resType = *getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); return TSDB_CODE_SUCCESS; } -static int32_t translateServerVersionFunc(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = (SDataType){.bytes = TSDB_VERSION_LEN, .type = TSDB_DATA_TYPE_VARCHAR}; - return TSDB_CODE_SUCCESS; -} static int32_t translateServerStatusFunc(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_INT].bytes, .type = TSDB_DATA_TYPE_INT}; return TSDB_CODE_SUCCESS; } -static int32_t translateCurrentUserFunc(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = (SDataType){.bytes = TSDB_USER_LEN, .type = TSDB_DATA_TYPE_VARCHAR}; - return TSDB_CODE_SUCCESS; -} - -static int32_t translateUserFunc(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = (SDataType){.bytes = TSDB_USER_LEN, .type = TSDB_DATA_TYPE_VARCHAR}; - return TSDB_CODE_SUCCESS; -} - static int32_t translateTagsPseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { // The _tags pseudo-column will be expanded to the actual tags on the client side return TSDB_CODE_SUCCESS; } -static int32_t translateTableCountPseudoColumn(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; +static int32_t translateOutVarchar(SFunctionNode* pFunc, char* pErrBuf, int32_t 
len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + int32_t bytes = 0; + switch (pFunc->funcType) { + case FUNCTION_TYPE_MD5: + bytes = MD5_OUTPUT_LEN + VARSTR_HEADER_SIZE; + break; + case FUNCTION_TYPE_USER: + case FUNCTION_TYPE_CURRENT_USER: + bytes = TSDB_USER_LEN; + break; + case FUNCTION_TYPE_SERVER_VERSION: + case FUNCTION_TYPE_CLIENT_VERSION: + bytes = TSDB_VERSION_LEN; + break; + case FUNCTION_TYPE_DATABASE: + bytes = TSDB_DB_NAME_LEN; + break; + case FUNCTION_TYPE_BLOCK_DIST: + case FUNCTION_TYPE_BLOCK_DIST_INFO: + bytes = sizeof(STableBlockDistInfo); + break; + case FUNCTION_TYPE_TO_CHAR: + bytes = 4096; + break; + case FUNCTION_TYPE_HYPERLOGLOG_STATE_MERGE: + case FUNCTION_TYPE_HYPERLOGLOG_PARTIAL: + case FUNCTION_TYPE_HYPERLOGLOG_STATE: + bytes = getHistogramInfoSize() + VARSTR_HEADER_SIZE; + break; + case FUNCTION_TYPE_SPREAD_PARTIAL: + case FUNCTION_TYPE_SPREAD_STATE: + case FUNCTION_TYPE_SPREAD_STATE_MERGE: + bytes = getSpreadInfoSize() + VARSTR_HEADER_SIZE; + break; + case FUNCTION_TYPE_APERCENTILE_PARTIAL: + bytes = getApercentileMaxSize() + VARSTR_HEADER_SIZE; + break; + case FUNCTION_TYPE_STD_STATE: + case FUNCTION_TYPE_STD_STATE_MERGE: + case FUNCTION_TYPE_STD_PARTIAL: + bytes = getStdInfoSize() + VARSTR_HEADER_SIZE; + break; + case FUNCTION_TYPE_AVG_PARTIAL: + case FUNCTION_TYPE_AVG_STATE: + case FUNCTION_TYPE_AVG_STATE_MERGE: + bytes = getAvgInfoSize() + VARSTR_HEADER_SIZE; + break; + case FUNCTION_TYPE_HISTOGRAM_PARTIAL: + bytes = getHistogramInfoSize() + VARSTR_HEADER_SIZE; + break; + case FUNCTION_TYPE_HISTOGRAM: + case FUNCTION_TYPE_HISTOGRAM_MERGE: + bytes = 512; + break; + case FUNCTION_TYPE_LEASTSQUARES: + bytes = LEASTSQUARES_BUFF_LENGTH; + break; + case FUNCTION_TYPE_TBNAME: + bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE; + break; + case FUNCTION_TYPE_TIMEZONE: + bytes = timeZoneStrLen(); + break; + case FUNCTION_TYPE_IRATE_PARTIAL: + bytes = getIrateInfoSize((pFunc->hasPk) ? 
pFunc->pkBytes : 0) + VARSTR_HEADER_SIZE; + break; + case FUNCTION_TYPE_FIRST_PARTIAL: + case FUNCTION_TYPE_LAST_PARTIAL: + case FUNCTION_TYPE_FIRST_STATE: + case FUNCTION_TYPE_LAST_STATE: + bytes = getFirstLastInfoSize(getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->bytes, + (pFunc->hasPk) ? pFunc->pkBytes : 0) + VARSTR_HEADER_SIZE; + break; + case FUNCTION_TYPE_FIRST_STATE_MERGE: + case FUNCTION_TYPE_LAST_STATE_MERGE: + bytes = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->bytes; + break; + default: + bytes = 0; + break; + } + pFunc->node.resType = (SDataType){.bytes = bytes, .type = TSDB_DATA_TYPE_VARCHAR}; return TSDB_CODE_SUCCESS; } -static int32_t translateMd5(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (1 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } +static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + int8_t binType; + char* binDesc; + for (int32_t i = 1; i < numOfParams; ++i) { + SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, i); + if (i == 1) { + binType = validateHistogramBinType(varDataVal(pValue->datum.p)); + if (binType == UNKNOWN_BIN) { + return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, + "HISTOGRAM function binType parameter should be " + "\"user_input\", \"log_bin\" or \"linear_bin\""); + } + } - uint8_t para1Type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; - if (para1Type != TSDB_DATA_TYPE_VARCHAR) { - return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + if (i == 2) { + char errMsg[128] = {0}; + binDesc = varDataVal(pValue->datum.p); + if (TSDB_CODE_SUCCESS != validateHistogramBinDesc(binDesc, binType, errMsg, (int32_t)sizeof(errMsg))) { + return buildFuncErrMsg(pErrBuf, len, 
TSDB_CODE_FUNC_FUNTION_ERROR, errMsg); + } + } } + return TSDB_CODE_SUCCESS; +} - pFunc->node.resType = (SDataType){.bytes = MD5_OUTPUT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}; +static int32_t translateHitogram(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(translateHistogramImpl(pFunc, pErrBuf, len)); + pFunc->node.resType = (SDataType){.bytes = 512, .type = TSDB_DATA_TYPE_BINARY}; + return TSDB_CODE_SUCCESS; +} +static int32_t translateHistogramPartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + FUNC_ERR_RET(translateHistogramImpl(pFunc, pErrBuf, len)); + pFunc->node.resType = + (SDataType){.bytes = getHistogramInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; return TSDB_CODE_SUCCESS; } @@ -2965,7 +1729,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "count", .type = FUNCTION_TYPE_COUNT, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_TSMA_FUNC | FUNC_MGT_COUNT_LIKE_FUNC, - .translateFunc = translateCount, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateOutBigInt, .dataRequiredFunc = countDataRequired, .getEnvFunc = getCountFuncEnv, .initFunc = functionSetup, @@ -2984,6 +1759,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "sum", .type = FUNCTION_TYPE_SUM, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_TSMA_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, 
+ .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE | FUNC_PARAM_SUPPORT_DOUBLE_TYPE | FUNC_PARAM_SUPPORT_UBIGINT_TYPE}}, .translateFunc = translateSum, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getSumFuncEnv, @@ -3003,6 +1789,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "min", .type = FUNCTION_TYPE_MIN, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_TSMA_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_STRING_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_STRING_TYPE}}, .translateFunc = translateMinMax, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getMinmaxFuncEnv, @@ -3019,6 +1816,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "max", .type = FUNCTION_TYPE_MAX, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_TSMA_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_STRING_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = 
FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_STRING_TYPE}}, .translateFunc = translateMinMax, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getMinmaxFuncEnv, @@ -3035,7 +1843,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "stddev", .type = FUNCTION_TYPE_STDDEV, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = getStdFuncEnv, .initFunc = stdFunctionSetup, .processFunc = stdFunction, @@ -3053,7 +1872,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_std_partial", .type = FUNCTION_TYPE_STD_PARTIAL, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateStdPartial, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getStdFuncEnv, .initFunc = stdFunctionSetup, .processFunc = stdFunction, @@ -3067,7 +1897,18 
@@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_stddev_merge", .type = FUNCTION_TYPE_STDDEV_MERGE, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateStdMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = getStdFuncEnv, .initFunc = stdFunctionSetup, .processFunc = stdFunctionMerge, @@ -3083,7 +1924,25 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "leastsquares", .type = FUNCTION_TYPE_LEASTSQUARES, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, - .translateFunc = translateLeastSQR, + .parameters = {.minParamNum = 3, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getLeastSQRFuncEnv, .initFunc = leastSQRFunctionSetup, .processFunc = leastSQRFunction, @@ -3098,7 +1957,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "avg", 
.type = FUNCTION_TYPE_AVG, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getAvgFuncEnv, .initFunc = avgFunctionSetup, @@ -3118,7 +1988,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_avg_partial", .type = FUNCTION_TYPE_AVG_PARTIAL, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateAvgPartial, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getAvgFuncEnv, .initFunc = avgFunctionSetup, @@ -3133,7 +2014,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_avg_merge", .type = FUNCTION_TYPE_AVG_MERGE, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateAvgMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + 
.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = getAvgFuncEnv, .initFunc = avgFunctionSetup, .processFunc = avgFunctionMerge, @@ -3149,6 +2041,25 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "percentile", .type = FUNCTION_TYPE_PERCENTILE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_REPEAT_SCAN_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_FORBID_STREAM_FUNC, + .parameters = {.minParamNum = 2, + .maxParamNum = 11, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 11, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 0, .iMaxVal = 100}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, .translateFunc = translatePercentile, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getPercentileFuncEnv, @@ -3166,7 +2077,35 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "apercentile", .type = FUNCTION_TYPE_APERCENTILE, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateApercentile, + .parameters = {.minParamNum = 2, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = 
FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 0, .iMaxVal = 100}}, + .inputParaInfo[0][2] = {.isLastParam = true, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_STRING_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 2, + .fixedStrValue = {"default", "t-digest"}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = getApercentileFuncEnv, .initFunc = apercentileFunctionSetup, .processFunc = apercentileFunction, @@ -3184,7 +2123,35 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_apercentile_partial", .type = FUNCTION_TYPE_APERCENTILE_PARTIAL, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateApercentilePartial, + .parameters = {.minParamNum = 2, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 
0, .iMaxVal = 100}}, + .inputParaInfo[0][2] = {.isLastParam = true, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_STRING_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 2, + .fixedStrValue = {"default", "t-digest"}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getApercentileFuncEnv, .initFunc = apercentileFunctionSetup, .processFunc = apercentileFunction, @@ -3198,7 +2165,35 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_apercentile_merge", .type = FUNCTION_TYPE_APERCENTILE_MERGE, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateApercentileMerge, + .parameters = {.minParamNum = 2, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 0, .iMaxVal = 100}}, + .inputParaInfo[0][2] = {.isLastParam = true, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_STRING_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 2, + .fixedStrValue = {"default", "t-digest"}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, 
.getEnvFunc = getApercentileFuncEnv, .initFunc = apercentileFunctionSetup, .processFunc = apercentileFunctionMerge, @@ -3213,7 +2208,26 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_TOP, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_FILL_FUNC | FUNC_MGT_IGNORE_NULL_FUNC, - .translateFunc = translateTopBot, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 1, .iMaxVal = TOP_BOTTOM_QUERY_LIMIT}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = getTopBotFuncEnv, .initFunc = topBotFunctionSetup, .processFunc = topFunction, @@ -3229,7 +2243,26 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_BOTTOM, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_FILL_FUNC | FUNC_MGT_IGNORE_NULL_FUNC, - .translateFunc = translateTopBot, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = 
FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 1, .iMaxVal = TOP_BOTTOM_QUERY_LIMIT}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = getTopBotFuncEnv, .initFunc = topBotFunctionSetup, .processFunc = bottomFunction, @@ -3244,7 +2277,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "spread", .type = FUNCTION_TYPE_SPREAD, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateSpread, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getSpreadFuncEnv, .initFunc = spreadFunctionSetup, @@ -3263,7 +2307,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_spread_partial", .type = FUNCTION_TYPE_SPREAD_PARTIAL, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateSpreadPartial, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE, + .validNodeType = 
FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getSpreadFuncEnv, .initFunc = spreadFunctionSetup, @@ -3277,8 +2332,19 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_spread_merge", .type = FUNCTION_TYPE_SPREAD_MERGE, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateSpreadMerge, + .translateFunc = translateOutDouble, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getSpreadFuncEnv, .initFunc = spreadFunctionSetup, @@ -3294,10 +2360,28 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "elapsed", .type = FUNCTION_TYPE_ELAPSED, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED, + .parameters = {.minParamNum = 1, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_COLUMN_NODE, + .paramAttribute = FUNC_PARAM_MUST_BE_PRIMTS, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + 
.startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_MUST_BE_TIME_UNIT, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, .dataRequiredFunc = statisDataRequired, - .translateFunc = translateElapsed, + .translateFunc = translateOutDouble, .getEnvFunc = getElapsedFuncEnv, .initFunc = elapsedFunctionSetup, .processFunc = elapsedFunction, @@ -3341,70 +2425,183 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "interp", .type = FUNCTION_TYPE_INTERP, .classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | - FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateInterp, + FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_BOOL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_NOT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 2, + .fixedNumValue = {0, 1}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = getSelectivityFuncEnv, .initFunc = functionSetup, .processFunc = NULL, .finalizeFunc = NULL, - .estimateReturnRowsFunc = 
interpEstReturnRows + .estimateReturnRowsFunc = interpEstReturnRows, }, { .name = "derivative", .type = FUNCTION_TYPE_DERIVATIVE, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateDerivative, + .parameters = {.minParamNum = 3, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 1, .iMaxVal = INT64_MAX}}, + .inputParaInfo[0][2] = {.isLastParam = true, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 2, + .fixedNumValue = {0, 1}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = getDerivativeFuncEnv, .initFunc = derivativeFuncSetup, .processFunc = derivativeFunction, .sprocessFunc = derivativeScalarFunction, .finalizeFunc = functionFinalize, - .estimateReturnRowsFunc = derivativeEstReturnRows + .estimateReturnRowsFunc = derivativeEstReturnRows, }, { .name = "irate", .type = FUNCTION_TYPE_IRATE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | 
FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateIrate, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateAddPrecOutDouble, .getEnvFunc = getIrateFuncEnv, .initFunc = irateFuncSetup, .processFunc = irateFunction, .sprocessFunc = irateScalarFunction, .finalizeFunc = irateFinalize, .pPartialFunc = "_irate_partial", - .pMergeFunc = "_irate_merge" + .pMergeFunc = "_irate_merge", }, { .name = "_irate_partial", .type = FUNCTION_TYPE_IRATE_PARTIAL, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateIratePartial, + .parameters = {.minParamNum = 3, + .maxParamNum = 4, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_TINYINT_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][2] = {.isLastParam = false, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE, + 
.validNodeType = FUNC_PARAM_SUPPORT_COLUMN_NODE, + .paramAttribute = FUNC_PARAM_MUST_BE_PRIMTS, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][3] = {.isLastParam = true, + .startParam = 4, + .endParam = 4, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_COLUMN_NODE, + .paramAttribute = FUNC_PARAM_MUST_BE_PK, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getIrateFuncEnv, .initFunc = irateFuncSetup, .processFunc = irateFunction, .sprocessFunc = irateScalarFunction, - .finalizeFunc = iratePartialFinalize + .finalizeFunc = iratePartialFinalize, }, { .name = "_irate_merge", .type = FUNCTION_TYPE_IRATE_MERGE, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateIrateMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateAddPrecOutDouble, .getEnvFunc = getIrateFuncEnv, .initFunc = irateFuncSetup, .processFunc = irateFunctionMerge, .sprocessFunc = irateScalarFunction, - .finalizeFunc = irateFinalize + .finalizeFunc = irateFinalize, }, { .name = "last_row", .type = FUNCTION_TYPE_LAST_ROW, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateFirstLast, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + 
.startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_VALUE_NODE_NOT_NULL, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateOutFirstIn, .dynDataRequiredFunc = lastDynDataReq, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -3413,35 +2610,68 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .pPartialFunc = "_last_row_partial", .pMergeFunc = "_last_row_merge", .finalizeFunc = firstLastFinalize, - .combineFunc = lastCombine + .combineFunc = lastCombine, }, { .name = "_cache_last_row", .type = FUNCTION_TYPE_CACHE_LAST_ROW, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, - .translateFunc = translateFirstLast, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_VALUE_NODE_NOT_NULL, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = cachedLastRowFunction, - .finalizeFunc = firstLastFinalize + .finalizeFunc = firstLastFinalize, }, { .name = "_cache_last", .type = FUNCTION_TYPE_CACHE_LAST, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_IGNORE_NULL_FUNC, - .translateFunc = translateFirstLast, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] 
= {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_VALUE_NODE_NOT_NULL, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = lastFunctionMerge, - .finalizeFunc = firstLastFinalize + .finalizeFunc = firstLastFinalize, }, { .name = "_last_row_partial", .type = FUNCTION_TYPE_LAST_PARTIAL, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateFirstLastPartial, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_VALUE_NODE_NOT_NULL, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .dynDataRequiredFunc = lastDynDataReq, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -3453,7 +2683,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_LAST_MERGE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateFirstLastMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = 
FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_VALUE_NODE_NOT_NULL, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = lastFunctionMerge, @@ -3464,7 +2705,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_FIRST, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateFirstLast, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_VALUE_NODE_NOT_NULL, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateOutFirstIn, .dynDataRequiredFunc = firstDynDataReq, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -3481,7 +2733,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_FIRST_PARTIAL, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateFirstLastPartial, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + 
.valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .dynDataRequiredFunc = firstDynDataReq, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -3494,7 +2757,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_FIRST_MERGE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateFirstLastMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = firstFunctionMerge, @@ -3508,7 +2782,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_LAST, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateFirstLast, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_VALUE_NODE_NOT_NULL, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = 
FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateOutFirstIn, .dynDataRequiredFunc = lastDynDataReq, .getEnvFunc = getFirstLastFuncEnv, .initFunc = firstLastFunctionSetup, @@ -3525,7 +2810,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_LAST_PARTIAL, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateFirstLastPartial, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_VALUE_NODE_NOT_NULL, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .dynDataRequiredFunc = lastDynDataReq, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -3538,7 +2834,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_LAST_MERGE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_IGNORE_NULL_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateFirstLastMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = getFirstLastFuncEnv, .initFunc 
= functionSetup, .processFunc = lastFunctionMerge, @@ -3550,21 +2857,68 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "twa", .type = FUNCTION_TYPE_TWA, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getTwaFuncEnv, .initFunc = twaFunctionSetup, .processFunc = twaFunction, .sprocessFunc = twaScalarFunction, - .finalizeFunc = twaFinalize + .finalizeFunc = twaFinalize, }, { .name = "histogram", .type = FUNCTION_TYPE_HISTOGRAM, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, - .translateFunc = translateHistogram, + .parameters = {.minParamNum = 4, + .maxParamNum = 4, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + 
.validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 3, + .fixedStrValue = {"user_input", "linear_bin", "log_bin"}}, + .inputParaInfo[0][2] = {.isLastParam = false, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][3] = {.isLastParam = true, + .startParam = 4, + .endParam = 4, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 2, + .fixedNumValue = {0, 1}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateHitogram, .getEnvFunc = getHistogramFuncEnv, .initFunc = histogramFunctionSetup, .processFunc = histogramFunction, @@ -3581,6 +2935,42 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_histogram_partial", .type = FUNCTION_TYPE_HISTOGRAM_PARTIAL, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC, + .parameters = {.minParamNum = 4, + .maxParamNum = 4, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 3, 
+ .fixedStrValue = {"user_input", "linear_bin", "log_bin"}}, + .inputParaInfo[0][2] = {.isLastParam = false, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][3] = {.isLastParam = true, + .startParam = 4, + .endParam = 4, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 2, + .fixedNumValue = {0, 1}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, .translateFunc = translateHistogramPartial, .getEnvFunc = getHistogramFuncEnv, .initFunc = histogramFunctionSetup, @@ -3595,7 +2985,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_histogram_merge", .type = FUNCTION_TYPE_HISTOGRAM_MERGE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC, - .translateFunc = translateHistogramMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getHistogramFuncEnv, .initFunc = functionSetup, .processFunc = histogramFunctionMerge, @@ -3609,7 +3010,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "hyperloglog", .type = FUNCTION_TYPE_HYPERLOGLOG, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_COUNT_LIKE_FUNC, - .translateFunc = translateHLL, + .parameters = {.minParamNum 
= 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateOutBigInt, .getEnvFunc = getHLLFuncEnv, .initFunc = functionSetup, .processFunc = hllFunction, @@ -3620,13 +3032,24 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { #endif .combineFunc = hllCombine, .pPartialFunc = "_hyperloglog_partial", - .pMergeFunc = "_hyperloglog_merge" + .pMergeFunc = "_hyperloglog_merge", }, { .name = "_hyperloglog_partial", .type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateHLLPartial, + .translateFunc = translateOutVarchar, .getEnvFunc = getHLLFuncEnv, .initFunc = functionSetup, .processFunc = hllFunction, @@ -3639,8 +3062,19 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_hyperloglog_merge", .type = FUNCTION_TYPE_HYPERLOGLOG_MERGE, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + 
.outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateHLLMerge, + .translateFunc = translateOutBigInt, .getEnvFunc = getHLLFuncEnv, .initFunc = functionSetup, .processFunc = hllFunctionMerge, @@ -3656,6 +3090,26 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_DIFF, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_PROCESS_BY_ROW | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE | FUNC_PARAM_SUPPORT_BOOL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 4, + .fixedNumValue = {0, 1, 2, 3}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE | FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, .translateFunc = translateDiff, .getEnvFunc = getDiffFuncEnv, .initFunc = diffFunctionSetup, @@ -3670,30 +3124,102 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_STATE_COUNT, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, - .translateFunc = translateStateCount, + 
.parameters = {.minParamNum = 3, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_STRING_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 6, + .fixedStrValue = {"LT", "GT", "LE", "GE", "NE", "EQ"}}, + .inputParaInfo[0][2] = {.isLastParam = true, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE | FUNC_PARAM_SUPPORT_BIGINT_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateOutBigInt, .getEnvFunc = getStateFuncEnv, .initFunc = functionSetup, .processFunc = stateCountFunction, .sprocessFunc = stateCountScalarFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "stateduration", .type = FUNCTION_TYPE_STATE_DURATION, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, - .translateFunc = translateStateDuration, + .parameters = {.minParamNum = 3, + .maxParamNum = 4, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = 
FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 6, + .fixedStrValue = {"LT", "GT", "LE", "GE", "NE", "EQ"}}, + .inputParaInfo[0][2] = {.isLastParam = false, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE | FUNC_PARAM_SUPPORT_BIGINT_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][3] = {.isLastParam = true, + .startParam = 4, + .endParam = 4, + .validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_MUST_BE_TIME_UNIT, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateOutBigInt, .getEnvFunc = getStateFuncEnv, .initFunc = functionSetup, .processFunc = stateDurationFunction, .sprocessFunc = stateDurationScalarFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "csum", .type = FUNCTION_TYPE_CSUM, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = 
{.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE | FUNC_PARAM_SUPPORT_DOUBLE_TYPE | FUNC_PARAM_SUPPORT_UBIGINT_TYPE}}, .translateFunc = translateCsum, .getEnvFunc = getCsumFuncEnv, .initFunc = functionSetup, @@ -3707,438 +3233,947 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_MAVG, .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, - .translateFunc = translateMavg, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 1, .iMaxVal = 1000}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = getMavgFuncEnv, .initFunc = mavgFunctionSetup, .processFunc = mavgFunction, .sprocessFunc = mavgScalarFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "sample", .type = FUNCTION_TYPE_SAMPLE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_FILL_FUNC, - .translateFunc = translateSample, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + 
.validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 1, .iMaxVal = 1000}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateSampleTail, .getEnvFunc = getSampleFuncEnv, .initFunc = sampleFunctionSetup, .processFunc = sampleFunction, .sprocessFunc = sampleScalarFunction, - .finalizeFunc = sampleFinalize + .finalizeFunc = sampleFinalize, }, { .name = "tail", .type = FUNCTION_TYPE_TAIL, .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, - .translateFunc = translateTail, + .parameters = {.minParamNum = 2, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 1, .iMaxVal = 100}}, + .inputParaInfo[0][2] = {.isLastParam = true, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_RANGE, + .range = {.iMinVal = 
0, .iMaxVal = 100}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateSampleTail, .getEnvFunc = getTailFuncEnv, .initFunc = tailFunctionSetup, .processFunc = tailFunction, .sprocessFunc = tailScalarFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "unique", .type = FUNCTION_TYPE_UNIQUE, .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateUnique, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_MUST_HAVE_COLUMN, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = getUniqueFuncEnv, .initFunc = uniqueFunctionSetup, .processFunc = uniqueFunction, .sprocessFunc = uniqueScalarFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "mode", .type = FUNCTION_TYPE_MODE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, - .translateFunc = translateMode, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_MUST_HAVE_COLUMN, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = getModeFuncEnv, .initFunc = modeFunctionSetup, .processFunc = modeFunction, .sprocessFunc = modeScalarFunction, .finalizeFunc = modeFinalize, 
- .cleanupFunc = modeFunctionCleanupExt + .cleanupFunc = modeFunctionCleanupExt, }, { .name = "abs", .type = FUNCTION_TYPE_ABS, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInOutNum, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE}}, + .translateFunc = translateOutNum, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = absFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "log", .type = FUNCTION_TYPE_LOG, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateLogarithm, + .parameters = {.minParamNum = 1, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = logFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "pow", .type = FUNCTION_TYPE_POW, .classification = FUNC_MGT_SCALAR_FUNC, - 
.translateFunc = translateIn2NumOutDou, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = powFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "sqrt", .type = FUNCTION_TYPE_SQRT, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = sqrtFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "ceil", .type = FUNCTION_TYPE_CEIL, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInOutNum, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, 
+ .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE}}, + .translateFunc = translateOutNum, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = ceilFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "floor", .type = FUNCTION_TYPE_FLOOR, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInOutNum, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE}}, + .translateFunc = translateOutNum, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = floorFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "round", .type = FUNCTION_TYPE_ROUND, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateRound, + .parameters = {.minParamNum = 1, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + 
.paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutNum, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = roundFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "sin", .type = FUNCTION_TYPE_SIN, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = sinFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "cos", .type = FUNCTION_TYPE_COS, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = cosFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "tan", .type = FUNCTION_TYPE_TAN, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, 
+ .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = tanFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "asin", .type = FUNCTION_TYPE_ASIN, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = asinFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "acos", .type = FUNCTION_TYPE_ACOS, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, 
.getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = acosFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "atan", .type = FUNCTION_TYPE_ATAN, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = atanFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "length", .type = FUNCTION_TYPE_LENGTH, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, - .translateFunc = translateLength, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_STRING_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateOutBigInt, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = lengthFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "char_length", .type = FUNCTION_TYPE_CHAR_LENGTH, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, - .translateFunc = translateCharLength, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = 
FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateOutBigInt, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = charLengthFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "concat", .type = FUNCTION_TYPE_CONCAT, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, + .parameters = {.minParamNum = 2, + .maxParamNum = 8, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 8, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, .translateFunc = translateConcat, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = concatFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "concat_ws", .type = FUNCTION_TYPE_CONCAT_WS, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, + .parameters = {.minParamNum = 3, + .maxParamNum = 9, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 9, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | 
FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, .translateFunc = translateConcatWs, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = concatWsFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "lower", .type = FUNCTION_TYPE_LOWER, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, - .translateFunc = translateInOutStr, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = lowerFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "upper", .type = FUNCTION_TYPE_UPPER, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, - .translateFunc = translateInOutStr, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, + .translateFunc = translateOutFirstIn, 
.getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = upperFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "ltrim", .type = FUNCTION_TYPE_LTRIM, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, .translateFunc = translateLtrim, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = ltrimFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "rtrim", .type = FUNCTION_TYPE_RTRIM, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, .translateFunc = translateRtrim, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = rtrimFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "substr", .type = FUNCTION_TYPE_SUBSTR, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, - .translateFunc = translateSubstr, + .parameters = {.minParamNum = 2, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = 
FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = substrFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "cast", .type = FUNCTION_TYPE_CAST, .classification = FUNC_MGT_SCALAR_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_BOOL_TYPE | FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_GEOMETRY_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, .translateFunc = translateCast, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = castFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "to_iso8601", .type = FUNCTION_TYPE_TO_ISO8601, .classification = FUNC_MGT_SCALAR_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = 
FUNC_PARAM_SUPPORT_INTEGER_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, .translateFunc = translateToIso8601, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = toISO8601Function, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "to_unixtimestamp", .type = FUNCTION_TYPE_TO_UNIXTIMESTAMP, .classification = FUNC_MGT_SCALAR_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 2, + .fixedNumValue = {0, 1}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE}}, .translateFunc = translateToUnixtimestamp, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = toUnixtimestampFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "timetruncate", .type = FUNCTION_TYPE_TIMETRUNCATE, .classification = FUNC_MGT_SCALAR_FUNC, + .parameters = 
{.minParamNum = 2, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE | FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_MUST_BE_TIME_UNIT, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][2] = {.isLastParam = true, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 2, + .fixedNumValue = {0, 1}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE}}, .translateFunc = translateTimeTruncate, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = timeTruncateFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "timediff", .type = FUNCTION_TYPE_TIMEDIFF, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateTimeDiff, + .parameters = {.minParamNum = 2, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE | FUNC_PARAM_SUPPORT_INTEGER_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam 
= true, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_MUST_BE_TIME_UNIT, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateAddPrecOutBigint, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = timeDiffFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "now", .type = FUNCTION_TYPE_NOW, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_DATETIME_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE}}, .translateFunc = translateNowToday, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = nowFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "today", .type = FUNCTION_TYPE_TODAY, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_DATETIME_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE}}, .translateFunc = translateNowToday, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = todayFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "timezone", .type = FUNCTION_TYPE_TIMEZONE, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateTimezone, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = timezoneFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "tbname", .type = FUNCTION_TYPE_TBNAME, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_SCAN_PC_FUNC | 
FUNC_MGT_KEEP_ORDER_FUNC, - .translateFunc = translateTbnameColumn, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = qPseudoTagFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "_qstart", .type = FUNCTION_TYPE_QSTART, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_CLIENT_PC_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE}}, .translateFunc = translateTimePseudoColumn, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = NULL, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "_qend", .type = FUNCTION_TYPE_QEND, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_CLIENT_PC_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE}}, .translateFunc = translateTimePseudoColumn, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = NULL, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "_qduration", .type = FUNCTION_TYPE_QDURATION, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_CLIENT_PC_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, .translateFunc = translateWduration, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = NULL, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "_wstart", .type = FUNCTION_TYPE_WSTART, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_SKIP_SCAN_CHECK_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = 
FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE}}, .translateFunc = translateTimePseudoColumn, .getEnvFunc = getTimePseudoFuncEnv, .initFunc = NULL, .sprocessFunc = winStartTsFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "_wend", .type = FUNCTION_TYPE_WEND, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_SKIP_SCAN_CHECK_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE}}, .translateFunc = translateTimePseudoColumn, .getEnvFunc = getTimePseudoFuncEnv, .initFunc = NULL, .sprocessFunc = winEndTsFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "_wduration", .type = FUNCTION_TYPE_WDURATION, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_SKIP_SCAN_CHECK_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, .translateFunc = translateWduration, .getEnvFunc = getTimePseudoFuncEnv, .initFunc = NULL, .sprocessFunc = winDurFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "to_json", .type = FUNCTION_TYPE_TO_JSON, .classification = FUNC_MGT_SCALAR_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_VALUE_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_JSON_TYPE}}, .translateFunc = translateToJson, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = toJsonFunction, - .finalizeFunc = NULL + .finalizeFunc = NULL, }, { .name = "_select_value", @@ 
-4150,13 +4185,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .processFunc = NULL, .finalizeFunc = NULL, .pPartialFunc = "_select_value", - .pMergeFunc = "_select_value" + .pMergeFunc = "_select_value", }, { .name = "_block_dist", .type = FUNCTION_TYPE_BLOCK_DIST, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, - .translateFunc = translateBlockDistFunc, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getBlockDistFuncEnv, .initFunc = blockDistSetup, .processFunc = blockDistFunction, @@ -4166,7 +4205,11 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_block_dist_info", .type = FUNCTION_TYPE_BLOCK_DIST_INFO, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_SCAN_PC_FUNC, - .translateFunc = translateBlockDistInfoFunc, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, }, { .name = "_group_key", @@ -4185,42 +4228,70 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "database", .type = FUNCTION_TYPE_DATABASE, .classification = FUNC_MGT_SYSTEM_INFO_FUNC | FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateDatabaseFunc, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, }, { .name = "client_version", .type = FUNCTION_TYPE_CLIENT_VERSION, .classification = FUNC_MGT_SYSTEM_INFO_FUNC | FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateClientVersionFunc, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, }, { .name = "server_version", .type = 
FUNCTION_TYPE_SERVER_VERSION, .classification = FUNC_MGT_SYSTEM_INFO_FUNC | FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateServerVersionFunc, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, }, { .name = "server_status", .type = FUNCTION_TYPE_SERVER_STATUS, .classification = FUNC_MGT_SYSTEM_INFO_FUNC | FUNC_MGT_SCALAR_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, .translateFunc = translateServerStatusFunc, }, { .name = "current_user", .type = FUNCTION_TYPE_CURRENT_USER, .classification = FUNC_MGT_SYSTEM_INFO_FUNC | FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateCurrentUserFunc, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, }, { .name = "user", .type = FUNCTION_TYPE_USER, .classification = FUNC_MGT_SYSTEM_INFO_FUNC | FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateUserFunc, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, }, { .name = "_irowts", .type = FUNCTION_TYPE_IROWTS, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_INTERP_PC_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE}}, .translateFunc = translateTimePseudoColumn, .getEnvFunc = getTimePseudoFuncEnv, .initFunc = NULL, @@ -4231,6 +4302,10 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_isfilled", .type = FUNCTION_TYPE_ISFILLED, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_INTERP_PC_FUNC, + 
.parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BOOL_TYPE}}, .translateFunc = translateIsFilledPseudoColumn, .getEnvFunc = NULL, .initFunc = NULL, @@ -4251,7 +4326,11 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_table_count", .type = FUNCTION_TYPE_TABLE_COUNT, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_SCAN_PC_FUNC, - .translateFunc = translateTableCountPseudoColumn, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateOutBigInt, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = NULL, @@ -4261,7 +4340,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "st_geomfromtext", .type = FUNCTION_TYPE_GEOM_FROM_TEXT, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_GEOMETRY_FUNC, - .translateFunc = translateInStrOutGeom, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_STRING_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_GEOMETRY_TYPE}}, + .translateFunc = translateOutGeom, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = geomFromTextFunction, @@ -4271,6 +4361,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "st_astext", .type = FUNCTION_TYPE_AS_TEXT, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_GEOMETRY_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_GEOMETRY_TYPE | 
FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_STRING_TYPE}}, .translateFunc = translateInGeomOutStr, .getEnvFunc = NULL, .initFunc = NULL, @@ -4281,7 +4382,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "st_makepoint", .type = FUNCTION_TYPE_MAKE_POINT, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_GEOMETRY_FUNC, - .translateFunc = translateIn2NumOutGeom, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_GEOMETRY_TYPE}}, + .translateFunc = translateOutGeom, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = makePointFunction, @@ -4291,6 +4403,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "st_intersects", .type = FUNCTION_TYPE_INTERSECTS, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_GEOMETRY_FUNC, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_GEOMETRY_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BOOL_TYPE}}, .translateFunc = translateIn2GeomOutBool, .getEnvFunc = NULL, .initFunc = NULL, @@ -4301,6 +4424,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = 
"st_equals", .type = FUNCTION_TYPE_EQUALS, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_GEOMETRY_FUNC, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_GEOMETRY_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BOOL_TYPE}}, .translateFunc = translateIn2GeomOutBool, .getEnvFunc = NULL, .initFunc = NULL, @@ -4311,6 +4445,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "st_touches", .type = FUNCTION_TYPE_TOUCHES, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_GEOMETRY_FUNC, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_GEOMETRY_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BOOL_TYPE}}, .translateFunc = translateIn2GeomOutBool, .getEnvFunc = NULL, .initFunc = NULL, @@ -4321,6 +4466,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "st_covers", .type = FUNCTION_TYPE_COVERS, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_GEOMETRY_FUNC, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_GEOMETRY_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + 
.outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BOOL_TYPE}}, .translateFunc = translateIn2GeomOutBool, .getEnvFunc = NULL, .initFunc = NULL, @@ -4331,6 +4487,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "st_contains", .type = FUNCTION_TYPE_CONTAINS, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_GEOMETRY_FUNC, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_GEOMETRY_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BOOL_TYPE}}, .translateFunc = translateIn2GeomOutBool, .getEnvFunc = NULL, .initFunc = NULL, @@ -4341,6 +4508,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "st_containsproperly", .type = FUNCTION_TYPE_CONTAINS_PROPERLY, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_GEOMETRY_FUNC, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_GEOMETRY_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BOOL_TYPE}}, .translateFunc = translateIn2GeomOutBool, .getEnvFunc = NULL, .initFunc = NULL, @@ -4351,7 +4529,11 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_tbuid", .type = FUNCTION_TYPE_TBUID, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_SCAN_PC_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, - .translateFunc = translateTbUidColumn, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + 
.outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateOutBigInt, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = qPseudoTagFunction, @@ -4361,6 +4543,10 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_vgid", .type = FUNCTION_TYPE_VGID, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_SCAN_PC_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_INT_TYPE}}, .translateFunc = translateVgIdColumn, .getEnvFunc = NULL, .initFunc = NULL, @@ -4371,6 +4557,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "to_timestamp", .type = FUNCTION_TYPE_TO_TIMESTAMP, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_DATETIME_FUNC, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_STRING_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE}}, .translateFunc = translateToTimestamp, .getEnvFunc = NULL, .initFunc = NULL, @@ -4381,7 +4578,25 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "to_char", .type = FUNCTION_TYPE_TO_CHAR, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateToChar, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 
2, + .validDataType = FUNC_PARAM_SUPPORT_STRING_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = toCharFunction, @@ -4391,7 +4606,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_avg_middle", .type = FUNCTION_TYPE_AVG_PARTIAL, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateAvgMiddle, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .dataRequiredFunc = statisDataRequired, .getEnvFunc = getAvgFuncEnv, .initFunc = avgFunctionSetup, @@ -4406,7 +4632,11 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_vgver", .type = FUNCTION_TYPE_VGVER, .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_SCAN_PC_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, - .translateFunc = translateVgVerColumn, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateOutBigInt, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = qPseudoTagFunction, @@ -4416,7 +4646,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_std_state", .type = FUNCTION_TYPE_STD_STATE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateStdState, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + 
.paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getStdFuncEnv, .initFunc = stdFunctionSetup, .processFunc = stdFunction, @@ -4428,7 +4669,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_std_state_merge", .type = FUNCTION_TYPE_STD_STATE_MERGE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateStdStateMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getStdFuncEnv, .initFunc = stdFunctionSetup, .processFunc = stdFunctionMerge, @@ -4438,7 +4690,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_avg_state", .type = FUNCTION_TYPE_AVG_STATE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateAvgState, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + 
.outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getAvgFuncEnv, .initFunc = avgFunctionSetup, .processFunc = avgFunction, @@ -4450,7 +4713,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_avg_state_merge", .type = FUNCTION_TYPE_AVG_STATE_MERGE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateAvgStateMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getAvgFuncEnv, .initFunc = avgFunctionSetup, .processFunc = avgFunctionMerge, @@ -4460,7 +4734,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_spread_state", .type = FUNCTION_TYPE_SPREAD_STATE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateSpreadState, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getSpreadFuncEnv, .initFunc = spreadFunctionSetup, .processFunc = spreadFunction, @@ -4472,7 +4757,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = 
"_spread_state_merge", .type = FUNCTION_TYPE_SPREAD_STATE_MERGE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateSpreadStateMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getSpreadFuncEnv, .initFunc = spreadFunctionSetup, .processFunc = spreadFunctionMerge, @@ -4483,7 +4779,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_FIRST_STATE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_TSMA_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateFirstLastState, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_VALUE_NODE_NOT_NULL, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = firstFunction, @@ -4496,7 +4803,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_FIRST_STATE_MERGE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | 
FUNC_MGT_TSMA_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateFirstLastStateMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = firstFunctionMerge, @@ -4507,7 +4825,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_LAST_STATE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_TSMA_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = translateFirstLastState, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_VALUE_NODE_NOT_NULL, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = lastFunction, @@ -4520,16 +4849,39 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .type = FUNCTION_TYPE_LAST_STATE_MERGE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC | FUNC_MGT_TSMA_FUNC | FUNC_MGT_PRIMARY_KEY_FUNC, - .translateFunc = 
translateFirstLastStateMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, .processFunc = lastFunctionMerge, .finalizeFunc = firstLastPartialFinalize, }, - { .name = "_hyperloglog_state", + { + .name = "_hyperloglog_state", .type = FUNCTION_TYPE_HYPERLOGLOG_STATE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_COUNT_LIKE_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateHLLState, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getHLLFuncEnv, .initFunc = functionSetup, .processFunc = hllFunction, @@ -4541,7 +4893,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_hyperloglog_state_merge", .type = FUNCTION_TYPE_HYPERLOGLOG_STATE_MERGE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_COUNT_LIKE_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateHLLStateMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, 
+ .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = getHLLFuncEnv, .initFunc = functionSetup, .processFunc = hllFunctionMerge, @@ -4551,7 +4914,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "md5", .type = FUNCTION_TYPE_MD5, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateMd5, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, + .translateFunc = translateOutVarchar, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = md5Function, @@ -4561,6 +4935,10 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_group_const_value", .type = FUNCTION_TYPE_GROUP_CONST_VALUE, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_KEEP_ORDER_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}}, .translateFunc = translateSelectValue, .getEnvFunc = getSelectivityFuncEnv, .initFunc = functionSetup, @@ -4571,7 +4949,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "stddev_pop", .type = FUNCTION_TYPE_STDDEV, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | 
FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = getStdFuncEnv, .initFunc = stdFunctionSetup, .processFunc = stdFunction, @@ -4589,7 +4978,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "var_pop", .type = FUNCTION_TYPE_STDVAR, .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TSMA_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = getStdFuncEnv, .initFunc = stdFunctionSetup, .processFunc = stdFunction, @@ -4607,7 +5007,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "_stdvar_merge", .type = FUNCTION_TYPE_STDVAR_MERGE, .classification = FUNC_MGT_AGG_FUNC, - .translateFunc = translateStdMerge, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = getStdFuncEnv, .initFunc = stdFunctionSetup, .processFunc = stdFunctionMerge, @@ -4623,7 
+5034,11 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "pi", .type = FUNCTION_TYPE_PI, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translatePi, + .parameters = {.minParamNum = 0, + .maxParamNum = 0, + .paramInfoPattern = 0, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = piFunction, @@ -4633,7 +5048,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "exp", .type = FUNCTION_TYPE_EXP, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = expFunction, @@ -4643,7 +5069,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "ln", .type = FUNCTION_TYPE_LN, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = lnFunction, @@ -4653,7 
+5090,25 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "mod", .type = FUNCTION_TYPE_MOD, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateIn2NumOutDou, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = modFunction, @@ -4663,7 +5118,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "sign", .type = FUNCTION_TYPE_SIGN, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInOutNum, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE}}, + .translateFunc = translateOutNum, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = signFunction, @@ -4673,7 +5139,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "degrees", .type = FUNCTION_TYPE_DEGREES, 
.classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = degreesFunction, @@ -4683,7 +5160,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "radians", .type = FUNCTION_TYPE_RADIANS, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateInNumOutDou, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutDouble, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = radiansFunction, @@ -4693,7 +5181,25 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "truncate", .type = FUNCTION_TYPE_TRUNCATE, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateTrunc, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = 
FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = truncFunction, @@ -4703,7 +5209,25 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "trunc", .type = FUNCTION_TYPE_TRUNCATE, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateTrunc, + .parameters = {.minParamNum = 1, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = truncFunction, @@ -4713,7 +5237,32 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "substring", .type = FUNCTION_TYPE_SUBSTR, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, - .translateFunc = translateSubstr, + .parameters = {.minParamNum = 2, + .maxParamNum = 3, + .paramInfoPattern = 1, + 
.inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][2] = {.isLastParam = true, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = substrFunction, @@ -4723,7 +5272,32 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "substring_index", .type = FUNCTION_TYPE_SUBSTR_IDX, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, - .translateFunc = translateSubstrIdx, + .parameters = {.minParamNum = 3, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = false, + .startParam = 2, + .endParam = 2, + .validDataType = 
FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][2] = {.isLastParam = true, + .startParam = 3, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, + .translateFunc = translateOutFirstIn, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = substrIdxFunction, @@ -4733,6 +5307,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "char", .type = FUNCTION_TYPE_CHAR, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = -1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = -1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE}}, .translateFunc = translateChar, .getEnvFunc = NULL, .initFunc = NULL, @@ -4743,6 +5328,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "ascii", .type = FUNCTION_TYPE_ASCII, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | 
FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, .translateFunc = translateAscii, .getEnvFunc = NULL, .initFunc = NULL, @@ -4753,7 +5349,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "position", .type = FUNCTION_TYPE_POSITION, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, - .translateFunc = translatePosition, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateOutBigInt, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = positionFunction, @@ -4763,6 +5370,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "trim", .type = FUNCTION_TYPE_TRIM, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, + .parameters = {.minParamNum = 1, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, .translateFunc = translateTrim, .getEnvFunc = NULL, .initFunc = NULL, @@ 
-4773,6 +5391,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "replace", .type = FUNCTION_TYPE_REPLACE, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, + .parameters = {.minParamNum = 3, + .maxParamNum = 3, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 3, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, .translateFunc = translateReplace, .getEnvFunc = NULL, .initFunc = NULL, @@ -4783,6 +5412,24 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "repeat", .type = FUNCTION_TYPE_REPEAT, .classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC, + .parameters = {.minParamNum = 2, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE}}, .translateFunc = translateRepeat, .getEnvFunc = NULL, .initFunc = NULL, @@ -4793,7 +5440,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = 
"weekday", .type = FUNCTION_TYPE_WEEKDAY, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateWeekday, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_UNIX_TS_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateAddPrecOutBigint, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = weekdayFunction, @@ -4803,7 +5461,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "dayofweek", .type = FUNCTION_TYPE_DAYOFWEEK, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateWeekday, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_UNIX_TS_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateAddPrecOutBigint, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = dayofweekFunction, @@ -4813,7 +5482,27 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "week", .type = FUNCTION_TYPE_WEEK, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateWeek, + .parameters = {.minParamNum = 1, + .maxParamNum = 2, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = false, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_UNIX_TS_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + 
.paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .inputParaInfo[0][1] = {.isLastParam = true, + .startParam = 2, + .endParam = 2, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_HAS_FIXED_VALUE, + .fixedValueSize = 8, + .fixedNumValue = {0, 1, 2, 3, 4, 5, 6, 7}}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateAddPrecOutBigint, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = weekFunction, @@ -4823,7 +5512,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "weekofyear", .type = FUNCTION_TYPE_WEEKOFYEAR, .classification = FUNC_MGT_SCALAR_FUNC, - .translateFunc = translateWeekofyear, + .parameters = {.minParamNum = 1, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_UNIX_TS_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_BIGINT_TYPE}}, + .translateFunc = translateAddPrecOutBigint, .getEnvFunc = NULL, .initFunc = NULL, .sprocessFunc = weekofyearFunction, @@ -4833,6 +5533,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .name = "rand", .type = FUNCTION_TYPE_RAND, .classification = FUNC_MGT_SCALAR_FUNC, + .parameters = {.minParamNum = 0, + .maxParamNum = 1, + .paramInfoPattern = 1, + .inputParaInfo[0][0] = {.isLastParam = true, + .startParam = 1, + .endParam = 1, + .validDataType = FUNC_PARAM_SUPPORT_INTEGER_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE, + .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE, + .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE, + 
.valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,}, + .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_DOUBLE_TYPE}}, .translateFunc = translateRand, .getEnvFunc = NULL, .initFunc = NULL, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 983fccac1eb..acdac7cbc39 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -19,7 +19,7 @@ #include "functionResInfoInt.h" #include "query.h" #include "querynodes.h" -#include "tanal.h" +#include "tanalytics.h" #include "tcompare.h" #include "tdatablock.h" #include "tdigest.h" @@ -4266,6 +4266,10 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) { numOfElems = pInput->numOfRows; // since this is the primary timestamp, no need to exclude NULL values if (numOfElems == 0) { + // for stream + if (pCtx->end.key != INT64_MIN) { + pInfo->max = pCtx->end.key + 1; + } goto _elapsed_over; } @@ -6207,11 +6211,11 @@ int32_t twaFinalize(struct SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { pResInfo->numOfRes = 0; } else { if (pInfo->win.ekey == pInfo->win.skey) { - pInfo->dOutput = pInfo->p.val; + pInfo->dTwaRes = pInfo->p.val; } else if (pInfo->win.ekey == INT64_MAX || pInfo->win.skey == INT64_MIN) { // no data in timewindow - pInfo->dOutput = 0; + pInfo->dTwaRes = 0; } else { - pInfo->dOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey); + pInfo->dTwaRes = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey); } pResInfo->numOfRes = 1; diff --git a/source/libs/function/src/detail/tminmax.c b/source/libs/function/src/detail/tminmax.c index 69c1a8a6ddc..87120960331 100644 --- a/source/libs/function/src/detail/tminmax.c +++ b/source/libs/function/src/detail/tminmax.c @@ -72,173 +72,6 @@ #define GET_INVOKE_INTRINSIC_THRESHOLD(_bits, _bytes) ((_bits) / ((_bytes) << 3u)) -#ifdef __AVX2__ -static void calculateRounds(int32_t numOfRows, int32_t bytes, int32_t* remainder, int32_t* rounds, int32_t* width) { - const int32_t bitWidth = 256; - - 
*width = (bitWidth >> 3u) / bytes; - *remainder = numOfRows % (*width); - *rounds = numOfRows / (*width); -} - -#define EXTRACT_MAX_VAL(_first, _sec, _width, _remain, _v) \ - __COMPARE_EXTRACT_MAX(0, (_width), (_v), (_first)) \ - __COMPARE_EXTRACT_MAX(0, (_remain), (_v), (_sec)) - -#define EXTRACT_MIN_VAL(_first, _sec, _width, _remain, _v) \ - __COMPARE_EXTRACT_MIN(0, (_width), (_v), (_first)) \ - __COMPARE_EXTRACT_MIN(0, (_remain), (_v), (_sec)) - -#define CMP_TYPE_MIN_MAX(type, cmp) \ - const type* p = pData; \ - __m256i initVal = _mm256_lddqu_si256((__m256i*)p); \ - p += width; \ - for (int32_t i = 1; i < (rounds); ++i) { \ - __m256i next = _mm256_lddqu_si256((__m256i*)p); \ - initVal = CMP_FUNC_##cmp##_##type(initVal, next); \ - p += width; \ - } \ - const type* q = (const type*)&initVal; \ - type* v = (type*)res; \ - EXTRACT_##cmp##_VAL(q, p, width, remain, *v) - -static void i8VectorCmpAVX2(const void* pData, int32_t numOfRows, bool isMinFunc, bool signVal, int64_t* res) { - const int8_t* p = pData; - - int32_t width, remain, rounds; - calculateRounds(numOfRows, sizeof(int8_t), &remain, &rounds, &width); - -#define CMP_FUNC_MIN_int8_t _mm256_min_epi8 -#define CMP_FUNC_MAX_int8_t _mm256_max_epi8 -#define CMP_FUNC_MIN_uint8_t _mm256_min_epu8 -#define CMP_FUNC_MAX_uint8_t _mm256_max_epu8 - - if (!isMinFunc) { // max function - if (signVal) { - CMP_TYPE_MIN_MAX(int8_t, MAX); - } else { - CMP_TYPE_MIN_MAX(uint8_t, MAX); - } - } else { // min function - if (signVal) { - CMP_TYPE_MIN_MAX(int8_t, MIN); - } else { - CMP_TYPE_MIN_MAX(uint8_t, MIN); - } - } -} - -static void i16VectorCmpAVX2(const void* pData, int32_t numOfRows, bool isMinFunc, bool signVal, int64_t* res) { - int32_t width, remain, rounds; - calculateRounds(numOfRows, sizeof(int16_t), &remain, &rounds, &width); - -#define CMP_FUNC_MIN_int16_t _mm256_min_epi16 -#define CMP_FUNC_MAX_int16_t _mm256_max_epi16 -#define CMP_FUNC_MIN_uint16_t _mm256_min_epu16 -#define CMP_FUNC_MAX_uint16_t _mm256_max_epu16 - 
if (!isMinFunc) { // max function - if (signVal) { - CMP_TYPE_MIN_MAX(int16_t, MAX); - } else { - CMP_TYPE_MIN_MAX(uint16_t, MAX); - } - } else { // min function - if (signVal) { - CMP_TYPE_MIN_MAX(int16_t, MIN); - } else { - CMP_TYPE_MIN_MAX(uint16_t, MIN); - } - } -} - -static void i32VectorCmpAVX2(const void* pData, int32_t numOfRows, bool isMinFunc, bool signVal, int64_t* res) { - int32_t width, remain, rounds; - calculateRounds(numOfRows, sizeof(int32_t), &remain, &rounds, &width); - -#define CMP_FUNC_MIN_int32_t _mm256_min_epi32 -#define CMP_FUNC_MAX_int32_t _mm256_max_epi32 -#define CMP_FUNC_MIN_uint32_t _mm256_min_epu32 -#define CMP_FUNC_MAX_uint32_t _mm256_max_epu32 - if (!isMinFunc) { // max function - if (signVal) { - CMP_TYPE_MIN_MAX(int32_t, MAX); - } else { - CMP_TYPE_MIN_MAX(uint32_t, MAX); - } - } else { // min function - if (signVal) { - CMP_TYPE_MIN_MAX(int32_t, MIN); - } else { - CMP_TYPE_MIN_MAX(uint32_t, MIN); - } - } -} - -static void floatVectorCmpAVX2(const float* pData, int32_t numOfRows, bool isMinFunc, float* res) { - const float* p = pData; - - int32_t width, remain, rounds; - calculateRounds(numOfRows, sizeof(float), &remain, &rounds, &width); - - __m256 next; - __m256 initVal = _mm256_loadu_ps(p); - p += width; - - if (!isMinFunc) { // max function - for (int32_t i = 1; i < rounds; ++i) { - next = _mm256_loadu_ps(p); - initVal = _mm256_max_ps(initVal, next); - p += width; - } - - const float* q = (const float*)&initVal; - EXTRACT_MAX_VAL(q, p, width, remain, *res) - } else { // min function - for (int32_t i = 1; i < rounds; ++i) { - next = _mm256_loadu_ps(p); - initVal = _mm256_min_ps(initVal, next); - p += width; - } - - const float* q = (const float*)&initVal; - EXTRACT_MIN_VAL(q, p, width, remain, *res) - } -} - -static void doubleVectorCmpAVX2(const double* pData, int32_t numOfRows, bool isMinFunc, double* res) { - const double* p = pData; - - int32_t width, remain, rounds; - calculateRounds(numOfRows, sizeof(double), &remain, 
&rounds, &width); - - __m256d next; - __m256d initVal = _mm256_loadu_pd(p); - p += width; - - if (!isMinFunc) { // max function - for (int32_t i = 1; i < rounds; ++i) { - next = _mm256_loadu_pd(p); - initVal = _mm256_max_pd(initVal, next); - p += width; - } - - // let sum up the final results - const double* q = (const double*)&initVal; - EXTRACT_MAX_VAL(q, p, width, remain, *res) - } else { // min function - for (int32_t i = 1; i < rounds; ++i) { - next = _mm256_loadu_pd(p); - initVal = _mm256_min_pd(initVal, next); - p += width; - } - - // let sum up the final results - const double* q = (const double*)&initVal; - EXTRACT_MIN_VAL(q, p, width, remain, *res) - } -} -#endif - static int32_t findFirstValPosition(const SColumnInfoData* pCol, int32_t start, int32_t numOfRows, bool isStr) { int32_t i = start; @@ -255,31 +88,31 @@ static void handleInt8Col(const void* data, int32_t start, int32_t numOfRows, SM pBuf->v = ((const int8_t*)data)[start]; } -#ifdef __AVX2__ - if (tsAVX2Supported && tsSIMDEnable && numOfRows * sizeof(int8_t) >= sizeof(__m256i)) { - i8VectorCmpAVX2(data + start * sizeof(int8_t), numOfRows, isMinFunc, signVal, &pBuf->v); - } else { -#else - if (true) { -#endif - if (signVal) { - const int8_t* p = (const int8_t*)data; - int8_t* v = (int8_t*)&pBuf->v; - - if (isMinFunc) { - __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); - } else { - __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); - } + if (tsAVX2Supported && tsSIMDEnable && numOfRows * sizeof(int8_t) >= M256_BYTES) { + int32_t code = i8VectorCmpAVX2(((char*)data) + start * sizeof(int8_t), numOfRows, isMinFunc, signVal, &pBuf->v); + if (code == TSDB_CODE_SUCCESS) { + pBuf->assign = true; + return; + } + } + + if (signVal) { + const int8_t* p = (const int8_t*)data; + int8_t* v = (int8_t*)&pBuf->v; + + if (isMinFunc) { + __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); } else { - const uint8_t* p = (const uint8_t*)data; - uint8_t* v = (uint8_t*)&pBuf->v; + 
__COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); + } + } else { + const uint8_t* p = (const uint8_t*)data; + uint8_t* v = (uint8_t*)&pBuf->v; - if (isMinFunc) { - __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); - } else { - __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); - } + if (isMinFunc) { + __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); + } else { + __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); } } @@ -292,31 +125,31 @@ static void handleInt16Col(const void* data, int32_t start, int32_t numOfRows, S pBuf->v = ((const int16_t*)data)[start]; } -#ifdef __AVX2__ - if (tsAVX2Supported && tsSIMDEnable && numOfRows * sizeof(int16_t) >= sizeof(__m256i)) { - i16VectorCmpAVX2(data + start * sizeof(int16_t), numOfRows, isMinFunc, signVal, &pBuf->v); - } else { -#else - if (true) { -#endif - if (signVal) { - const int16_t* p = (const int16_t*)data; - int16_t* v = (int16_t*)&pBuf->v; - - if (isMinFunc) { - __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); - } else { - __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); - } + if (tsAVX2Supported && tsSIMDEnable && numOfRows * sizeof(int16_t) >= M256_BYTES) { + int32_t code = i16VectorCmpAVX2(((char*)data) + start * sizeof(int16_t), numOfRows, isMinFunc, signVal, &pBuf->v); + if (code == TSDB_CODE_SUCCESS) { + pBuf->assign = true; + return; + } + } + + if (signVal) { + const int16_t* p = (const int16_t*)data; + int16_t* v = (int16_t*)&pBuf->v; + + if (isMinFunc) { + __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); } else { - const uint16_t* p = (const uint16_t*)data; - uint16_t* v = (uint16_t*)&pBuf->v; + __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); + } + } else { + const uint16_t* p = (const uint16_t*)data; + uint16_t* v = (uint16_t*)&pBuf->v; - if (isMinFunc) { - __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); - } else { - __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); - } + if (isMinFunc) { + __COMPARE_EXTRACT_MIN(start, start + 
numOfRows, *v, p); + } else { + __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); } } @@ -329,31 +162,31 @@ static void handleInt32Col(const void* data, int32_t start, int32_t numOfRows, S pBuf->v = ((const int32_t*)data)[start]; } -#ifdef __AVX2__ - if (tsAVX2Supported && tsSIMDEnable && numOfRows * sizeof(int32_t) >= sizeof(__m256i)) { - i32VectorCmpAVX2(data + start * sizeof(int32_t), numOfRows, isMinFunc, signVal, &pBuf->v); - } else { -#else - if (true) { -#endif - if (signVal) { - const int32_t* p = (const int32_t*)data; - int32_t* v = (int32_t*)&pBuf->v; - - if (isMinFunc) { - __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); - } else { - __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); - } + if (tsAVX2Supported && tsSIMDEnable && numOfRows * sizeof(int32_t) >= M256_BYTES) { + int32_t code = i32VectorCmpAVX2(((char*)data) + start * sizeof(int32_t), numOfRows, isMinFunc, signVal, &pBuf->v); + if (code == TSDB_CODE_SUCCESS) { + pBuf->assign = true; + return; + } + } + + if (signVal) { + const int32_t* p = (const int32_t*)data; + int32_t* v = (int32_t*)&pBuf->v; + + if (isMinFunc) { + __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); } else { - const uint32_t* p = (const uint32_t*)data; - uint32_t* v = (uint32_t*)&pBuf->v; + __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); + } + } else { + const uint32_t* p = (const uint32_t*)data; + uint32_t* v = (uint32_t*)&pBuf->v; - if (isMinFunc) { - __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); - } else { - __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); - } + if (isMinFunc) { + __COMPARE_EXTRACT_MIN(start, start + numOfRows, *v, p); + } else { + __COMPARE_EXTRACT_MAX(start, start + numOfRows, *v, p); } } @@ -397,20 +230,20 @@ static void handleFloatCol(SColumnInfoData* pCol, int32_t start, int32_t numOfRo *val = pData[start]; } -#ifdef __AVX2__ - if (tsAVXSupported && tsSIMDEnable && numOfRows * sizeof(float) >= sizeof(__m256i)) { - floatVectorCmpAVX2(pData + start, 
numOfRows, isMinFunc, val); - } else { -#else - if (true) { -#endif - if (isMinFunc) { // min - __COMPARE_EXTRACT_MIN(start, start + numOfRows, *val, pData); - } else { // max - __COMPARE_EXTRACT_MAX(start, start + numOfRows, *val, pData); + if (tsAVX2Supported && tsSIMDEnable && numOfRows * sizeof(float) >= M256_BYTES) { + int32_t code = floatVectorCmpAVX2(pData + start, numOfRows, isMinFunc, val); + if (code == TSDB_CODE_SUCCESS) { + pBuf->assign = true; + return; } } + if (isMinFunc) { // min + __COMPARE_EXTRACT_MIN(start, start + numOfRows, *val, pData); + } else { // max + __COMPARE_EXTRACT_MAX(start, start + numOfRows, *val, pData); + } + pBuf->assign = true; } @@ -422,20 +255,20 @@ static void handleDoubleCol(SColumnInfoData* pCol, int32_t start, int32_t numOfR *val = pData[start]; } -#ifdef __AVX2__ - if (tsAVXSupported && tsSIMDEnable && numOfRows * sizeof(double) >= sizeof(__m256i)) { - doubleVectorCmpAVX2(pData + start, numOfRows, isMinFunc, val); - } else { -#else - if (true) { -#endif - if (isMinFunc) { // min - __COMPARE_EXTRACT_MIN(start, start + numOfRows, *val, pData); - } else { // max - __COMPARE_EXTRACT_MAX(start, start + numOfRows, *val, pData); + if (tsAVX2Supported && tsSIMDEnable && numOfRows * sizeof(double) >= M256_BYTES) { + int32_t code = doubleVectorCmpAVX2(pData + start, numOfRows, isMinFunc, val); + if (code == TSDB_CODE_SUCCESS) { + pBuf->assign = true; + return; } } + if (isMinFunc) { // min + __COMPARE_EXTRACT_MIN(start, start + numOfRows, *val, pData); + } else { // max + __COMPARE_EXTRACT_MAX(start, start + numOfRows, *val, pData); + } + pBuf->assign = true; } diff --git a/source/libs/function/src/detail/tminmaxavx.c b/source/libs/function/src/detail/tminmaxavx.c new file mode 100644 index 00000000000..8fe6cc5448f --- /dev/null +++ b/source/libs/function/src/detail/tminmaxavx.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "builtinsimpl.h" + +#ifdef __AVX2__ +static void calculateRounds(int32_t numOfRows, int32_t bytes, int32_t* remainder, int32_t* rounds, int32_t* width) { + const int32_t bitWidth = 256; + + *width = (bitWidth >> 3u) / bytes; + *remainder = numOfRows % (*width); + *rounds = numOfRows / (*width); +} + +#define __COMPARE_EXTRACT_MIN(start, end, val, _data) \ + for (int32_t i = (start); i < (end); ++i) { \ + if ((val) > (_data)[i]) { \ + (val) = (_data)[i]; \ + } \ + } + +#define __COMPARE_EXTRACT_MAX(start, end, val, _data) \ + for (int32_t i = (start); i < (end); ++i) { \ + if ((val) < (_data)[i]) { \ + (val) = (_data)[i]; \ + } \ + } + +#define EXTRACT_MAX_VAL(_first, _sec, _width, _remain, _v) \ + __COMPARE_EXTRACT_MAX(0, (_width), (_v), (_first)) \ + __COMPARE_EXTRACT_MAX(0, (_remain), (_v), (_sec)) + +#define EXTRACT_MIN_VAL(_first, _sec, _width, _remain, _v) \ + __COMPARE_EXTRACT_MIN(0, (_width), (_v), (_first)) \ + __COMPARE_EXTRACT_MIN(0, (_remain), (_v), (_sec)) + +#define CMP_TYPE_MIN_MAX(type, cmp) \ + const type* p = pData; \ + __m256i initVal = _mm256_lddqu_si256((__m256i*)p); \ + p += width; \ + for (int32_t i = 1; i < (rounds); ++i) { \ + __m256i next = _mm256_lddqu_si256((__m256i*)p); \ + initVal = CMP_FUNC_##cmp##_##type(initVal, next); \ + p += width; \ + } \ + const type* q = (const type*)&initVal; \ + type* v = (type*)res; \ + EXTRACT_##cmp##_VAL(q, p, width, remain, *v) +#endif + +int32_t 
i8VectorCmpAVX2(const void* pData, int32_t numOfRows, bool isMinFunc, bool signVal, int64_t* res) { +#ifdef __AVX2__ + const int8_t* p = pData; + + int32_t width, remain, rounds; + calculateRounds(numOfRows, sizeof(int8_t), &remain, &rounds, &width); + +#define CMP_FUNC_MIN_int8_t _mm256_min_epi8 +#define CMP_FUNC_MAX_int8_t _mm256_max_epi8 +#define CMP_FUNC_MIN_uint8_t _mm256_min_epu8 +#define CMP_FUNC_MAX_uint8_t _mm256_max_epu8 + + if (!isMinFunc) { // max function + if (signVal) { + CMP_TYPE_MIN_MAX(int8_t, MAX); + } else { + CMP_TYPE_MIN_MAX(uint8_t, MAX); + } + } else { // min function + if (signVal) { + CMP_TYPE_MIN_MAX(int8_t, MIN); + } else { + CMP_TYPE_MIN_MAX(uint8_t, MIN); + } + } + return TSDB_CODE_SUCCESS; +#else + uError("unable run %s without avx2 instructions", __func__); + return TSDB_CODE_OPS_NOT_SUPPORT; +#endif +} + +int32_t i16VectorCmpAVX2(const void* pData, int32_t numOfRows, bool isMinFunc, bool signVal, int64_t* res) { +#ifdef __AVX2__ + int32_t width, remain, rounds; + calculateRounds(numOfRows, sizeof(int16_t), &remain, &rounds, &width); + +#define CMP_FUNC_MIN_int16_t _mm256_min_epi16 +#define CMP_FUNC_MAX_int16_t _mm256_max_epi16 +#define CMP_FUNC_MIN_uint16_t _mm256_min_epu16 +#define CMP_FUNC_MAX_uint16_t _mm256_max_epu16 + if (!isMinFunc) { // max function + if (signVal) { + CMP_TYPE_MIN_MAX(int16_t, MAX); + } else { + CMP_TYPE_MIN_MAX(uint16_t, MAX); + } + } else { // min function + if (signVal) { + CMP_TYPE_MIN_MAX(int16_t, MIN); + } else { + CMP_TYPE_MIN_MAX(uint16_t, MIN); + } + } + return TSDB_CODE_SUCCESS; +#else + uError("unable run %s without avx2 instructions", __func__); + return TSDB_CODE_OPS_NOT_SUPPORT; +#endif +} + +int32_t i32VectorCmpAVX2(const void* pData, int32_t numOfRows, bool isMinFunc, bool signVal, int64_t* res) { +#ifdef __AVX2__ + int32_t width, remain, rounds; + calculateRounds(numOfRows, sizeof(int32_t), &remain, &rounds, &width); + +#define CMP_FUNC_MIN_int32_t _mm256_min_epi32 +#define 
CMP_FUNC_MAX_int32_t _mm256_max_epi32 +#define CMP_FUNC_MIN_uint32_t _mm256_min_epu32 +#define CMP_FUNC_MAX_uint32_t _mm256_max_epu32 + if (!isMinFunc) { // max function + if (signVal) { + CMP_TYPE_MIN_MAX(int32_t, MAX); + } else { + CMP_TYPE_MIN_MAX(uint32_t, MAX); + } + } else { // min function + if (signVal) { + CMP_TYPE_MIN_MAX(int32_t, MIN); + } else { + CMP_TYPE_MIN_MAX(uint32_t, MIN); + } + } + return TSDB_CODE_SUCCESS; +#else + uError("unable run %s without avx2 instructions", __func__); + return TSDB_CODE_OPS_NOT_SUPPORT; +#endif +} + +int32_t floatVectorCmpAVX2(const float* pData, int32_t numOfRows, bool isMinFunc, float* res) { +#ifdef __AVX2__ + const float* p = pData; + + int32_t width, remain, rounds; + calculateRounds(numOfRows, sizeof(float), &remain, &rounds, &width); + + __m256 next; + __m256 initVal = _mm256_loadu_ps(p); + p += width; + + if (!isMinFunc) { // max function + for (int32_t i = 1; i < rounds; ++i) { + next = _mm256_loadu_ps(p); + initVal = _mm256_max_ps(initVal, next); + p += width; + } + + const float* q = (const float*)&initVal; + EXTRACT_MAX_VAL(q, p, width, remain, *res) + } else { // min function + for (int32_t i = 1; i < rounds; ++i) { + next = _mm256_loadu_ps(p); + initVal = _mm256_min_ps(initVal, next); + p += width; + } + + const float* q = (const float*)&initVal; + EXTRACT_MIN_VAL(q, p, width, remain, *res) + } + return TSDB_CODE_SUCCESS; +#else + uError("unable run %s without avx2 instructions", __func__); + return TSDB_CODE_OPS_NOT_SUPPORT; +#endif +} + +int32_t doubleVectorCmpAVX2(const double* pData, int32_t numOfRows, bool isMinFunc, double* res) { +#ifdef __AVX2__ + const double* p = pData; + + int32_t width, remain, rounds; + calculateRounds(numOfRows, sizeof(double), &remain, &rounds, &width); + + __m256d next; + __m256d initVal = _mm256_loadu_pd(p); + p += width; + + if (!isMinFunc) { // max function + for (int32_t i = 1; i < rounds; ++i) { + next = _mm256_loadu_pd(p); + initVal = _mm256_max_pd(initVal, next); + p 
+= width; + } + + // let sum up the final results + const double* q = (const double*)&initVal; + EXTRACT_MAX_VAL(q, p, width, remain, *res) + } else { // min function + for (int32_t i = 1; i < rounds; ++i) { + next = _mm256_loadu_pd(p); + initVal = _mm256_min_pd(initVal, next); + p += width; + } + + // let sum up the final results + const double* q = (const double*)&initVal; + EXTRACT_MIN_VAL(q, p, width, remain, *res) + } + return TSDB_CODE_SUCCESS; +#else + uError("unable run %s without avx2 instructions", __func__); + return TSDB_CODE_OPS_NOT_SUPPORT; +#endif +} diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index aaa66441eed..a406b23c59b 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -412,6 +412,27 @@ int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNo return code; } +int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** ppFunc) { + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)ppFunc); + if (NULL == *ppFunc) { + return code; + } + + (*ppFunc)->hasPk = pSrcFunc->hasPk; + (*ppFunc)->pkBytes = pSrcFunc->pkBytes; + + (void)snprintf((*ppFunc)->functionName, sizeof((*ppFunc)->functionName), "%s", pName); + (*ppFunc)->pParameterList = pParameterList; + code = getFuncInfo((*ppFunc)); + if (TSDB_CODE_SUCCESS != code) { + (*ppFunc)->pParameterList = NULL; + nodesDestroyNode((SNode*)*ppFunc); + *ppFunc = NULL; + return code; + } + return code; +} + static int32_t createColumnByFunc(const SFunctionNode* pFunc, SColumnNode** ppCol) { int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)ppCol); if (NULL == *ppCol) { @@ -438,7 +459,8 @@ static int32_t createPartialFunction(const SFunctionNode* pSrcFunc, SFunctionNod if (NULL == pParameterList) { return code; } - code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pPartialFunc, pParameterList,pPartialFunc ); 
+ code = + createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pPartialFunc, pSrcFunc, pParameterList, pPartialFunc); if (TSDB_CODE_SUCCESS != code) { nodesDestroyList(pParameterList); return code; @@ -452,8 +474,6 @@ static int32_t createPartialFunction(const SFunctionNode* pSrcFunc, SFunctionNod return TSDB_CODE_FAILED; } tstrncpy((*pPartialFunc)->node.aliasName, name, TSDB_COL_NAME_LEN); - (*pPartialFunc)->hasPk = pSrcFunc->hasPk; - (*pPartialFunc)->pkBytes = pSrcFunc->pkBytes; return TSDB_CODE_SUCCESS; } @@ -479,9 +499,9 @@ static int32_t createMidFunction(const SFunctionNode* pSrcFunc, const SFunctionN int32_t code = createMergeFuncPara(pSrcFunc, pPartialFunc, &pParameterList); if (TSDB_CODE_SUCCESS == code) { if(funcMgtBuiltins[pSrcFunc->funcId].pMiddleFunc != NULL){ - code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMiddleFunc, pParameterList, &pFunc); + code = createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pMiddleFunc, pSrcFunc, pParameterList, &pFunc); }else{ - code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pParameterList, &pFunc); + code = createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pSrcFunc, pParameterList, &pFunc); } } if (TSDB_CODE_SUCCESS == code) { @@ -493,8 +513,6 @@ static int32_t createMidFunction(const SFunctionNode* pSrcFunc, const SFunctionN } else { nodesDestroyList(pParameterList); } - (*pMidFunc)->hasPk = pPartialFunc->hasPk; - (*pMidFunc)->pkBytes = pPartialFunc->pkBytes; return code; } @@ -505,7 +523,7 @@ static int32_t createMergeFunction(const SFunctionNode* pSrcFunc, const SFunctio int32_t code = createMergeFuncPara(pSrcFunc, pPartialFunc, &pParameterList); if (TSDB_CODE_SUCCESS == code) { - code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pParameterList, &pFunc); + code = createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pSrcFunc, pParameterList, &pFunc); } if (TSDB_CODE_SUCCESS == code) { pFunc->hasOriginalFunc = 
true; @@ -522,8 +540,6 @@ static int32_t createMergeFunction(const SFunctionNode* pSrcFunc, const SFunctio } else { nodesDestroyList(pParameterList); } - (*pMergeFunc)->hasPk = pPartialFunc->hasPk; - (*pMergeFunc)->pkBytes = pPartialFunc->pkBytes; return code; } diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 429ab52a8d4..78c16ec7cb8 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -224,7 +224,7 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index *index = -1; - if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v)) { + if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v) || isinf(v)) { return TSDB_CODE_SUCCESS; } @@ -232,6 +232,8 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index double span = pBucket->range.dMaxVal - pBucket->range.dMinVal; if (fabs(span) < DBL_EPSILON) { *index = 0; + } else if (isinf(span)) { + *index = -1; } else { double slotSpan = span / pBucket->numOfSlots; *index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan); diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 6bc9f84d6d8..a8198a804db 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -40,7 +40,7 @@ typedef struct SUdfdData { #ifdef WINDOWS HANDLE jobHandle; #endif - int spawnErr; + int32_t spawnErr; uv_pipe_t ctrlPipe; uv_async_t stopAsync; int32_t stopCalled; @@ -51,15 +51,17 @@ typedef struct SUdfdData { SUdfdData udfdGlobal = {0}; int32_t udfStartUdfd(int32_t startDnodeId); -void udfStopUdfd(); +void udfStopUdfd(); + +extern char **environ; static int32_t udfSpawnUdfd(SUdfdData *pData); -void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal); +void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int32_t termSignal); static void udfUdfdCloseWalkCb(uv_handle_t *handle, void *arg); 
static void udfUdfdStopAsyncCb(uv_async_t *async); static void udfWatchUdfd(void *args); -void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal) { +void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int32_t termSignal) { fnInfo("udfd process exited with status %" PRId64 ", signal %d", exitStatus, termSignal); SUdfdData *pData = process->data; if (exitStatus == 0 && termSignal == 0 || atomic_load_32(&pData->stopCalled)) { @@ -67,7 +69,7 @@ void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal) { } else { fnInfo("udfd process restart"); int32_t code = udfSpawnUdfd(pData); - if(code != 0) { + if (code != 0) { fnError("udfd process restart failed with code:%d", code); } } @@ -75,6 +77,8 @@ void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal) { static int32_t udfSpawnUdfd(SUdfdData *pData) { fnInfo("start to init udfd"); + + int32_t err = 0; uv_process_options_t options = {0}; char path[PATH_MAX] = {0}; @@ -126,17 +130,17 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) { char thrdPoolSizeEnvItem[32] = {0}; snprintf(dnodeIdEnvItem, 32, "%s=%d", "DNODE_ID", pData->dnodeId); - float numCpuCores = 4; + float numCpuCores = 4; int32_t code = taosGetCpuCores(&numCpuCores, false); - if(code != 0) { - fnError("failed to get cpu cores, code:%d", code); + if (code != 0) { + fnError("failed to get cpu cores, code:0x%x", code); } numCpuCores = TMAX(numCpuCores, 2); - snprintf(thrdPoolSizeEnvItem, 32, "%s=%d", "UV_THREADPOOL_SIZE", (int)numCpuCores * 2); + snprintf(thrdPoolSizeEnvItem, 32, "%s=%d", "UV_THREADPOOL_SIZE", (int32_t)numCpuCores * 2); - char pathTaosdLdLib[512] = {0}; - size_t taosdLdLibPathLen = sizeof(pathTaosdLdLib); - int ret = uv_os_getenv("LD_LIBRARY_PATH", pathTaosdLdLib, &taosdLdLibPathLen); + char pathTaosdLdLib[512] = {0}; + size_t taosdLdLibPathLen = sizeof(pathTaosdLdLib); + int32_t ret = uv_os_getenv("LD_LIBRARY_PATH", pathTaosdLdLib, &taosdLdLibPathLen); if (ret != UV_ENOBUFS) { 
taosdLdLibPathLen = strlen(pathTaosdLdLib); } @@ -158,8 +162,8 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) { char *taosFqdnEnvItem = NULL; char *taosFqdn = getenv("TAOS_FQDN"); if (taosFqdn != NULL) { - int subLen = strlen(taosFqdn); - int len = strlen("TAOS_FQDN=") + subLen + 1; + int32_t subLen = strlen(taosFqdn); + int32_t len = strlen("TAOS_FQDN=") + subLen + 1; taosFqdnEnvItem = taosMemoryMalloc(len); if (taosFqdnEnvItem != NULL) { tstrncpy(taosFqdnEnvItem, "TAOS_FQDN=", len); @@ -171,11 +175,53 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) { } } - char *envUdfd[] = {dnodeIdEnvItem, thrdPoolSizeEnvItem, ldLibPathEnvItem,taosFqdnEnvItem, NULL}; + char *envUdfd[] = {dnodeIdEnvItem, thrdPoolSizeEnvItem, ldLibPathEnvItem, taosFqdnEnvItem, NULL}; + + char **envUdfdWithPEnv = NULL; + if (environ != NULL) { + int32_t lenEnvUdfd = ARRAY_SIZE(envUdfd); + int32_t numEnviron = 0; + while (environ[numEnviron] != NULL) { + numEnviron++; + } + + envUdfdWithPEnv = (char **)taosMemoryCalloc(numEnviron + lenEnvUdfd, sizeof(char *)); + if (envUdfdWithPEnv == NULL) { + err = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + for (int32_t i = 0; i < numEnviron; i++) { + int32_t len = strlen(environ[i]) + 1; + envUdfdWithPEnv[i] = (char *)taosMemoryCalloc(len, 1); + if (envUdfdWithPEnv[i] == NULL) { + err = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + tstrncpy(envUdfdWithPEnv[i], environ[i], len); + } - options.env = envUdfd; + for (int32_t i = 0; i < lenEnvUdfd; i++) { + if (envUdfd[i] != NULL) { + int32_t len = strlen(envUdfd[i]) + 1; + envUdfdWithPEnv[numEnviron + i] = (char *)taosMemoryCalloc(len, 1); + if (envUdfdWithPEnv[numEnviron + i] == NULL) { + err = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + tstrncpy(envUdfdWithPEnv[numEnviron + i], envUdfd[i], len); + } + } + envUdfdWithPEnv[numEnviron + lenEnvUdfd - 1] = NULL; + + options.env = envUdfdWithPEnv; + } else { + options.env = envUdfd; + } - int err = uv_spawn(&pData->loop, &pData->process, &options); + err = 
uv_spawn(&pData->loop, &pData->process, &options); pData->process.data = (void *)pData; #ifdef WINDOWS @@ -202,7 +248,21 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) { } else { fnInfo("udfd is initialized"); } - if(taosFqdnEnvItem) taosMemoryFree(taosFqdnEnvItem); + +_OVER: + if (taosFqdnEnvItem) { + taosMemoryFree(taosFqdnEnvItem); + } + + if (envUdfdWithPEnv != NULL) { + int32_t i = 0; + while (envUdfdWithPEnv[i] != NULL) { + taosMemoryFree(envUdfdWithPEnv[i]); + i++; + } + taosMemoryFree(envUdfdWithPEnv); + } + return err; } @@ -225,13 +285,13 @@ static void udfWatchUdfd(void *args) { TAOS_UV_CHECK_ERRNO(udfSpawnUdfd(pData)); atomic_store_32(&pData->spawnErr, 0); (void)uv_barrier_wait(&pData->barrier); - int num = uv_run(&pData->loop, UV_RUN_DEFAULT); + int32_t num = uv_run(&pData->loop, UV_RUN_DEFAULT); fnInfo("udfd loop exit with %d active handles, line:%d", num, __LINE__); uv_walk(&pData->loop, udfUdfdCloseWalkCb, NULL); num = uv_run(&pData->loop, UV_RUN_DEFAULT); fnInfo("udfd loop exit with %d active handles, line:%d", num, __LINE__); - if(uv_loop_close(&pData->loop) != 0) { + if (uv_loop_close(&pData->loop) != 0) { fnError("udfd loop close failed, lino:%d", __LINE__); } return; @@ -240,7 +300,7 @@ static void udfWatchUdfd(void *args) { if (terrno != 0) { (void)uv_barrier_wait(&pData->barrier); atomic_store_32(&pData->spawnErr, terrno); - if(uv_loop_close(&pData->loop) != 0) { + if (uv_loop_close(&pData->loop) != 0) { fnError("udfd loop close failed, lino:%d", __LINE__); } fnError("udfd thread exit with code:%d lino:%d", terrno, terrln); @@ -271,10 +331,10 @@ int32_t udfStartUdfd(int32_t startDnodeId) { int32_t err = atomic_load_32(&pData->spawnErr); if (err != 0) { uv_barrier_destroy(&pData->barrier); - if(uv_async_send(&pData->stopAsync) != 0) { + if (uv_async_send(&pData->stopAsync) != 0) { fnError("start udfd: failed to send stop async"); } - if(uv_thread_join(&pData->thread)!= 0) { + if (uv_thread_join(&pData->thread) != 0) { fnError("start udfd: 
failed to join udfd thread"); } pData->needCleanUp = false; @@ -299,10 +359,10 @@ void udfStopUdfd() { atomic_store_32(&pData->stopCalled, 1); pData->needCleanUp = false; uv_barrier_destroy(&pData->barrier); - if(uv_async_send(&pData->stopAsync) != 0) { + if (uv_async_send(&pData->stopAsync) != 0) { fnError("stop udfd: failed to send stop async"); } - if(uv_thread_join(&pData->thread) != 0) { + if (uv_thread_join(&pData->thread) != 0) { fnError("stop udfd: failed to join udfd thread"); } @@ -341,7 +401,7 @@ typedef void *QUEUE[2]; #define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q))) /* Public macros. */ -#define QUEUE_DATA(ptr, type, field) ((type *)((char *)(ptr)-offsetof(type, field))) +#define QUEUE_DATA(ptr, type, field) ((type *)((char *)(ptr) - offsetof(type, field))) /* Important note: mutating the list while QUEUE_FOREACH is * iterating over its elements results in undefined behavior. @@ -434,8 +494,8 @@ typedef struct SUdfcProxy { QUEUE uvProcTaskQueue; uv_mutex_t udfStubsMutex; - SArray *udfStubs; // SUdfcFuncStub - SArray *expiredUdfStubs; //SUdfcFuncStub + SArray *udfStubs; // SUdfcFuncStub + SArray *expiredUdfStubs; // SUdfcFuncStub uv_mutex_t udfcUvMutex; int8_t initialized; @@ -458,7 +518,7 @@ typedef struct SUdfcUvSession { typedef struct SClientUvTaskNode { SUdfcProxy *udfc; int8_t type; - int errCode; + int32_t errCode; uv_pipe_t *pipe; @@ -516,7 +576,7 @@ enum { UDFC_STATE_STOPPING, // stopping after udfcClose }; -void getUdfdPipeName(char *pipeName, int32_t size); +void getUdfdPipeName(char *pipeName, int32_t size); int32_t encodeUdfSetupRequest(void **buf, const SUdfSetupRequest *setup); void *decodeUdfSetupRequest(const void *buf, SUdfSetupRequest *request); int32_t encodeUdfInterBuf(void **buf, const SUdfInterBuf *state); @@ -801,12 +861,12 @@ void *decodeUdfResponse(const void *buf, SUdfResponse *rsp) { buf = decodeUdfTeardownResponse(buf, &rsp->teardownRsp); break; default: - rsp->code = TSDB_CODE_UDF_INTERNAL_ERROR; + rsp->code = 
TSDB_CODE_UDF_INTERNAL_ERROR; fnError("decode udf response, invalid udf response type %d", rsp->type); break; } - if(buf == NULL) { - rsp->code = terrno; + if (buf == NULL) { + rsp->code = terrno; fnError("decode udf response failed, code:0x%x", rsp->code); } return (void *)buf; @@ -847,12 +907,12 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo udfBlock->numOfRows = block->info.rows; udfBlock->numOfCols = taosArrayGetSize(block->pDataBlock); udfBlock->udfCols = taosMemoryCalloc(taosArrayGetSize(block->pDataBlock), sizeof(SUdfColumn *)); - if((udfBlock->udfCols) == NULL) { + if ((udfBlock->udfCols) == NULL) { return terrno; } for (int32_t i = 0; i < udfBlock->numOfCols; ++i) { udfBlock->udfCols[i] = taosMemoryCalloc(1, sizeof(SUdfColumn)); - if(udfBlock->udfCols[i] == NULL) { + if (udfBlock->udfCols[i] == NULL) { return terrno; } SColumnInfoData *col = (SColumnInfoData *)taosArrayGet(block->pDataBlock, i); @@ -866,18 +926,18 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo if (IS_VAR_DATA_TYPE(udfCol->colMeta.type)) { udfCol->colData.varLenCol.varOffsetsLen = sizeof(int32_t) * udfBlock->numOfRows; udfCol->colData.varLenCol.varOffsets = taosMemoryMalloc(udfCol->colData.varLenCol.varOffsetsLen); - if(udfCol->colData.varLenCol.varOffsets == NULL) { + if (udfCol->colData.varLenCol.varOffsets == NULL) { return terrno; } memcpy(udfCol->colData.varLenCol.varOffsets, col->varmeta.offset, udfCol->colData.varLenCol.varOffsetsLen); udfCol->colData.varLenCol.payloadLen = colDataGetLength(col, udfBlock->numOfRows); udfCol->colData.varLenCol.payload = taosMemoryMalloc(udfCol->colData.varLenCol.payloadLen); - if(udfCol->colData.varLenCol.payload == NULL) { + if (udfCol->colData.varLenCol.payload == NULL) { return terrno; } if (col->reassigned) { for (int32_t row = 0; row < udfCol->colData.numOfRows; ++row) { - char* pColData = col->pData + col->varmeta.offset[row]; + char *pColData = col->pData + 
col->varmeta.offset[row]; int32_t colSize = 0; if (col->info.type == TSDB_DATA_TYPE_JSON) { colSize = getJsonValueLen(pColData); @@ -894,7 +954,7 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo udfCol->colData.fixLenCol.nullBitmapLen = BitmapLen(udfCol->colData.numOfRows); int32_t bitmapLen = udfCol->colData.fixLenCol.nullBitmapLen; udfCol->colData.fixLenCol.nullBitmap = taosMemoryMalloc(udfCol->colData.fixLenCol.nullBitmapLen); - if(udfCol->colData.fixLenCol.nullBitmap == NULL) { + if (udfCol->colData.fixLenCol.nullBitmap == NULL) { return terrno; } char *bitmap = udfCol->colData.fixLenCol.nullBitmap; @@ -927,11 +987,11 @@ int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) { code = bdGetColumnInfoData(block, 0, &col); TAOS_CHECK_GOTO(code, &lino, _exit); - for (int i = 0; i < udfCol->colData.numOfRows; ++i) { + for (int32_t i = 0; i < udfCol->colData.numOfRows; ++i) { if (udfColDataIsNull(udfCol, i)) { colDataSetNULL(col, i); } else { - char* data = udfColDataGetData(udfCol, i); + char *data = udfColDataGetData(udfCol, i); code = colDataSetVal(col, i, data, false); TAOS_CHECK_GOTO(code, &lino, _exit); } @@ -953,32 +1013,32 @@ int32_t convertScalarParamToDataBlock(SScalarParam *input, int32_t numOfCols, SS } // create the basic block info structure - for(int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* pInfo = input[i].columnData; - SColumnInfoData d = {0}; + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData *pInfo = input[i].columnData; + SColumnInfoData d = {0}; d.info = pInfo->info; TAOS_CHECK_GOTO(blockDataAppendColInfo(output, &d), &lino, _exit); } - TAOS_CHECK_GOTO(blockDataEnsureCapacity(output, numOfRows), &lino, _exit); + TAOS_CHECK_GOTO(blockDataEnsureCapacity(output, numOfRows), &lino, _exit); - for(int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData* pDest = taosArrayGet(output->pDataBlock, i); + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData *pDest = 
taosArrayGet(output->pDataBlock, i); - SColumnInfoData* pColInfoData = input[i].columnData; + SColumnInfoData *pColInfoData = input[i].columnData; TAOS_CHECK_GOTO(colDataAssign(pDest, pColInfoData, input[i].numOfRows, &output->info), &lino, _exit); if (input[i].numOfRows < numOfRows) { int32_t startRow = input[i].numOfRows; - int expandRows = numOfRows - startRow; - bool isNull = colDataIsNull_s(pColInfoData, (input+i)->numOfRows - 1); + int32_t expandRows = numOfRows - startRow; + bool isNull = colDataIsNull_s(pColInfoData, (input + i)->numOfRows - 1); if (isNull) { colDataSetNNULL(pDest, startRow, expandRows); } else { - char* src = colDataGetData(pColInfoData, (input + i)->numOfRows - 1); - for (int j = 0; j < expandRows; ++j) { - TAOS_CHECK_GOTO(colDataSetVal(pDest, startRow+j, src, false), &lino, _exit); + char *src = colDataGetData(pColInfoData, (input + i)->numOfRows - 1); + for (int32_t j = 0; j < expandRows; ++j) { + TAOS_CHECK_GOTO(colDataSetVal(pDest, startRow + j, src, false), &lino, _exit); } } } @@ -1000,7 +1060,7 @@ int32_t convertDataBlockToScalarParm(SSDataBlock *input, SScalarParam *output) { output->numOfRows = input->info.rows; output->columnData = taosMemoryMalloc(sizeof(SColumnInfoData)); - if(output->columnData == NULL) { + if (output->columnData == NULL) { return terrno; } memcpy(output->columnData, taosArrayGet(input->pDataBlock, 0), sizeof(SColumnInfoData)); @@ -1012,11 +1072,11 @@ int32_t convertDataBlockToScalarParm(SSDataBlock *input, SScalarParam *output) { ////////////////////////////////////////////////////////////////////////////////////////////////////////////// // memory layout |---SUdfAggRes----|-----final result-----|---inter result----| typedef struct SUdfAggRes { - int8_t finalResNum; - int8_t interResNum; + int8_t finalResNum; + int8_t interResNum; int32_t interResBufLen; - char *finalResBuf; - char *interResBuf; + char *finalResBuf; + char *interResBuf; } SUdfAggRes; void onUdfcPipeClose(uv_handle_t *handle); @@ -1026,8 
+1086,8 @@ bool isUdfcUvMsgComplete(SClientConnBuf *connBuf); void udfcUvHandleRsp(SClientUvConn *conn); void udfcUvHandleError(SClientUvConn *conn); void onUdfcPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf); -void onUdfcPipeWrite(uv_write_t *write, int status); -void onUdfcPipeConnect(uv_connect_t *connect, int status); +void onUdfcPipeWrite(uv_write_t *write, int32_t status); +void onUdfcPipeConnect(uv_connect_t *connect, int32_t status); int32_t udfcInitializeUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvTaskNode *uvTask); int32_t udfcQueueUvTask(SClientUvTaskNode *uvTask); int32_t udfcStartUvTask(SClientUvTaskNode *uvTask); @@ -1037,7 +1097,7 @@ void udfStopAsyncCb(uv_async_t *async); void constructUdfService(void *argsThread); int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType); int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle); -int compareUdfcFuncSub(const void *elem1, const void *elem2); +int32_t compareUdfcFuncSub(const void *elem1, const void *elem2); int32_t doTeardownUdf(UdfcFuncHandle handle); int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdfInterBuf *state, SUdfInterBuf *state2, @@ -1062,9 +1122,9 @@ int32_t udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pRes int32_t udfAggProcess(struct SqlFunctionCtx *pCtx); int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock); -void cleanupNotExpiredUdfs(); -void cleanupExpiredUdfs(); -int compareUdfcFuncSub(const void *elem1, const void *elem2) { +void cleanupNotExpiredUdfs(); +void cleanupExpiredUdfs(); +int32_t compareUdfcFuncSub(const void *elem1, const void *elem2) { SUdfcFuncStub *stub1 = (SUdfcFuncStub *)elem1; SUdfcFuncStub *stub2 = (SUdfcFuncStub *)elem2; return strcmp(stub1->udfName, stub2->udfName); @@ -1150,21 +1210,22 @@ void releaseUdfFuncHandle(char *udfName, UdfcFuncHandle handle) { void cleanupExpiredUdfs() { int32_t i = 0; SArray *expiredUdfStubs = taosArrayInit(16, 
sizeof(SUdfcFuncStub)); - if(expiredUdfStubs == NULL) { + if (expiredUdfStubs == NULL) { fnError("cleanupExpiredUdfs: failed to init array"); return; } while (i < taosArrayGetSize(gUdfcProxy.expiredUdfStubs)) { SUdfcFuncStub *stub = taosArrayGet(gUdfcProxy.expiredUdfStubs, i); if (stub->refCount == 0) { - fnInfo("tear down udf. expired. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, stub->refCount); + fnInfo("tear down udf. expired. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, + stub->refCount); (void)doTeardownUdf(stub->handle); } else { - fnInfo("udf still in use. expired. udf name: %s, ref count: %d, create time: %" PRId64 ", handle: %p", stub->udfName, - stub->refCount, stub->createTime, stub->handle); + fnInfo("udf still in use. expired. udf name: %s, ref count: %d, create time: %" PRId64 ", handle: %p", + stub->udfName, stub->refCount, stub->createTime, stub->handle); UdfcFuncHandle handle = stub->handle; if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) { - if(taosArrayPush(expiredUdfStubs, stub) == NULL) { + if (taosArrayPush(expiredUdfStubs, stub) == NULL) { fnError("cleanupExpiredUdfs: failed to push udf stub to array"); } } else { @@ -1347,7 +1408,8 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) { return code; } - SUdfInterBuf state = {.buf = udfRes->interResBuf, .bufLen = udfRes->interResBufLen, .numOfResult = udfRes->interResNum}; + SUdfInterBuf state = { + .buf = udfRes->interResBuf, .bufLen = udfRes->interResBufLen, .numOfResult = udfRes->interResNum}; SUdfInterBuf newState = {0}; udfCode = doCallUdfAggProcess(session, inputBlock, &state, &newState); @@ -1391,8 +1453,9 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock) { udfRes->interResBuf = (char *)udfRes + sizeof(SUdfAggRes) + session->bytes; SUdfInterBuf resultBuf = {0}; - SUdfInterBuf state = {.buf = udfRes->interResBuf, .bufLen = udfRes->interResBufLen, .numOfResult = udfRes->interResNum}; - 
int32_t udfCallCode = 0; + SUdfInterBuf state = { + .buf = udfRes->interResBuf, .bufLen = udfRes->interResBufLen, .numOfResult = udfRes->interResNum}; + int32_t udfCallCode = 0; udfCallCode = doCallUdfAggFinalize(session, &state, &resultBuf); if (udfCallCode != 0) { fnError("udfAggFinalize error. doCallUdfAggFinalize step. udf code:%d", udfCallCode); @@ -1448,7 +1511,7 @@ int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode * SUdfResponse rsp = {0}; void *buf = decodeUdfResponse(uvTask->rspBuf.base, &rsp); code = rsp.code; - if(code != 0) { + if (code != 0) { fnError("udfc get udf task result failure. code: %d", code); } @@ -1474,18 +1537,18 @@ int32_t udfcGetUdfTaskResultFromUvTask(SClientUdfTask *task, SClientUvTaskNode * taosMemoryFree(uvTask->rspBuf.base); } else { code = uvTask->errCode; - if(code != 0) { + if (code != 0) { fnError("udfc get udf task result failure. code: %d, line:%d", code, __LINE__); } } } else if (uvTask->type == UV_TASK_CONNECT) { code = uvTask->errCode; - if(code != 0) { + if (code != 0) { fnError("udfc get udf task result failure. code: %d, line:%d", code, __LINE__); } } else if (uvTask->type == UV_TASK_DISCONNECT) { code = uvTask->errCode; - if(code != 0) { + if (code != 0) { fnError("udfc get udf task result failure. code: %d, line:%d", code, __LINE__); } } @@ -1620,7 +1683,7 @@ void onUdfcPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) { } } -void onUdfcPipeWrite(uv_write_t *write, int status) { +void onUdfcPipeWrite(uv_write_t *write, int32_t status) { SClientUvConn *conn = write->data; if (status < 0) { fnError("udfc client connection %p write failed. 
status: %d(%s)", conn, status, uv_strerror(status)); @@ -1631,7 +1694,7 @@ void onUdfcPipeWrite(uv_write_t *write, int status) { taosMemoryFree(write); } -void onUdfcPipeConnect(uv_connect_t *connect, int status) { +void onUdfcPipeConnect(uv_connect_t *connect, int32_t status) { SClientUvTaskNode *uvTask = connect->data; if (status != 0) { fnError("client connect error, task seq: %" PRId64 ", code: %s", uvTask->seqNum, uv_strerror(status)); @@ -1639,7 +1702,7 @@ void onUdfcPipeConnect(uv_connect_t *connect, int status) { uvTask->errCode = status; int32_t code = uv_read_start((uv_stream_t *)uvTask->pipe, udfcAllocateBuffer, onUdfcPipeRead); - if(code != 0) { + if (code != 0) { fnError("udfc client connection %p read start failed. code: %d(%s)", uvTask->pipe, code, uv_strerror(code)); uvTask->errCode = code; } @@ -1678,13 +1741,12 @@ int32_t udfcInitializeUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvT } request.msgLen = bufLen; void *bufBegin = taosMemoryMalloc(bufLen); - if(bufBegin == NULL) { + if (bufBegin == NULL) { fnError("udfc create uv task, malloc buffer failed. size: %d", bufLen); return terrno; } void *buf = bufBegin; - if(encodeUdfRequest(&buf, &request) <= 0) - { + if (encodeUdfRequest(&buf, &request) <= 0) { fnError("udfc create uv task, encode request failed. 
size: %d", bufLen); taosMemoryFree(bufBegin); return TSDB_CODE_UDF_UV_EXEC_FAILURE; @@ -1695,9 +1757,8 @@ int32_t udfcInitializeUvTask(SClientUdfTask *task, int8_t uvTaskType, SClientUvT } else if (uvTaskType == UV_TASK_DISCONNECT) { uvTask->pipe = task->session->udfUvPipe; } - if (uv_sem_init(&uvTask->taskSem, 0) != 0) - { - if (uvTaskType == UV_TASK_REQ_RSP) { + if (uv_sem_init(&uvTask->taskSem, 0) != 0) { + if (uvTaskType == UV_TASK_REQ_RSP) { taosMemoryFree(uvTask->reqBuf.base); } fnError("udfc create uv task, init semaphore failed."); @@ -1733,7 +1794,7 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) { switch (uvTask->type) { case UV_TASK_CONNECT: { uv_pipe_t *pipe = taosMemoryMalloc(sizeof(uv_pipe_t)); - if(pipe == NULL) { + if (pipe == NULL) { fnError("udfc event loop start connect task malloc pipe failed."); return terrno; } @@ -1745,7 +1806,7 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) { uvTask->pipe = pipe; SClientUvConn *conn = taosMemoryCalloc(1, sizeof(SClientUvConn)); - if(conn == NULL) { + if (conn == NULL) { fnError("udfc event loop start connect task malloc conn failed."); taosMemoryFree(pipe); return terrno; @@ -1760,7 +1821,7 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) { pipe->data = conn; uv_connect_t *connReq = taosMemoryMalloc(sizeof(uv_connect_t)); - if(connReq == NULL) { + if (connReq == NULL) { fnError("udfc event loop start connect task malloc connReq failed."); taosMemoryFree(pipe); taosMemoryFree(conn); @@ -1777,14 +1838,14 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) { code = TSDB_CODE_UDF_PIPE_NOT_EXIST; } else { uv_write_t *write = taosMemoryMalloc(sizeof(uv_write_t)); - if(write == NULL) { + if (write == NULL) { fnError("udfc event loop start req_rsp task malloc write failed."); return terrno; } write->data = pipe->data; QUEUE *connTaskQueue = &((SClientUvConn *)pipe->data)->taskQueue; QUEUE_INSERT_TAIL(connTaskQueue, &uvTask->connTaskQueue); - int err = uv_write(write, (uv_stream_t *)pipe, 
&uvTask->reqBuf, 1, onUdfcPipeWrite); + int32_t err = uv_write(write, (uv_stream_t *)pipe, &uvTask->reqBuf, 1, onUdfcPipeWrite); if (err != 0) { taosMemoryFree(write); fnError("udfc event loop start req_rsp task uv_write failed. uvtask: %p, code: %s", uvTask, uv_strerror(err)); @@ -1874,7 +1935,7 @@ void udfStopAsyncCb(uv_async_t *async) { } void constructUdfService(void *argsThread) { - int32_t code = 0, lino = 0; + int32_t code = 0, lino = 0; SUdfcProxy *udfc = (SUdfcProxy *)argsThread; code = uv_loop_init(&udfc->uvLoop); TAOS_CHECK_GOTO(code, &lino, _exit); @@ -1891,7 +1952,7 @@ void constructUdfService(void *argsThread) { QUEUE_INIT(&udfc->uvProcTaskQueue); (void)uv_barrier_wait(&udfc->initBarrier); // TODO return value of uv_run - int num = uv_run(&udfc->uvLoop, UV_RUN_DEFAULT); + int32_t num = uv_run(&udfc->uvLoop, UV_RUN_DEFAULT); fnInfo("udfc uv loop exit. active handle num: %d", num); (void)uv_loop_close(&udfc->uvLoop); @@ -1909,7 +1970,7 @@ void constructUdfService(void *argsThread) { int32_t udfcOpen() { int32_t code = 0, lino = 0; - int8_t old = atomic_val_compare_exchange_8(&gUdfcProxy.initialized, 0, 1); + int8_t old = atomic_val_compare_exchange_8(&gUdfcProxy.initialized, 0, 1); if (old == 1) { return 0; } @@ -1927,12 +1988,12 @@ int32_t udfcOpen() { code = uv_mutex_init(&proxy->udfStubsMutex); TAOS_CHECK_GOTO(code, &lino, _exit); proxy->udfStubs = taosArrayInit(8, sizeof(SUdfcFuncStub)); - if(proxy->udfStubs == NULL) { + if (proxy->udfStubs == NULL) { fnError("udfc init failed. udfStubs: %p", proxy->udfStubs); return -1; } proxy->expiredUdfStubs = taosArrayInit(8, sizeof(SUdfcFuncStub)); - if(proxy->expiredUdfStubs == NULL) { + if (proxy->expiredUdfStubs == NULL) { taosArrayDestroy(proxy->udfStubs); fnError("udfc init failed. 
expiredUdfStubs: %p", proxy->expiredUdfStubs); return -1; @@ -1956,10 +2017,10 @@ int32_t udfcClose() { SUdfcProxy *udfc = &gUdfcProxy; udfc->udfcState = UDFC_STATE_STOPPING; - if(uv_async_send(&udfc->loopStopAsync) != 0) { + if (uv_async_send(&udfc->loopStopAsync) != 0) { fnError("udfc close error to send stop async"); } - if(uv_thread_join(&udfc->loopThread) != 0 ) { + if (uv_thread_join(&udfc->loopThread) != 0) { fnError("udfc close errir to join loop thread"); } uv_mutex_destroy(&udfc->taskQueueMutex); @@ -1974,9 +2035,9 @@ int32_t udfcClose() { } int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType) { - int32_t code = 0, lino = 0; + int32_t code = 0, lino = 0; SClientUvTaskNode *uvTask = taosMemoryCalloc(1, sizeof(SClientUvTaskNode)); - if(uvTask == NULL) { + if (uvTask == NULL) { fnError("udfc client task: %p failed to allocate memory for uvTask", task); return terrno; } @@ -2006,14 +2067,14 @@ int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType) { } int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle) { - int32_t code = TSDB_CODE_SUCCESS, lino = 0; + int32_t code = TSDB_CODE_SUCCESS, lino = 0; SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask)); - if(task == NULL) { + if (task == NULL) { fnError("doSetupUdf, failed to allocate memory for task"); return terrno; } task->session = taosMemoryCalloc(1, sizeof(SUdfcUvSession)); - if(task->session == NULL) { + if (task->session == NULL) { fnError("doSetupUdf, failed to allocate memory for session"); taosMemoryFree(task); return terrno; @@ -2059,7 +2120,7 @@ int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdf return TSDB_CODE_UDF_PIPE_NOT_EXIST; } SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask)); - if(task == NULL) { + if (task == NULL) { fnError("udfc call udf. 
failed to allocate memory for task"); return terrno; } @@ -2163,8 +2224,8 @@ int32_t doCallUdfAggFinalize(UdfcFuncHandle handle, SUdfInterBuf *interBuf, SUdf int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t numOfCols, SScalarParam *output) { int8_t callType = TSDB_UDF_CALL_SCALA_PROC; SSDataBlock inputBlock = {0}; - int32_t code = convertScalarParamToDataBlock(input, numOfCols, &inputBlock); - if(code != 0) { + int32_t code = convertScalarParamToDataBlock(input, numOfCols, &inputBlock); + if (code != 0) { fnError("doCallUdfScalarFunc, convertScalarParamToDataBlock failed. code: %d", code); return code; } @@ -2174,13 +2235,13 @@ int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t err = convertDataBlockToScalarParm(&resultBlock, output); taosArrayDestroy(resultBlock.pDataBlock); } - + blockDataFreeRes(&inputBlock); return err; } int32_t doTeardownUdf(UdfcFuncHandle handle) { - int32_t code = TSDB_CODE_SUCCESS, lino = 0;; + int32_t code = TSDB_CODE_SUCCESS, lino = 0; SUdfcUvSession *session = (SUdfcUvSession *)handle; if (session->udfUvPipe == NULL) { @@ -2190,7 +2251,7 @@ int32_t doTeardownUdf(UdfcFuncHandle handle) { } SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask)); - if(task == NULL) { + if (task == NULL) { fnError("doTeardownUdf, failed to allocate memory for task"); taosMemoryFree(session); return terrno; diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index c360cf68949..6eef99e1f82 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -1217,7 +1217,7 @@ int32_t udfdOpenClientRpc() { connLimitNum = TMIN(connLimitNum, 500); rpcInit.connLimitNum = connLimitNum; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; - TAOS_CHECK_RETURN(taosVersionStrToInt(version, &(rpcInit.compatibilityVer))); + TAOS_CHECK_RETURN(taosVersionStrToInt(td_version, &rpcInit.compatibilityVer)); global.clientRpc = rpcOpen(&rpcInit); if (global.clientRpc 
== NULL) { fnError("failed to init dnode rpc client"); @@ -1470,9 +1470,9 @@ static int32_t udfdParseArgs(int32_t argc, char *argv[]) { } static void udfdPrintVersion() { - (void)printf("udfd version: %s compatible_version: %s\n", version, compatible_version); - (void)printf("git: %s\n", gitinfo); - (void)printf("build: %s\n", buildinfo); + (void)printf("udfd version: %s compatible_version: %s\n", td_version, td_compatible_version); + (void)printf("git: %s\n", td_gitinfo); + (void)printf("build: %s\n", td_buildinfo); } static int32_t udfdInitLog() { diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt index eff74f93d24..e8a8194ea2a 100644 --- a/source/libs/index/test/CMakeLists.txt +++ b/source/libs/index/test/CMakeLists.txt @@ -7,160 +7,159 @@ IF(NOT TD_DARWIN) add_executable(idxFstUtilUT "") target_sources(idxTest - PRIVATE - "indexTests.cc" + PRIVATE + "indexTests.cc" ) target_sources(idxFstTest - PRIVATE - "fstTest.cc" + PRIVATE + "fstTest.cc" ) target_sources(idxFstUT - PRIVATE - "fstUT.cc" + PRIVATE + "fstUT.cc" ) target_sources(idxUtilUT - PRIVATE - "utilUT.cc" + PRIVATE + "utilUT.cc" ) target_sources(idxJsonUT - PRIVATE - "jsonUT.cc" + PRIVATE + "jsonUT.cc" ) target_sources(idxFstUtilUT - PRIVATE - "fstUtilUT.cc" - ) - - target_include_directories (idxTest - PUBLIC - "${TD_SOURCE_DIR}/include/libs/index" - "${CMAKE_CURRENT_SOURCE_DIR}/../inc" - ) - target_include_directories (idxFstTest - PUBLIC - "${TD_SOURCE_DIR}/include/libs/index" - "${CMAKE_CURRENT_SOURCE_DIR}/../inc" - ) + PRIVATE + "fstUtilUT.cc" + ) + + target_include_directories(idxTest + PUBLIC + "${TD_SOURCE_DIR}/include/libs/index" + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" + ) + target_include_directories(idxFstTest + PUBLIC + "${TD_SOURCE_DIR}/include/libs/index" + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" + ) target_sources(idxJsonUT - PRIVATE - "jsonUT.cc" + PRIVATE + "jsonUT.cc" ) - target_include_directories (idxTest + target_include_directories(idxTest PUBLIC 
- "${TD_SOURCE_DIR}/include/libs/index" + "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" - ) - target_include_directories (idxFstTest + ) + target_include_directories(idxFstTest PUBLIC - "${TD_SOURCE_DIR}/include/libs/index" + "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" - ) + ) - target_include_directories (idxFstUT + target_include_directories(idxFstUT PUBLIC - "${TD_SOURCE_DIR}/include/libs/index" + "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" - ) + ) - target_include_directories (idxUtilUT + target_include_directories(idxUtilUT PUBLIC - "${TD_SOURCE_DIR}/include/libs/index" + "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" - ) + ) - target_include_directories (idxJsonUT - PUBLIC - "${TD_SOURCE_DIR}/include/libs/index" - "${CMAKE_CURRENT_SOURCE_DIR}/../inc" - ) - target_include_directories (idxFstUtilUT - PUBLIC - "${TD_SOURCE_DIR}/include/libs/index" - "${CMAKE_CURRENT_SOURCE_DIR}/../inc" - ) - target_include_directories (idxJsonUT + target_include_directories(idxJsonUT + PUBLIC + "${TD_SOURCE_DIR}/include/libs/index" + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" + ) + target_include_directories(idxFstUtilUT PUBLIC - "${TD_SOURCE_DIR}/include/libs/index" + "${TD_SOURCE_DIR}/include/libs/index" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" - ) + ) + target_include_directories(idxJsonUT + PUBLIC + "${TD_SOURCE_DIR}/include/libs/index" + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" + ) - target_link_libraries (idxTest - os + target_link_libraries(idxTest + os util common gtest_main index ) - target_link_libraries (idxFstTest - os + target_link_libraries(idxFstTest + os util common gtest_main index ) - target_link_libraries (idxFstUT - os + target_link_libraries(idxFstUT + os util common gtest_main index ) - target_link_libraries (idxTest - os + target_link_libraries(idxTest + os util common gtest_main index ) - target_link_libraries (idxFstTest - os + 
target_link_libraries(idxFstTest + os util common gtest_main index ) - target_link_libraries (idxFstUT - os + target_link_libraries(idxFstUT + os util common gtest_main index ) - target_link_libraries (idxUtilUT - os + target_link_libraries(idxUtilUT + os util common gtest_main index ) - target_link_libraries (idxJsonUT - os + target_link_libraries(idxJsonUT + os util common gtest_main index ) - target_link_libraries (idxFstUtilUT - os + target_link_libraries(idxFstUtilUT + os util common gtest_main index ) - + add_test( NAME idxJsonUT - COMMAND idxJsonUT + COMMAND idxJsonUT ) add_test( - NAME idxFstUtilUT - COMMAND idxFstUtilUT - + NAME idxFstUtilUT + COMMAND idxFstUtilUT ) add_test( @@ -168,15 +167,15 @@ IF(NOT TD_DARWIN) COMMAND idxTest ) add_test( - NAME idxUtilUT - COMMAND idxUtilUT + NAME idxUtilUT + COMMAND idxUtilUT ) add_test( - NAME idxFstUT - COMMAND idxFstUT + NAME idxFstUT + COMMAND idxFstUT ) add_test( NAME idxFstTest - COMMAND idxFstTest + COMMAND idxFstTest ) -ENDIF () +ENDIF() diff --git a/source/libs/monitor/src/monFramework.c b/source/libs/monitor/src/monFramework.c index a2d03bbd6aa..0dbf6e091ac 100644 --- a/source/libs/monitor/src/monFramework.c +++ b/source/libs/monitor/src/monFramework.c @@ -183,7 +183,7 @@ void monGenClusterInfoTable(SMonInfo *pMonitor){ } if (taosHashRemove(tsMonitor.metrics, metric_names[i], strlen(metric_names[i])) != 0) { - uError("failed to remove metric %s", metric_names[i]); + uTrace("failed to remove metric %s", metric_names[i]); } } @@ -652,7 +652,7 @@ void monGenMnodeRoleTable(SMonInfo *pMonitor){ } if (taosHashRemove(tsMonitor.metrics, mnodes_role_gauges[i], strlen(mnodes_role_gauges[i])) != 0) { - uError("failed to remove metric %s", mnodes_role_gauges[i]); + uTrace("failed to remove metric %s", mnodes_role_gauges[i]); } } @@ -725,7 +725,7 @@ void monGenVnodeRoleTable(SMonInfo *pMonitor){ } if (taosHashRemove(tsMonitor.metrics, vnodes_role_gauges[i], strlen(vnodes_role_gauges[i])) != 0) { - uError("failed to 
remove metric %s", vnodes_role_gauges[i]); + uTrace("failed to remove metric %s", vnodes_role_gauges[i]); } } diff --git a/source/libs/monitor/test/monTest.cpp b/source/libs/monitor/test/monTest.cpp index 2660cff2169..a788a5a3410 100644 --- a/source/libs/monitor/test/monTest.cpp +++ b/source/libs/monitor/test/monTest.cpp @@ -26,7 +26,10 @@ class MonitorTest : public ::testing::Test { monInit(&cfg); } - static void TearDownTestSuite() { monCleanup(); } + static void TearDownTestSuite() { + monCleanup(); + taosMsleep(100); + } public: void SetUp() override {} diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index b77ffb8d2c6..1a5785190be 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -175,8 +175,8 @@ static int32_t valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) { case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: case TSDB_DATA_TYPE_GEOMETRY: { - int32_t len = pSrc->node.resType.bytes + 1; - pDst->datum.p = taosMemoryCalloc(1, len); + int32_t len = varDataTLen(pSrc->datum.p); + pDst->datum.p = taosMemoryCalloc(1, len + 1); if (NULL == pDst->datum.p) { return terrno; } @@ -642,6 +642,7 @@ static int32_t logicFillCopy(const SFillLogicNode* pSrc, SFillLogicNode* pDst) { CLONE_NODE_FIELD(pWStartTs); CLONE_NODE_FIELD(pValues); COPY_OBJECT_FIELD(timeRange, sizeof(STimeWindow)); + CLONE_NODE_LIST_FIELD(pFillNullExprs); return TSDB_CODE_SUCCESS; } @@ -677,9 +678,12 @@ static int32_t logicInterpFuncCopy(const SInterpFuncLogicNode* pSrc, SInterpFunc CLONE_NODE_LIST_FIELD(pFuncs); COPY_OBJECT_FIELD(timeRange, sizeof(STimeWindow)); COPY_SCALAR_FIELD(interval); + COPY_SCALAR_FIELD(intervalUnit); + COPY_SCALAR_FIELD(precision); COPY_SCALAR_FIELD(fillMode); CLONE_NODE_FIELD(pFillValues); CLONE_NODE_FIELD(pTimeSeries); + COPY_OBJECT_FIELD(streamNodeOption, sizeof(SStreamNodeOption)); return TSDB_CODE_SUCCESS; } @@ -788,7 +792,7 @@ static int32_t 
physiWindowCopy(const SWindowPhysiNode* pSrc, SWindowPhysiNode* p COPY_SCALAR_FIELD(triggerType); COPY_SCALAR_FIELD(watermark); COPY_SCALAR_FIELD(igExpired); - COPY_SCALAR_FIELD(destHasPrimayKey); + COPY_SCALAR_FIELD(destHasPrimaryKey); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 0f67493094f..3275cfd8385 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -431,6 +431,8 @@ const char* nodesNodeName(ENodeType type) { return "PhysiIndefRowsFunc"; case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: return "PhysiInterpFunc"; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC: + return "PhysiStreamInterpFunc"; case QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC: return "PhysiForecastFunc"; case QUERY_NODE_PHYSICAL_PLAN_DISPATCH: @@ -1235,10 +1237,67 @@ static int32_t jsonToLogicIndefRowsFuncNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkStreamOption_triggerType = "StreamOptionTriggerType"; +static const char* jkStreamOption_watermark = "StreamOptionWatermark"; +static const char* jkStreamOption_deleteMark = "StreamOptionDeleteMark"; +static const char* jkStreamOption_igExpired = "StreamOptionIgExpired"; +static const char* jkStreamOption_igCheckUpdate = "StreamOption_igCheckUpdate"; +static const char* jkStreamOption_destHasPrimaryKey = "StreamOptionDestHasPrimaryKey"; + +static int32_t streamNodeOptionToJson(const void* pObj, SJson* pJson) { + const SStreamNodeOption* pNode = (const SStreamNodeOption*)pObj; + int32_t code = tjsonAddIntegerToObject(pJson, jkStreamOption_triggerType, pNode->triggerType); + + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkStreamOption_watermark, pNode->watermark); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkStreamOption_deleteMark, pNode->deleteMark); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, 
jkStreamOption_igExpired, pNode->igExpired); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkStreamOption_igCheckUpdate, pNode->igCheckUpdate); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkStreamOption_destHasPrimaryKey, pNode->destHasPrimaryKey); + } + return code; +} + +static int32_t jsonToStreamNodeOption(const SJson* pJson, void* pObj) { + SStreamNodeOption* pNode = (SStreamNodeOption*)pObj; + int32_t code = tjsonGetTinyIntValue(pJson, jkStreamOption_triggerType, &pNode->triggerType); + + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBigIntValue(pJson, jkStreamOption_watermark, &pNode->watermark); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBigIntValue(pJson, jkStreamOption_deleteMark, &pNode->deleteMark); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkStreamOption_igExpired, &pNode->igExpired); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkStreamOption_igCheckUpdate, &pNode->igCheckUpdate); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkStreamOption_destHasPrimaryKey, &pNode->destHasPrimaryKey); + } + return code; +} + static const char* jkInterpFuncLogicPlanFuncs = "Funcs"; static const char* jkInterpFuncLogicPlanStartTime = "StartTime"; static const char* jkInterpFuncLogicPlanEndTime = "EndTime"; static const char* jkInterpFuncLogicPlanInterval = "Interval"; +static const char* jkInterpFuncLogicPlanIntervalUnit = "IntervalUnit"; +static const char* jkInterpFuncLogicPlanPrecision = "Precision"; +static const char* jkInterpFuncLogicPlanFillMode = "fillMode"; +static const char* jkInterpFuncLogicPlanFillValues = "FillValues"; +static const char* jkInterpFuncLogicPlanTimeSeries = "TimeSeries"; +static const char* jkInterpFuncLogicPlanStreamNodeOption = "StreamNodeOption"; static int32_t logicInterpFuncNodeToJson(const void* pObj, SJson* pJson) { const SInterpFuncLogicNode* pNode = 
(const SInterpFuncLogicNode*)pObj; @@ -1256,6 +1315,24 @@ static int32_t logicInterpFuncNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkInterpFuncLogicPlanInterval, pNode->interval); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkInterpFuncLogicPlanIntervalUnit, pNode->intervalUnit); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkInterpFuncLogicPlanPrecision, pNode->precision); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkInterpFuncLogicPlanFillMode, pNode->fillMode); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkInterpFuncLogicPlanFillValues, nodeToJson, pNode->pFillValues); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkInterpFuncLogicPlanTimeSeries, nodeToJson, pNode->pTimeSeries); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkInterpFuncLogicPlanStreamNodeOption, streamNodeOptionToJson, &pNode->streamNodeOption); + } return code; } @@ -1276,6 +1353,24 @@ static int32_t jsonToLogicInterpFuncNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBigIntValue(pJson, jkInterpFuncLogicPlanInterval, &pNode->interval); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkInterpFuncLogicPlanIntervalUnit, &pNode->intervalUnit); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkInterpFuncLogicPlanPrecision, &pNode->precision); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkInterpFuncLogicPlanFillMode, (int8_t*)&pNode->fillMode); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonToObject(pJson, jkInterpFuncLogicPlanFillValues, jsonToNode, pNode->pFillValues); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkInterpFuncLogicPlanTimeSeries, &pNode->pTimeSeries); + } + if (TSDB_CODE_SUCCESS == 
code) { + code = tjsonToObject(pJson, jkInterpFuncLogicPlanStreamNodeOption, jsonToStreamNodeOption, &pNode->streamNodeOption); + } return code; } @@ -2784,7 +2879,7 @@ static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) { code = tjsonAddBoolToObject(pJson, jkWindowPhysiPlanMergeDataBlock, pNode->mergeDataBlock); } if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanDestHasPrimaryKey, pNode->destHasPrimayKey); + code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanDestHasPrimaryKey, pNode->destHasPrimaryKey); } return code; @@ -2822,7 +2917,7 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) { code = tjsonGetBoolValue(pJson, jkWindowPhysiPlanMergeDataBlock, &pNode->mergeDataBlock); } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetTinyIntValue(pJson, jkWindowPhysiPlanDestHasPrimaryKey, &pNode->destHasPrimayKey); + code = tjsonGetTinyIntValue(pJson, jkWindowPhysiPlanDestHasPrimaryKey, &pNode->destHasPrimaryKey); } return code; @@ -2887,6 +2982,7 @@ static const char* jkFillPhysiPlanWStartTs = "WStartTs"; static const char* jkFillPhysiPlanValues = "Values"; static const char* jkFillPhysiPlanStartTime = "StartTime"; static const char* jkFillPhysiPlanEndTime = "EndTime"; +static const char* jkFillPhysiPlanFillNullExprs = "FillNullExprs"; static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) { const SFillPhysiNode* pNode = (const SFillPhysiNode*)pObj; @@ -2913,6 +3009,9 @@ static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanEndTime, pNode->timeRange.ekey); } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkFillPhysiPlanFillNullExprs, pNode->pFillNullExprs); + } return code; } @@ -2942,6 +3041,9 @@ static int32_t jsonToPhysiFillNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBigIntValue(pJson, jkFillPhysiPlanEndTime, 
&pNode->timeRange.ekey); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkFillPhysiPlanFillNullExprs, &pNode->pFillNullExprs); + } return code; } @@ -3204,9 +3306,12 @@ static const char* jkInterpFuncPhysiPlanFuncs = "Funcs"; static const char* jkInterpFuncPhysiPlanStartTime = "StartTime"; static const char* jkInterpFuncPhysiPlanEndTime = "EndTime"; static const char* jkInterpFuncPhysiPlanInterval = "Interval"; +static const char* jkInterpFuncPhysiPlanIntervalUnit = "intervalUnit"; +static const char* jkInterpFuncPhysiPlanPrecision = "precision"; static const char* jkInterpFuncPhysiPlanFillMode = "FillMode"; static const char* jkInterpFuncPhysiPlanFillValues = "FillValues"; static const char* jkInterpFuncPhysiPlanTimeSeries = "TimeSeries"; +static const char* jkInterpFuncPhysiPlanStreamNodeOption = "StreamNodeOption"; static int32_t physiInterpFuncNodeToJson(const void* pObj, SJson* pJson) { const SInterpFuncPhysiNode* pNode = (const SInterpFuncPhysiNode*)pObj; @@ -3227,6 +3332,12 @@ static int32_t physiInterpFuncNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkInterpFuncPhysiPlanInterval, pNode->interval); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkInterpFuncPhysiPlanIntervalUnit, pNode->intervalUnit); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkInterpFuncPhysiPlanPrecision, pNode->precision); + } if (TSDB_CODE_SUCCESS == code) { code = tjsonAddIntegerToObject(pJson, jkInterpFuncPhysiPlanFillMode, pNode->fillMode); } @@ -3236,6 +3347,9 @@ static int32_t physiInterpFuncNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkInterpFuncPhysiPlanTimeSeries, nodeToJson, pNode->pTimeSeries); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkInterpFuncPhysiPlanStreamNodeOption, streamNodeOptionToJson, &pNode->streamNodeOption); + } return 
code; } @@ -3259,6 +3373,12 @@ static int32_t jsonToPhysiInterpFuncNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBigIntValue(pJson, jkInterpFuncPhysiPlanInterval, &pNode->interval); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkInterpFuncPhysiPlanIntervalUnit, &pNode->intervalUnit); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkInterpFuncPhysiPlanPrecision, &pNode->precision); + } if (TSDB_CODE_SUCCESS == code) { tjsonGetNumberValue(pJson, jkInterpFuncPhysiPlanFillMode, pNode->fillMode, code); } @@ -3268,6 +3388,9 @@ static int32_t jsonToPhysiInterpFuncNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkInterpFuncPhysiPlanTimeSeries, &pNode->pTimeSeries); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonToObject(pJson, jkInterpFuncPhysiPlanStreamNodeOption, jsonToStreamNodeOption, &pNode->streamNodeOption); + } return code; } @@ -5764,6 +5887,10 @@ static const char* jkSelectStmtLimit = "Limit"; static const char* jkSelectStmtSlimit = "Slimit"; static const char* jkSelectStmtStmtName = "StmtName"; static const char* jkSelectStmtHasAggFuncs = "HasAggFuncs"; +static const char* jkSelectStmtInterpFuncs = "HasInterpFuncs"; +static const char* jkSelectStmtInterpFill = "InterpFill"; +static const char* jkSelectStmtInterpEvery = "InterpEvery"; +static const char* jkSelectStmtTwaOrElapsedFuncs = "HasTwaOrElapsedFuncs"; static int32_t selectStmtToJson(const void* pObj, SJson* pJson) { const SSelectStmt* pNode = (const SSelectStmt*)pObj; @@ -5811,6 +5938,18 @@ static int32_t selectStmtToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkSelectStmtHasAggFuncs, pNode->hasAggFuncs); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkSelectStmtInterpFuncs, pNode->hasInterpFunc); + } + if (TSDB_CODE_SUCCESS == code) { + code = 
tjsonAddBoolToObject(pJson, jkSelectStmtTwaOrElapsedFuncs, pNode->hasTwaOrElapsedFunc); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkSelectStmtInterpFill, nodeToJson, pNode->pFill); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkSelectStmtInterpEvery, nodeToJson, pNode->pEvery); + } return code; } @@ -5861,6 +6000,18 @@ static int32_t jsonToSelectStmt(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkSelectStmtHasAggFuncs, &pNode->hasAggFuncs); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkSelectStmtInterpFuncs, &pNode->hasInterpFunc); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkSelectStmtTwaOrElapsedFuncs, &pNode->hasTwaOrElapsedFunc); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkSelectStmtInterpFill, &pNode->pFill); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkSelectStmtInterpEvery, &pNode->pEvery); + } return code; } @@ -8002,6 +8153,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC: return physiIndefRowsFuncNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC: return physiInterpFuncNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC: return physiForecastFuncNodeToJson(pObj, pJson); @@ -8372,6 +8524,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC: return jsonToPhysiIndefRowsFuncNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC: return jsonToPhysiInterpFuncNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC: return jsonToPhysiForecastFuncNode(pJson, pObj); diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 
3d8a57363b6..28d0b9fbd45 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -3190,7 +3190,7 @@ static int32_t physiWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { code = tlvEncodeBool(pEncoder, PHY_WINDOW_CODE_MERGE_DATA_BLOCK, pNode->mergeDataBlock); } if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeI8(pEncoder, PHY_WINDOW_CODE_DEST_HAS_PRIMARY_KEY, pNode->destHasPrimayKey); + code = tlvEncodeI8(pEncoder, PHY_WINDOW_CODE_DEST_HAS_PRIMARY_KEY, pNode->destHasPrimaryKey); } return code; @@ -3234,7 +3234,7 @@ static int32_t msgToPhysiWindowNode(STlvDecoder* pDecoder, void* pObj) { code = tlvDecodeBool(pTlv, &pNode->mergeDataBlock); break; case PHY_WINDOW_CODE_DEST_HAS_PRIMARY_KEY: - code = tlvDecodeI8(pTlv, &pNode->destHasPrimayKey); + code = tlvDecodeI8(pTlv, &pNode->destHasPrimaryKey); break; default: break; @@ -3326,7 +3326,8 @@ enum { PHY_FILL_CODE_WSTART, PHY_FILL_CODE_VALUES, PHY_FILL_CODE_TIME_RANGE, - PHY_FILL_CODE_INPUT_TS_ORDER + PHY_FILL_CODE_INPUT_TS_ORDER, + PHY_FILL_CODE_FILL_NULL_EXPRS, }; static int32_t physiFillNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { @@ -3351,6 +3352,9 @@ static int32_t physiFillNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_TIME_RANGE, timeWindowToMsg, &pNode->timeRange); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_FILL_NULL_EXPRS, nodeListToMsg, pNode->pFillNullExprs); + } return code; } @@ -3383,6 +3387,9 @@ static int32_t msgToPhysiFillNode(STlvDecoder* pDecoder, void* pObj) { case PHY_FILL_CODE_TIME_RANGE: code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, (void**)&pNode->timeRange); break; + case PHY_FILL_CODE_FILL_NULL_EXPRS: + code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pFillNullExprs); + break; default: break; } @@ -4633,6 +4640,7 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { code = 
physiIndefRowsFuncNodeToMsg(pObj, pEncoder); break; case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC: code = physiInterpFuncNodeToMsg(pObj, pEncoder); break; case QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC: @@ -4801,6 +4809,7 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) { code = msgToPhysiIndefRowsFuncNode(pDecoder, pObj); break; case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC: code = msgToPhysiInterpFuncNode(pDecoder, pObj); break; case QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC: diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 4a45eb730aa..42c7bad7d43 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -703,6 +703,8 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) { code = makeNode(type, sizeof(SGroupSortPhysiNode), &pNode); break; case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL: code = makeNode(type, sizeof(SIntervalPhysiNode), &pNode); break; + case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL: + code = makeNode(type, sizeof(SMergeIntervalPhysiNode), &pNode); break; case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL: code = makeNode(type, sizeof(SMergeAlignedIntervalPhysiNode), &pNode); break; case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: @@ -764,6 +766,8 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) { code = makeNode(type, sizeof(SSubplan), &pNode); break; case QUERY_NODE_PHYSICAL_PLAN: code = makeNode(type, sizeof(SQueryPlan), &pNode); break; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC: + code = makeNode(type, sizeof(SStreamInterpFuncPhysiNode), &pNode); break; default: break; } @@ -1495,6 +1499,7 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pLogicNode->pValues); nodesDestroyList(pLogicNode->pFillExprs); nodesDestroyList(pLogicNode->pNotFillExprs); + nodesDestroyList(pLogicNode->pFillNullExprs); break; } case 
QUERY_NODE_LOGIC_PLAN_SORT: { @@ -1605,10 +1610,14 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN: { SHashJoinPhysiNode* pPhyNode = (SHashJoinPhysiNode*)pNode; destroyPhysiNode((SPhysiNode*)pPhyNode); + nodesDestroyNode(pPhyNode->pWindowOffset); + nodesDestroyNode(pPhyNode->pJLimit); nodesDestroyList(pPhyNode->pOnLeft); nodesDestroyList(pPhyNode->pOnRight); nodesDestroyNode(pPhyNode->leftPrimExpr); nodesDestroyNode(pPhyNode->rightPrimExpr); + nodesDestroyNode(pPhyNode->pLeftOnCond); + nodesDestroyNode(pPhyNode->pRightOnCond); nodesDestroyNode(pPhyNode->pFullOnCond); nodesDestroyList(pPhyNode->pTargets); @@ -1616,8 +1625,6 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pPhyNode->pColEqCond); nodesDestroyNode(pPhyNode->pTagEqCond); - nodesDestroyNode(pPhyNode->pLeftOnCond); - nodesDestroyNode(pPhyNode->pRightOnCond); break; } case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: { @@ -1651,6 +1658,7 @@ void nodesDestroyNode(SNode* pNode) { break; } case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL: + case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL: case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL: case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL: case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL: @@ -1666,6 +1674,7 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyList(pPhyNode->pNotFillExprs); nodesDestroyNode(pPhyNode->pWStartTs); nodesDestroyNode(pPhyNode->pValues); + nodesDestroyList(pPhyNode->pFillNullExprs); break; } case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: @@ -1718,7 +1727,8 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyList(pPhyNode->pFuncs); break; } - case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: { + case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC: { SInterpFuncPhysiNode* pPhyNode = (SInterpFuncPhysiNode*)pNode; destroyPhysiNode((SPhysiNode*)pPhyNode); nodesDestroyList(pPhyNode->pExprs); @@ -2054,6 +2064,8 @@ void* nodesGetValueFromNode(SValueNode* pNode) { int32_t 
nodesSetValueNodeValue(SValueNode* pNode, void* value) { switch (pNode->node.resType.type) { + case TSDB_DATA_TYPE_NULL: + break; case TSDB_DATA_TYPE_BOOL: pNode->datum.b = *(bool*)value; *(bool*)&pNode->typeData = pNode->datum.b; @@ -2105,7 +2117,10 @@ int32_t nodesSetValueNodeValue(SValueNode* pNode, void* value) { case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: + case TSDB_DATA_TYPE_DECIMAL: case TSDB_DATA_TYPE_JSON: + case TSDB_DATA_TYPE_BLOB: + case TSDB_DATA_TYPE_MEDIUMBLOB: case TSDB_DATA_TYPE_GEOMETRY: pNode->datum.p = (char*)value; break; diff --git a/source/libs/parser/CMakeLists.txt b/source/libs/parser/CMakeLists.txt index f1b801c563d..bd2dd95ee0f 100644 --- a/source/libs/parser/CMakeLists.txt +++ b/source/libs/parser/CMakeLists.txt @@ -4,7 +4,27 @@ IF(TD_ENTERPRISE) LIST(APPEND PARSER_SRC ${TD_ENTERPRISE_DIR}/src/plugins/view/src/parserView.c) ENDIF() +add_custom_command( + OUTPUT ${TD_SOURCE_DIR}/source/libs/parser/src/sql.c ${TD_SOURCE_DIR}/include/common/ttokenauto.h + COMMAND echo "Running lemon process in ${TD_SOURCE_DIR}/source/libs/parser/inc" + COMMAND ${TD_CONTRIB_DIR}/lemon/lemon sql.y || true + COMMAND echo "copy sql.c from ${TD_SOURCE_DIR}/source/libs/parser/inc/sql.c to ${TD_SOURCE_DIR}/source/libs/parser/src/" + COMMAND mv ${TD_SOURCE_DIR}/source/libs/parser/inc/sql.c ${TD_SOURCE_DIR}/source/libs/parser/src/sql.c + COMMAND mv ${TD_SOURCE_DIR}/source/libs/parser/inc/sql.h ${TD_SOURCE_DIR}/include/common/ttokenauto.h + COMMAND echo "lemon process completed." 
+ DEPENDS ${TD_SOURCE_DIR}/source/libs/parser/inc/sql.y + WORKING_DIRECTORY ${TD_SOURCE_DIR}/source/libs/parser/inc + COMMENT "Generating sql.c using lemon" +) + +add_custom_target(lemon_sql ALL + DEPENDS ${TD_SOURCE_DIR}/source/libs/parser/src/sql.c ${TD_SOURCE_DIR}/include/common/ttokenauto.h +) + +list(APPEND PARSER_SRC ${TD_SOURCE_DIR}/source/libs/parser/src/sql.c) + add_library(parser STATIC ${PARSER_SRC}) +add_dependencies(parser lemon_sql) target_include_directories( parser PUBLIC "${TD_SOURCE_DIR}/include/libs/parser" diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index 28e867965f4..3caa8da80fd 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -64,11 +64,12 @@ typedef enum EDatabaseOptionType { DB_OPTION_STT_TRIGGER, DB_OPTION_TABLE_PREFIX, DB_OPTION_TABLE_SUFFIX, - DB_OPTION_S3_CHUNKSIZE, + DB_OPTION_S3_CHUNKPAGES, DB_OPTION_S3_KEEPLOCAL, DB_OPTION_S3_COMPACT, DB_OPTION_KEEP_TIME_OFFSET, DB_OPTION_ENCRYPT_ALGORITHM, + DB_OPTION_DNODES, } EDatabaseOptionType; typedef enum ETableOptionType { @@ -244,7 +245,7 @@ SNode* createShowTableDistributedStmt(SAstCreateContext* pCxt, SNode* pRealTable SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pLikePattern); SNode* createShowVnodesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pDnodeEndpoint); SNode* createShowTableTagsStmt(SAstCreateContext* pCxt, SNode* pTbName, SNode* pDbName, SNodeList* pTags); -SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo, +SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo, int8_t createdb, int8_t is_import); SNode* addCreateUserStmtWhiteList(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pIpRangesNodeList); SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, void* pAlterInfo); diff --git 
a/source/libs/parser/inc/parInt.h b/source/libs/parser/inc/parInt.h index c231de653c9..5999ada70f1 100644 --- a/source/libs/parser/inc/parInt.h +++ b/source/libs/parser/inc/parInt.h @@ -28,6 +28,8 @@ extern "C" { #define QUERY_SMA_OPTIMIZE_DISABLE 0 #define QUERY_SMA_OPTIMIZE_ENABLE 1 +#define QUERY_NUMBER_MAX_DISPLAY_LEN 65 + int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatalogReq, const SMetaData* pMetaData); int32_t continueCreateTbFromFile(SParseContext* pCxt, SQuery** pQuery); int32_t parse(SParseContext* pParseCxt, SQuery** pQuery); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 99f301445a5..e1c3456e3f2 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -280,12 +280,13 @@ db_options(A) ::= db_options(B) WAL_SEGMENT_SIZE NK_INTEGER(C). db_options(A) ::= db_options(B) STT_TRIGGER NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_STT_TRIGGER, &C); } db_options(A) ::= db_options(B) TABLE_PREFIX signed(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_TABLE_PREFIX, C); } db_options(A) ::= db_options(B) TABLE_SUFFIX signed(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_TABLE_SUFFIX, C); } -db_options(A) ::= db_options(B) S3_CHUNKSIZE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_S3_CHUNKSIZE, &C); } +db_options(A) ::= db_options(B) S3_CHUNKPAGES NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_S3_CHUNKPAGES, &C); } db_options(A) ::= db_options(B) S3_KEEPLOCAL NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_S3_KEEPLOCAL, &C); } db_options(A) ::= db_options(B) S3_KEEPLOCAL NK_VARIABLE(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_S3_KEEPLOCAL, &C); } db_options(A) ::= db_options(B) S3_COMPACT NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_S3_COMPACT, &C); } db_options(A) ::= db_options(B) KEEP_TIME_OFFSET NK_INTEGER(C). 
{ A = setDatabaseOption(pCxt, B, DB_OPTION_KEEP_TIME_OFFSET, &C); } db_options(A) ::= db_options(B) ENCRYPT_ALGORITHM NK_STRING(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_ENCRYPT_ALGORITHM, &C); } +db_options(A) ::= db_options(B) DNODES NK_STRING(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_DNODES, &C); } alter_db_options(A) ::= alter_db_option(B). { A = createAlterDatabaseOptions(pCxt); A = setAlterDatabaseOption(pCxt, A, &B); } alter_db_options(A) ::= alter_db_options(B) alter_db_option(C). { A = setAlterDatabaseOption(pCxt, B, &C); } @@ -775,6 +776,7 @@ tag_def_or_ref_opt(A) ::= TAGS NK_LP column_stream_def_list(B) NK_RP. stream_options(A) ::= . { A = createStreamOptions(pCxt); } stream_options(A) ::= stream_options(B) TRIGGER AT_ONCE(C). { A = setStreamOptions(pCxt, B, SOPT_TRIGGER_TYPE_SET, &C, NULL); } stream_options(A) ::= stream_options(B) TRIGGER WINDOW_CLOSE(C). { A = setStreamOptions(pCxt, B, SOPT_TRIGGER_TYPE_SET, &C, NULL); } +stream_options(A) ::= stream_options(B) TRIGGER FORCE_WINDOW_CLOSE(C). { A = setStreamOptions(pCxt, B, SOPT_TRIGGER_TYPE_SET, &C, NULL); } stream_options(A) ::= stream_options(B) TRIGGER MAX_DELAY(C) duration_literal(D). { A = setStreamOptions(pCxt, B, SOPT_TRIGGER_TYPE_SET, &C, releaseRawExprNode(pCxt, D)); } stream_options(A) ::= stream_options(B) WATERMARK duration_literal(C). { A = setStreamOptions(pCxt, B, SOPT_WATERMARK_SET, NULL, releaseRawExprNode(pCxt, C)); } stream_options(A) ::= stream_options(B) IGNORE EXPIRED NK_INTEGER(C). 
{ A = setStreamOptions(pCxt, B, SOPT_IGNORE_EXPIRED_SET, &C, NULL); } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index e031ee0fe1b..245346273f9 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -43,11 +43,11 @@ } \ } while (0) -#define CHECK_NAME(p) \ - do { \ - if (!p) { \ - goto _err; \ - } \ +#define CHECK_NAME(p) \ + do { \ + if (!p) { \ + goto _err; \ + } \ } while (0) #define COPY_STRING_FORM_ID_TOKEN(buf, pToken) strncpy(buf, (pToken)->z, TMIN((pToken)->n, sizeof(buf) - 1)) @@ -333,7 +333,7 @@ SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) { // Len of pRawExpr->p could be larger than len of aliasName[TSDB_COL_NAME_LEN]. // If aliasName is truncated, hash value of aliasName could be the same. uint64_t hashVal = MurmurHash3_64(pRawExpr->p, pRawExpr->n); - sprintf(pExpr->aliasName, "%"PRIu64, hashVal); + sprintf(pExpr->aliasName, "%" PRIu64, hashVal); strncpy(pExpr->userAlias, pRawExpr->p, len); pExpr->userAlias[len] = '\0'; } @@ -405,7 +405,7 @@ SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken* pCxt->errCode = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&val); CHECK_MAKE_NODE(val); val->literal = taosStrndup(pLiteral->z, pLiteral->n); - if(!val->literal) { + if (!val->literal) { pCxt->errCode = terrno; nodesDestroyNode((SNode*)val); return NULL; @@ -586,8 +586,8 @@ SNodeList* createHintNodeList(SAstCreateContext* pCxt, const SToken* pLiteral) { if (NULL == pLiteral || pLiteral->n <= 5) { return NULL; } - SNodeList* pHintList = NULL; - char* hint = taosStrndup(pLiteral->z + 3, pLiteral->n - 5); + SNodeList* pHintList = NULL; + char* hint = taosStrndup(pLiteral->z + 3, pLiteral->n - 5); if (!hint) return NULL; int32_t i = 0; bool quit = false; @@ -971,7 +971,7 @@ SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pL } SNode* createBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, 
SNode* pRight) { - SNode* pNew = NULL, *pGE = NULL, *pLE = NULL; + SNode *pNew = NULL, *pGE = NULL, *pLE = NULL; CHECK_PARSER_STATUS(pCxt); pCxt->errCode = nodesCloneNode(pExpr, &pNew); CHECK_PARSER_STATUS(pCxt); @@ -993,7 +993,7 @@ SNode* createBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, SNo } SNode* createNotBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, SNode* pRight) { - SNode* pNew = NULL, *pLT = NULL, *pGT = NULL; + SNode *pNew = NULL, *pLT = NULL, *pGT = NULL; CHECK_PARSER_STATUS(pCxt); pCxt->errCode = nodesCloneNode(pExpr, &pNew); CHECK_PARSER_STATUS(pCxt); @@ -1799,6 +1799,7 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) { pOptions->s3Compact = TSDB_DEFAULT_S3_COMPACT; pOptions->withArbitrator = TSDB_DEFAULT_DB_WITH_ARBITRATOR; pOptions->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO; + pOptions->dnodeListStr[0] = 0; return (SNode*)pOptions; _err: return NULL; @@ -1842,6 +1843,7 @@ SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) { pOptions->s3Compact = -1; pOptions->withArbitrator = -1; pOptions->encryptAlgorithm = -1; + pOptions->dnodeListStr[0] = 0; return (SNode*)pOptions; _err: return NULL; @@ -1959,7 +1961,7 @@ static SNode* setDatabaseOptionImpl(SAstCreateContext* pCxt, SNode* pOptions, ED nodesDestroyNode((SNode*)pNode); break; } - case DB_OPTION_S3_CHUNKSIZE: + case DB_OPTION_S3_CHUNKPAGES: pDbOptions->s3ChunkSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10); break; case DB_OPTION_S3_KEEPLOCAL: { @@ -1974,14 +1976,21 @@ static SNode* setDatabaseOptionImpl(SAstCreateContext* pCxt, SNode* pOptions, ED case DB_OPTION_S3_COMPACT: pDbOptions->s3Compact = taosStr2Int8(((SToken*)pVal)->z, NULL, 10); break; - case DB_OPTION_KEEP_TIME_OFFSET: { + case DB_OPTION_KEEP_TIME_OFFSET: pDbOptions->keepTimeOffset = taosStr2Int32(((SToken*)pVal)->z, NULL, 10); break; - case DB_OPTION_ENCRYPT_ALGORITHM: - COPY_STRING_FORM_STR_TOKEN(pDbOptions->encryptAlgorithmStr, (SToken*)pVal); - 
pDbOptions->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO; - break; - } + case DB_OPTION_ENCRYPT_ALGORITHM: + COPY_STRING_FORM_STR_TOKEN(pDbOptions->encryptAlgorithmStr, (SToken*)pVal); + pDbOptions->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO; + break; + case DB_OPTION_DNODES: + if (((SToken*)pVal)->n >= TSDB_DNODE_LIST_LEN) { + snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "the dnode list is too long (should less than %d)", + TSDB_DNODE_LIST_LEN); + pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR; + } else { + COPY_STRING_FORM_STR_TOKEN(pDbOptions->dnodeListStr, (SToken*)pVal); + } default: break; } @@ -2210,7 +2219,7 @@ SNode* setColumnOptionsPK(SAstCreateContext* pCxt, SNode* pOptions) { SNode* setColumnOptions(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal1, void* pVal2) { CHECK_PARSER_STATUS(pCxt); - char optionType[TSDB_CL_OPTION_LEN]; + char optionType[TSDB_CL_OPTION_LEN]; memset(optionType, 0, TSDB_CL_OPTION_LEN); strncpy(optionType, pVal1->z, TMIN(pVal1->n, TSDB_CL_OPTION_LEN)); @@ -2807,7 +2816,7 @@ static int32_t getIpV4RangeFromWhitelistItem(char* ipRange, SIpV4Range* pIpRange int32_t code = TSDB_CODE_SUCCESS; char* ipCopy = taosStrdup(ipRange); if (!ipCopy) return terrno; - char* slash = strchr(ipCopy, '/'); + char* slash = strchr(ipCopy, '/'); if (slash) { *slash = '\0'; struct in_addr addr; @@ -3467,6 +3476,8 @@ static int8_t getTriggerType(uint32_t tokenType) { return STREAM_TRIGGER_WINDOW_CLOSE; case TK_MAX_DELAY: return STREAM_TRIGGER_MAX_DELAY; + case TK_FORCE_WINDOW_CLOSE: + return STREAM_TRIGGER_FORCE_WINDOW_CLOSE; default: break; } diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c index cca35d9c9ac..b5cdf1e4ee2 100644 --- a/source/libs/parser/src/parInsertSml.c +++ b/source/libs/parser/src/parInsertSml.c @@ -468,35 +468,38 @@ int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSc int32_t smlInitHandle(SQuery** query) { *query = NULL; SQuery* pQuery = NULL; 
+ SVnodeModifyOpStmt* stmt = NULL; + int32_t code = nodesMakeNode(QUERY_NODE_QUERY, (SNode**)&pQuery); - if (NULL == pQuery) { - uError("create pQuery error"); - return code; + if (code != 0) { + uError("SML create pQuery error"); + goto END; } pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE; pQuery->haveResultSet = false; pQuery->msgType = TDMT_VND_SUBMIT; - SVnodeModifyOpStmt* stmt = NULL; code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&stmt); - if (NULL == stmt) { - uError("create SVnodeModifyOpStmt error"); - qDestroyQuery(pQuery); - return code; + if (code != 0) { + uError("SML create SVnodeModifyOpStmt error"); + goto END; } stmt->pTableBlockHashObj = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); if (stmt->pTableBlockHashObj == NULL){ - uError("create pTableBlockHashObj error"); - qDestroyQuery(pQuery); - nodesDestroyNode((SNode*)stmt); - return terrno; + uError("SML create pTableBlockHashObj error"); + code = terrno; + goto END; } stmt->freeHashFunc = insDestroyTableDataCxtHashMap; stmt->freeArrayFunc = insDestroyVgroupDataCxtList; pQuery->pRoot = (SNode*)stmt; *query = pQuery; + return code; - return TSDB_CODE_SUCCESS; +END: + nodesDestroyNode((SNode*)stmt); + qDestroyQuery(pQuery); + return code; } int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash) { diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 1c26a7c70ed..4b91f01a8c8 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -30,7 +30,7 @@ typedef struct SInsertParseContext { bool forceUpdate; bool needTableTagVal; bool needRequest; // whether or not request server - bool isStmtBind; // whether is stmt bind + bool isStmtBind; // whether is stmt bind } SInsertParseContext; typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param); @@ -757,7 +757,7 @@ int32_t parseTagValue(SMsgBuf* pMsgBuf, const char** pSql, uint8_t 
precision, SS STagVal val = {0}; int32_t code = parseTagToken(pSql, pToken, pTagSchema, precision, &val, pMsgBuf); if (TSDB_CODE_SUCCESS == code) { - if (NULL == taosArrayPush(pTagVals, &val)){ + if (NULL == taosArrayPush(pTagVals, &val)) { code = terrno; } } @@ -775,11 +775,14 @@ static int32_t buildCreateTbReq(SVnodeModifyOpStmt* pStmt, STag* pTag, SArray* p return terrno; } return insBuildCreateTbReq(pStmt->pCreateTblReq, pStmt->targetTableName.tname, pTag, pStmt->pTableMeta->suid, - pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags, - TSDB_DEFAULT_TABLE_TTL); + pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags, + TSDB_DEFAULT_TABLE_TTL); } int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf, int8_t type) { + if (pToken->type == TK_NK_QUESTION) { + return buildInvalidOperationMsg(pMsgBuf, "insert into super table syntax is not supported for stmt"); + } if ((pToken->type != TK_NOW && pToken->type != TK_TODAY && pToken->type != TK_NK_INTEGER && pToken->type != TK_NK_STRING && pToken->type != TK_NK_FLOAT && pToken->type != TK_NK_BOOL && pToken->type != TK_NULL && pToken->type != TK_NK_HEX && pToken->type != TK_NK_OCT && pToken->type != TK_NK_BIN && @@ -810,7 +813,7 @@ typedef struct SRewriteTagCondCxt { static int32_t rewriteTagCondColumnImpl(STagVal* pVal, SNode** pNode) { SValueNode* pValue = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); if (NULL == pValue) { return code; } @@ -1041,7 +1044,7 @@ static int32_t storeChildTableMeta(SInsertParseContext* pCxt, SVnodeModifyOpStmt return TSDB_CODE_OUT_OF_MEMORY; } - char tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName); if (TSDB_CODE_SUCCESS != code) { taosMemoryFree(pBackup); @@ -1236,7 +1239,7 @@ static int32_t 
getTargetTableMetaAndVgroup(SInsertParseContext* pCxt, SVnodeModi } static int32_t collectUseTable(const SName* pName, SHashObj* pTable) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1382,7 +1385,7 @@ static int32_t getTableDataCxt(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS pStmt->pTableMeta, &pStmt->pCreateTblReq, pTableCxt, false, false); } - char tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1824,37 +1827,49 @@ static int32_t doGetStbRowValues(SInsertParseContext* pCxt, SVnodeModifyOpStmt* code = generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMNS_NUM); break; } - if (pCols->pColIndex[i] < numOfCols) { - const SSchema* pSchema = &pSchemas[pCols->pColIndex[i]]; - SColVal* pVal = taosArrayGet(pStbRowsCxt->aColVals, pCols->pColIndex[i]); - code = parseValueToken(pCxt, ppSql, pToken, (SSchema*)pSchema, precision, pVal); - if (TK_NK_VARIABLE == pToken->type) { - code = buildInvalidOperationMsg(&pCxt->msg, "not expected row value"); - } - } else if (pCols->pColIndex[i] < tbnameIdx) { - const SSchema* pTagSchema = &pSchemas[pCols->pColIndex[i]]; - if (canParseTagsAfter) { - tagTokens[(*pNumOfTagTokens)] = *pToken; - tagSchemas[(*pNumOfTagTokens)] = (SSchema*)pTagSchema; - ++(*pNumOfTagTokens); - } else { - code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, pTagSchema->type); - if (code == TSDB_CODE_SUCCESS && TK_NK_VARIABLE == pToken->type) { + + if (TK_NK_QUESTION == pToken->type) { + pCxt->isStmtBind = true; + if (pCols->pColIndex[i] == tbnameIdx) { + *bFoundTbName = true; + } + if (NULL == pCxt->pComCxt->pStmtCb) { + code = buildSyntaxErrMsg(&pCxt->msg, "? 
only used in stmt", pToken->z); + break; + } + } else { + if (pCols->pColIndex[i] < numOfCols) { + const SSchema* pSchema = &pSchemas[pCols->pColIndex[i]]; + SColVal* pVal = taosArrayGet(pStbRowsCxt->aColVals, pCols->pColIndex[i]); + code = parseValueToken(pCxt, ppSql, pToken, (SSchema*)pSchema, precision, pVal); + if (TK_NK_VARIABLE == pToken->type) { code = buildInvalidOperationMsg(&pCxt->msg, "not expected row value"); } - if (code == TSDB_CODE_SUCCESS) { - code = parseTagValue(&pCxt->msg, ppSql, precision, (SSchema*)pTagSchema, pToken, pTagNames, pTagVals, - &pStbRowsCxt->pTag); + } else if (pCols->pColIndex[i] < tbnameIdx) { + const SSchema* pTagSchema = &pSchemas[pCols->pColIndex[i]]; + if (canParseTagsAfter) { + tagTokens[(*pNumOfTagTokens)] = *pToken; + tagSchemas[(*pNumOfTagTokens)] = (SSchema*)pTagSchema; + ++(*pNumOfTagTokens); + } else { + code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, pTagSchema->type); + if (code == TSDB_CODE_SUCCESS && TK_NK_VARIABLE == pToken->type) { + code = buildInvalidOperationMsg(&pCxt->msg, "not expected row value"); + } + if (code == TSDB_CODE_SUCCESS) { + code = parseTagValue(&pCxt->msg, ppSql, precision, (SSchema*)pTagSchema, pToken, pTagNames, pTagVals, + &pStbRowsCxt->pTag); + } + } + } else if (pCols->pColIndex[i] == tbnameIdx) { + code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, TSDB_DATA_TYPE_BINARY); + if (TK_NK_VARIABLE == pToken->type) { + code = buildInvalidOperationMsg(&pCxt->msg, "not expected tbname"); } - } - } else if (pCols->pColIndex[i] == tbnameIdx) { - code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, TSDB_DATA_TYPE_BINARY); - if (TK_NK_VARIABLE == pToken->type) { - code = buildInvalidOperationMsg(&pCxt->msg, "not expected tbname"); - } - if (code == TSDB_CODE_SUCCESS) { - code = parseTbnameToken(&pCxt->msg, pStbRowsCxt->ctbName.tname, pToken, bFoundTbName); + if (code == TSDB_CODE_SUCCESS) { + code = parseTbnameToken(&pCxt->msg, pStbRowsCxt->ctbName.tname, 
pToken, bFoundTbName); + } } } @@ -1888,6 +1903,11 @@ static int32_t getStbRowValues(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS code = buildSyntaxErrMsg(&pCxt->msg, "tbname value expected", pOrigSql); } + if (code == TSDB_CODE_SUCCESS && pStbRowsCxt->ctbName.tname[0] == '\0') { + *pGotRow = true; + return TSDB_CODE_TSC_STMT_TBNAME_ERROR; + } + bool ctbFirst = true; char ctbFName[TSDB_TABLE_FNAME_LEN]; if (code == TSDB_CODE_SUCCESS) { @@ -1923,8 +1943,8 @@ static int32_t processCtbAutoCreationAndCtbMeta(SInsertParseContext* pCxt, SVnod } if (code == TSDB_CODE_SUCCESS) { code = insBuildCreateTbReq(pStbRowsCxt->pCreateCtbReq, pStbRowsCxt->ctbName.tname, pStbRowsCxt->pTag, - pStbRowsCxt->pStbMeta->uid, pStbRowsCxt->stbName.tname, pStbRowsCxt->aTagNames, - getNumOfTags(pStbRowsCxt->pStbMeta), TSDB_DEFAULT_TABLE_TTL); + pStbRowsCxt->pStbMeta->uid, pStbRowsCxt->stbName.tname, pStbRowsCxt->aTagNames, + getNumOfTags(pStbRowsCxt->pStbMeta), TSDB_DEFAULT_TABLE_TTL); pStbRowsCxt->pTag = NULL; } @@ -1933,9 +1953,9 @@ static int32_t processCtbAutoCreationAndCtbMeta(SInsertParseContext* pCxt, SVnod code = tNameExtractFullName(&pStbRowsCxt->ctbName, ctbFName); SVgroupInfo vg; SRequestConnInfo conn = {.pTrans = pCxt->pComCxt->pTransporter, - .requestId = pCxt->pComCxt->requestId, - .requestObjRefId = pCxt->pComCxt->requestRid, - .mgmtEps = pCxt->pComCxt->mgmtEpSet}; + .requestId = pCxt->pComCxt->requestId, + .requestObjRefId = pCxt->pComCxt->requestRid, + .mgmtEps = pCxt->pComCxt->mgmtEpSet}; if (TSDB_CODE_SUCCESS == code) { code = catalogGetTableHashVgroup(pCxt->pComCxt->pCatalog, &conn, &pStbRowsCxt->ctbName, &vg); } @@ -1979,11 +1999,47 @@ static void clearStbRowsDataContext(SStbRowsDataContext* pStbRowsCxt) { taosMemoryFreeClear(pStbRowsCxt->pCreateCtbReq); } +static int32_t parseStbBoundInfo(SVnodeModifyOpStmt* pStmt, SStbRowsDataContext* pStbRowsCxt, + STableDataCxt** ppTableDataCxt) { + char tbFName[TSDB_TABLE_FNAME_LEN]; + int32_t code = 
tNameExtractFullName(&pStmt->targetTableName, tbFName); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + if (pStmt->usingTableProcessing) { + pStmt->pTableMeta->uid = 0; + } + + code = insGetTableDataCxt(pStmt->pTableBlockHashObj, tbFName, strlen(tbFName), pStmt->pTableMeta, + &pStmt->pCreateTblReq, ppTableDataCxt, false, true); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + insDestroyBoundColInfo(&((*ppTableDataCxt)->boundColsInfo)); + (*ppTableDataCxt)->boundColsInfo = pStbRowsCxt->boundColsInfo; + (*ppTableDataCxt)->boundColsInfo.numOfCols = pStbRowsCxt->boundColsInfo.numOfBound; + (*ppTableDataCxt)->boundColsInfo.numOfBound = pStbRowsCxt->boundColsInfo.numOfBound; + (*ppTableDataCxt)->boundColsInfo.hasBoundCols = pStbRowsCxt->boundColsInfo.hasBoundCols; + (*ppTableDataCxt)->boundColsInfo.pColIndex = taosMemoryCalloc(pStbRowsCxt->boundColsInfo.numOfBound, sizeof(int16_t)); + if (NULL == (*ppTableDataCxt)->boundColsInfo.pColIndex) { + return terrno; + } + (void)memcpy((*ppTableDataCxt)->boundColsInfo.pColIndex, pStbRowsCxt->boundColsInfo.pColIndex, + sizeof(int16_t) * pStmt->pStbRowsCxt->boundColsInfo.numOfBound); + return TSDB_CODE_SUCCESS; +} + static int32_t parseOneStbRow(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, const char** ppSql, SStbRowsDataContext* pStbRowsCxt, bool* pGotRow, SToken* pToken, STableDataCxt** ppTableDataCxt) { bool bFirstTable = false; int32_t code = getStbRowValues(pCxt, pStmt, ppSql, pStbRowsCxt, pGotRow, pToken, &bFirstTable); + + if (code == TSDB_CODE_TSC_STMT_TBNAME_ERROR && *pGotRow) { + return parseStbBoundInfo(pStmt, pStbRowsCxt, ppTableDataCxt); + } + if (code != TSDB_CODE_SUCCESS || !*pGotRow) { return code; } @@ -2176,8 +2232,8 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt if (code == TSDB_CODE_SUCCESS) { SStbRowsDataContext* pStbRowsCxt = rowsDataCxt.pStbRowsCxt; void* pData = pTableDataCxt; - code = taosHashPut(pStmt->pTableCxtHashObj, 
&pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), &pData, - POINTER_BYTES); + code = taosHashPut(pStmt->pTableCxtHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), + &pData, POINTER_BYTES); if (TSDB_CODE_SUCCESS != code) { break; } @@ -2249,7 +2305,7 @@ static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifyOpSt if (!pStmt->stbSyntax && numOfRows > 0) { void* pData = rowsDataCxt.pTableDataCxt; code = taosHashPut(pStmt->pTableCxtHashObj, &pStmt->pTableMeta->uid, sizeof(pStmt->pTableMeta->uid), &pData, - POINTER_BYTES); + POINTER_BYTES); } return code; @@ -2363,8 +2419,7 @@ static int32_t constructStbRowsDataContext(SVnodeModifyOpStmt* pStmt, SStbRowsDa if (TSDB_CODE_SUCCESS == code) { // col values and bound cols info of STableDataContext is not used pStbRowsCxt->aColVals = taosArrayInit(getNumOfColumns(pStbRowsCxt->pStbMeta), sizeof(SColVal)); - if (!pStbRowsCxt->aColVals) - code = terrno; + if (!pStbRowsCxt->aColVals) code = terrno; } if (TSDB_CODE_SUCCESS == code) { code = insInitColValues(pStbRowsCxt->pStbMeta, pStbRowsCxt->aColVals); @@ -2422,9 +2477,6 @@ static int32_t parseInsertStbClauseBottom(SInsertParseContext* pCxt, SVnodeModif // 1. [(tag1_name, ...)] ... // 2. VALUES ... | FILE ... static int32_t parseInsertTableClauseBottom(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) { - if (pStmt->stbSyntax && TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { - return buildInvalidOperationMsg(&pCxt->msg, "insert into super table syntax is not supported for stmt"); - } if (!pStmt->stbSyntax) { STableDataCxt* pTableCxt = NULL; int32_t code = parseSchemaClauseBottom(pCxt, pStmt, &pTableCxt); @@ -2511,9 +2563,9 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif } // db.? situation,ensure that the only thing following the '.' mark is '?' 
- char *tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true); + char* tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true); if ((tbNameAfterDbName != NULL) && (*(tbNameAfterDbName + 1) == '?')) { - char *tbName = NULL; + char* tbName = NULL; if (NULL == pCxt->pComCxt->pStmtCb) { return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z); } @@ -2528,7 +2580,8 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif if (pCxt->isStmtBind) { if (TK_NK_ID == pTbName->type || (tbNameAfterDbName != NULL && *(tbNameAfterDbName + 1) != '?')) { // In SQL statements, the table name has already been specified. - parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param", pCxt->pComCxt->requestId); + parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param", + pCxt->pComCxt->requestId); } } @@ -2614,7 +2667,7 @@ static void destroySubTableHashElem(void* p) { taosMemoryFree(*(STableMeta**)p); static int32_t createVnodeModifOpStmt(SInsertParseContext* pCxt, bool reentry, SNode** pOutput) { SVnodeModifyOpStmt* pStmt = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pStmt); + int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pStmt); if (NULL == pStmt) { return code; } @@ -2729,7 +2782,7 @@ static int32_t buildTagNameFromMeta(STableMeta* pMeta, SArray** pTagName) { return terrno; } SSchema* pSchema = getTableTagSchema(pMeta); - int32_t code = 0; + int32_t code = 0; for (int32_t i = 0; i < pMeta->tableInfo.numOfTags; ++i) { if (NULL == taosArrayPush(*pTagName, pSchema[i].name)) { code = terrno; @@ -2834,7 +2887,7 @@ static int32_t resetVnodeModifOpStmt(SInsertParseContext* pCxt, SQuery* pQuery) } if (NULL == pStmt->pTableBlockHashObj) { pStmt->pTableBlockHashObj = - taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + taosHashInit(128, 
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); } if (NULL == pStmt->pVgroupsHashObj || NULL == pStmt->pTableBlockHashObj) { code = TSDB_CODE_OUT_OF_MEMORY; @@ -2866,7 +2919,7 @@ static int32_t initInsertQuery(SInsertParseContext* pCxt, SCatalogReq* pCatalogR static int32_t setRefreshMeta(SQuery* pQuery) { SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pQuery->pRoot; - int32_t code = 0; + int32_t code = 0; if (taosHashGetSize(pStmt->pTableNameHashObj) > 0) { taosArrayDestroy(pQuery->pTableList); @@ -3065,9 +3118,10 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal .forceUpdate = (NULL != pCatalogReq ? pCatalogReq->forceUpdate : false), .isStmtBind = pCxt->isStmtBind}; - int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery); + int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery); + SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)((*pQuery)->pRoot); if (TSDB_CODE_SUCCESS == code) { - code = parseInsertSqlImpl(&context, (SVnodeModifyOpStmt*)(*pQuery)->pRoot); + code = parseInsertSqlImpl(&context, pStmt); } if (TSDB_CODE_SUCCESS == code) { code = setNextStageInfo(&context, *pQuery, pCatalogReq); @@ -3076,8 +3130,8 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal QUERY_EXEC_STAGE_SCHEDULE == (*pQuery)->execStage) { code = setRefreshMeta(*pQuery); } - insDestroyBoundColInfo(&context.tags); + insDestroyBoundColInfo(&context.tags); // if no data to insert, set emptyMode to avoid request server if (!context.needRequest) { (*pQuery)->execMode = QUERY_EXEC_MODE_EMPTY_RESULT; diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index ee61611bf27..0979028e6df 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -242,7 +242,7 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch } code = 
insBuildCreateTbReq(pDataBlock->pData->pCreateTbReq, tName, pTag, suid, sTableName, tagName, - pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); + pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); pTag = NULL; end: @@ -594,7 +594,7 @@ int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const c } code = insBuildCreateTbReq(pDataBlock->pData->pCreateTbReq, tName, pTag, suid, sTableName, tagName, - pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); + pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); pTag = NULL; end: @@ -797,6 +797,10 @@ int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, for (int c = 0; c < boundInfo->numOfBound; ++c) { SSchema* pColSchema = &pSchema[boundInfo->pColIndex[c]]; SColData* pCol = taosArrayGet(pCols, c); + if (pCol == NULL || pColSchema == NULL) { + code = buildInvalidOperationMsg(&pBuf, "get column schema or column data failed"); + goto _return; + } if (bind[c].num != rowNum) { code = buildInvalidOperationMsg(&pBuf, "row number in each bind param should be the same"); @@ -886,7 +890,7 @@ int32_t qBindStmtSingleColValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* b int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_E** fields, uint8_t timePrec) { - if (fields) { + if (fields != NULL) { *fields = taosMemoryCalloc(numOfBound, sizeof(TAOS_FIELD_E)); if (NULL == *fields) { return terrno; @@ -910,6 +914,44 @@ int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSc return TSDB_CODE_SUCCESS; } +int32_t buildStbBoundFields(SBoundColInfo boundColsInfo, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_STB** fields, + STableMeta* pMeta) { + if (fields != NULL) { + *fields = taosMemoryCalloc(boundColsInfo.numOfBound, sizeof(TAOS_FIELD_STB)); + if (NULL == *fields) { + return terrno; + } + + SSchema* schema = &pSchema[boundColsInfo.pColIndex[0]]; + if 
(TSDB_DATA_TYPE_TIMESTAMP == schema->type) { + (*fields)[0].precision = pMeta->tableInfo.precision; + } + + for (int32_t i = 0; i < boundColsInfo.numOfBound; ++i) { + int16_t idx = boundColsInfo.pColIndex[i]; + + if (idx == pMeta->tableInfo.numOfColumns + pMeta->tableInfo.numOfTags) { + (*fields)[i].field_type = TAOS_FIELD_TBNAME; + tstrncpy((*fields)[i].name, "tbname", sizeof((*fields)[i].name)); + continue; + } else if (idx < pMeta->tableInfo.numOfColumns) { + (*fields)[i].field_type = TAOS_FIELD_COL; + } else { + (*fields)[i].field_type = TAOS_FIELD_TAG; + } + + schema = &pSchema[idx]; + tstrncpy((*fields)[i].name, schema->name, sizeof((*fields)[i].name)); + (*fields)[i].type = schema->type; + (*fields)[i].bytes = schema->bytes; + } + } + + *fieldNum = boundColsInfo.numOfBound; + + return TSDB_CODE_SUCCESS; +} + int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields) { STableDataCxt* pDataBlock = (STableDataCxt*)pBlock; SBoundColInfo* tags = (SBoundColInfo*)boundTags; @@ -939,7 +981,7 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta); if (pDataBlock->boundColsInfo.numOfBound <= 0) { *fieldNum = 0; - if (fields) { + if (fields != NULL) { *fields = NULL; } @@ -952,6 +994,23 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel return TSDB_CODE_SUCCESS; } +int32_t qBuildStmtStbColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_STB** fields) { + STableDataCxt* pDataBlock = (STableDataCxt*)pBlock; + SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta); + if (pDataBlock->boundColsInfo.numOfBound <= 0) { + *fieldNum = 0; + if (fields != NULL) { + *fields = NULL; + } + + return TSDB_CODE_SUCCESS; + } + + CHECK_CODE(buildStbBoundFields(pDataBlock->boundColsInfo, pSchema, fieldNum, fields, pDataBlock->pMeta)); + + return TSDB_CODE_SUCCESS; +} + int32_t qResetStmtColumns(SArray* pCols, bool 
deepClear) { int32_t colNum = taosArrayGetSize(pCols); diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index 4f9b46176c1..bcb560ab5ef 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ b/source/libs/parser/src/parInsertUtil.c @@ -147,7 +147,7 @@ int16_t insFindCol(SToken* pColname, int16_t start, int16_t end, SSchema* pSchem } int32_t insBuildCreateTbReq(SVCreateTbReq* pTbReq, const char* tname, STag* pTag, int64_t suid, const char* sname, - SArray* tagName, uint8_t tagNum, int32_t ttl) { + SArray* tagName, uint8_t tagNum, int32_t ttl) { pTbReq->type = TD_CHILD_TABLE; pTbReq->ctb.pTag = (uint8_t*)pTag; pTbReq->name = taosStrdup(tname); @@ -174,7 +174,7 @@ static void initBoundCols(int32_t ncols, int16_t* pBoundCols) { static int32_t initColValues(STableMeta* pTableMeta, SArray* pValues) { SSchema* pSchemas = getTableColumnSchema(pTableMeta); - int32_t code = 0; + int32_t code = 0; for (int32_t i = 0; i < pTableMeta->tableInfo.numOfColumns; ++i) { SColVal val = COL_VAL_NONE(pSchemas[i].colId, pSchemas[i].type); if (NULL == taosArrayPush(pValues, &val)) { @@ -886,17 +886,90 @@ static bool findFileds(SSchema* pSchema, TAOS_FIELD* fields, int numFields) { return false; } -int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD* tFields, - int numFields, bool needChangeLength, char* errstr, int32_t errstrLen) { - if(data == NULL) { +int32_t checkSchema(SSchema* pColSchema, int8_t* fields, char* errstr, int32_t errstrLen) { + if (*fields != pColSchema->type) { + if (errstr != NULL) + snprintf(errstr, errstrLen, "column type not equal, name:%s, schema type:%s, data type:%s", pColSchema->name, + tDataTypes[pColSchema->type].name, tDataTypes[*fields].name); + return TSDB_CODE_INVALID_PARA; + } + if (IS_VAR_DATA_TYPE(pColSchema->type) && *(int32_t*)(fields + sizeof(int8_t)) > pColSchema->bytes) { + if (errstr != NULL) + snprintf(errstr, errstrLen, + "column var data bytes 
error, name:%s, schema type:%s, bytes:%d, data type:%s, bytes:%d", + pColSchema->name, tDataTypes[pColSchema->type].name, pColSchema->bytes, tDataTypes[*fields].name, + *(int32_t*)(fields + sizeof(int8_t))); + return TSDB_CODE_INVALID_PARA; + } + + if (!IS_VAR_DATA_TYPE(pColSchema->type) && *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) { + if (errstr != NULL) + snprintf(errstr, errstrLen, + "column normal data bytes not equal, name:%s, schema type:%s, bytes:%d, data type:%s, bytes:%d", + pColSchema->name, tDataTypes[pColSchema->type].name, pColSchema->bytes, tDataTypes[*fields].name, + *(int32_t*)(fields + sizeof(int8_t))); + return TSDB_CODE_INVALID_PARA; + } + return 0; +} + +#define PRCESS_DATA(i, j) \ + ret = checkSchema(pColSchema, fields, errstr, errstrLen); \ + if (ret != 0) { \ + goto end; \ + } \ + \ + if (pColSchema->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { \ + hasTs = true; \ + } \ + \ + int8_t* offset = pStart; \ + if (IS_VAR_DATA_TYPE(pColSchema->type)) { \ + pStart += numOfRows * sizeof(int32_t); \ + } else { \ + pStart += BitmapLen(numOfRows); \ + } \ + char* pData = pStart; \ + \ + SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, j); \ + ret = tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData); \ + if (ret != 0) { \ + goto end; \ + } \ + fields += sizeof(int8_t) + sizeof(int32_t); \ + if (needChangeLength && version == BLOCK_VERSION_1) { \ + pStart += htonl(colLength[i]); \ + } else { \ + pStart += colLength[i]; \ + } \ + boundInfo->pColIndex[j] = -1; + +int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* tFields, + int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw) { + int ret = 0; + if (data == NULL) { uError("rawBlockBindData, data is NULL"); return TSDB_CODE_APP_ERROR; } void* tmp = taosHashGet(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, sizeof(pTableMeta->uid)); + 
SVCreateTbReq* pCreateReqTmp = NULL; + if (tmp == NULL && pCreateTb != NULL) { + ret = cloneSVreateTbReq(pCreateTb, &pCreateReqTmp); + if (ret != TSDB_CODE_SUCCESS) { + uError("cloneSVreateTbReq error"); + goto end; + } + } + STableDataCxt* pTableCxt = NULL; - int ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, - sizeof(pTableMeta->uid), pTableMeta, pCreateTb, &pTableCxt, true, false); + ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, + sizeof(pTableMeta->uid), pTableMeta, &pCreateReqTmp, &pTableCxt, true, false); + if (pCreateReqTmp != NULL) { + tdDestroySVCreateTbReq(pCreateReqTmp); + taosMemoryFree(pCreateReqTmp); + } + if (ret != TSDB_CODE_SUCCESS) { uError("insGetTableDataCxt error"); goto end; @@ -948,105 +1021,48 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate ret = TSDB_CODE_INVALID_PARA; goto end; } - if (tFields != NULL && numFields > boundInfo->numOfBound) { - if (errstr != NULL) - snprintf(errstr, errstrLen, "numFields:%d bigger than num of bound cols:%d", numFields, boundInfo->numOfBound); - ret = TSDB_CODE_INVALID_PARA; - goto end; - } - if (tFields == NULL) { - for (int j = 0; j < boundInfo->numOfBound; j++) { - SSchema* pColSchema = &pSchema[j]; - SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, j); - if (*fields != pColSchema->type && *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) { - if (errstr != NULL) - snprintf(errstr, errstrLen, - "column type or bytes not equal, name:%s, schema type:%s, bytes:%d, data type:%s, bytes:%d", - pColSchema->name, tDataTypes[pColSchema->type].name, pColSchema->bytes, tDataTypes[*fields].name, - *(int32_t*)(fields + sizeof(int8_t))); - ret = TSDB_CODE_INVALID_PARA; - goto end; - } - int8_t* offset = pStart; - if (IS_VAR_DATA_TYPE(pColSchema->type)) { - pStart += numOfRows * sizeof(int32_t); - } else { - pStart += BitmapLen(numOfRows); - } - char* pData = 
pStart; - ret = tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData); - if (ret != 0) { - goto end; - } - fields += sizeof(int8_t) + sizeof(int32_t); - if (needChangeLength && version == BLOCK_VERSION_1) { - pStart += htonl(colLength[j]); - } else { - pStart += colLength[j]; - } + bool hasTs = false; + if (tFields == NULL) { + int32_t len = TMIN(numOfCols, boundInfo->numOfBound); + for (int j = 0; j < len; j++) { + SSchema* pColSchema = &pSchema[j]; + PRCESS_DATA(j, j) } } else { - bool hasTs = false; for (int i = 0; i < numFields; i++) { for (int j = 0; j < boundInfo->numOfBound; j++) { SSchema* pColSchema = &pSchema[j]; - if (strcmp(pColSchema->name, tFields[i].name) == 0) { - if (*fields != pColSchema->type && *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) { - if (errstr != NULL) - snprintf(errstr, errstrLen, - "column type or bytes not equal, name:%s, schema type:%s, bytes:%d, data type:%s, bytes:%d", - pColSchema->name, tDataTypes[pColSchema->type].name, pColSchema->bytes, tDataTypes[*fields].name, - *(int32_t*)(fields + sizeof(int8_t))); - ret = TSDB_CODE_INVALID_PARA; - goto end; - } - - if (pColSchema->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { - hasTs = true; - } - - int8_t* offset = pStart; - if (IS_VAR_DATA_TYPE(pColSchema->type)) { - pStart += numOfRows * sizeof(int32_t); - } else { - pStart += BitmapLen(numOfRows); - } - char* pData = pStart; - - SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, j); - ret = tColDataAddValueByDataBlock(pCol, pColSchema->type, pColSchema->bytes, numOfRows, offset, pData); - if (ret != 0) { - goto end; - } - fields += sizeof(int8_t) + sizeof(int32_t); - if (needChangeLength && version == BLOCK_VERSION_1) { - pStart += htonl(colLength[i]); - } else { - pStart += colLength[i]; - } - boundInfo->pColIndex[j] = -1; + char* fieldName = NULL; + if (raw) { + fieldName = ((SSchemaWrapper*)tFields)->pSchema[i].name; + } else { + fieldName = ((TAOS_FIELD*)tFields)[i].name; + } 
+ if (strcmp(pColSchema->name, fieldName) == 0) { + PRCESS_DATA(i, j) break; } } } + } - if (!hasTs) { - if (errstr != NULL) snprintf(errstr, errstrLen, "timestamp column(primary key) not found in raw data"); - ret = TSDB_CODE_INVALID_PARA; - goto end; - } + if (!hasTs) { + if (errstr != NULL) snprintf(errstr, errstrLen, "timestamp column(primary key) not found in raw data"); + ret = TSDB_CODE_INVALID_PARA; + goto end; + } - for (int c = 0; c < boundInfo->numOfBound; ++c) { - if (boundInfo->pColIndex[c] != -1) { - SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, c); - ret = tColDataAddValueByDataBlock(pCol, 0, 0, numOfRows, NULL, NULL); - if (ret != 0) { - goto end; - } - } else { - boundInfo->pColIndex[c] = c; // restore for next block + // process NULL data + for (int c = 0; c < boundInfo->numOfBound; ++c) { + if (boundInfo->pColIndex[c] != -1) { + SColData* pCol = taosArrayGet(pTableCxt->pData->aCol, c); + ret = tColDataAddValueByDataBlock(pCol, 0, 0, numOfRows, NULL, NULL); + if (ret != 0) { + goto end; } + } else { + boundInfo->pColIndex[c] = c; // restore for next block } } diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c index 63121ec044c..1db139b8d45 100644 --- a/source/libs/parser/src/parTokenizer.c +++ b/source/libs/parser/src/parTokenizer.c @@ -340,13 +340,14 @@ static SKeyword keywordTable[] = { {"_FROWTS", TK_FROWTS}, {"ALIVE", TK_ALIVE}, {"VARBINARY", TK_VARBINARY}, - {"S3_CHUNKSIZE", TK_S3_CHUNKSIZE}, + {"S3_CHUNKPAGES", TK_S3_CHUNKPAGES}, {"S3_KEEPLOCAL", TK_S3_KEEPLOCAL}, {"S3_COMPACT", TK_S3_COMPACT}, {"S3MIGRATE", TK_S3MIGRATE}, {"KEEP_TIME_OFFSET", TK_KEEP_TIME_OFFSET}, {"ARBGROUPS", TK_ARBGROUPS}, {"IS_IMPORT", TK_IS_IMPORT}, + {"FORCE_WINDOW_CLOSE", TK_FORCE_WINDOW_CLOSE}, }; // clang-format on @@ -370,7 +371,7 @@ static int32_t doInitKeywordsTable(void) { keywordHashTable = taosHashInit(numOfEntries, MurmurHash3_32, true, false); for (int32_t i = 0; i < numOfEntries; i++) { keywordTable[i].len = 
(uint8_t)strlen(keywordTable[i].name); - void* ptr = &keywordTable[i]; + void* ptr = &keywordTable[i]; int32_t code = taosHashPut(keywordHashTable, keywordTable[i].name, keywordTable[i].len, (void*)&ptr, POINTER_BYTES); if (TSDB_CODE_SUCCESS != code) { taosHashCleanup(keywordHashTable); @@ -698,7 +699,7 @@ uint32_t tGetToken(const char* z, uint32_t* tokenId) { } } if (hasNonAsciiChars) { - *tokenId = TK_NK_ALIAS; // must be alias + *tokenId = TK_NK_ALIAS; // must be alias return i; } if (IS_TRUE_STR(z, i) || IS_FALSE_STR(z, i)) { @@ -713,10 +714,10 @@ uint32_t tGetToken(const char* z, uint32_t* tokenId) { break; } bool hasNonAsciiChars = false; - for (i = 1; ; i++) { + for (i = 1;; i++) { if ((z[i] & 0x80) != 0) { hasNonAsciiChars = true; - } else if (isIdChar[(uint8_t)z[i]]){ + } else if (isIdChar[(uint8_t)z[i]]) { } else { break; } @@ -834,9 +835,7 @@ SToken tStrGetToken(const char* str, int32_t* i, bool isPrevOptr, bool* pIgnoreC bool taosIsKeyWordToken(const char* z, int32_t len) { return (tKeywordCode((char*)z, len) != TK_NK_ID); } -int32_t taosInitKeywordsTable() { - return doInitKeywordsTable(); -} +int32_t taosInitKeywordsTable() { return doInitKeywordsTable(); } void taosCleanupKeywordsTable() { void* m = keywordHashTable; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 4e4a217da60..14c72e04ba9 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -24,7 +24,7 @@ #include "parUtil.h" #include "scalar.h" #include "systable.h" -#include "tanal.h" +#include "tanalytics.h" #include "tcol.h" #include "tglobal.h" #include "ttime.h" @@ -34,19 +34,19 @@ #define SYSTABLE_SHOW_TYPE_OFFSET QUERY_NODE_SHOW_DNODES_STMT -#define CHECK_RES_OUT_OF_MEM(p) \ - do { \ - int32_t code = (p); \ - if (TSDB_CODE_SUCCESS != code) { \ - return code; \ - } \ +#define CHECK_RES_OUT_OF_MEM(p) \ + do { \ + int32_t code = (p); \ + if (TSDB_CODE_SUCCESS != code) { \ + return code; \ + } \ } 
while (0) -#define CHECK_POINTER_OUT_OF_MEM(p) \ - do { \ - if (NULL == (p)) { \ - return code; \ - } \ +#define CHECK_POINTER_OUT_OF_MEM(p) \ + do { \ + if (NULL == (p)) { \ + return code; \ + } \ } while (0) typedef struct SRewriteTbNameContext { @@ -458,7 +458,7 @@ static int32_t collectUseDatabase(const SName* pName, SHashObj* pDbs) { } static int32_t collectUseTable(const SName* pName, SHashObj* pTable) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -709,7 +709,7 @@ static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArr } static int32_t getDBVgInfo(STranslateContext* pCxt, const char* pDbName, SArray** pVgInfo) { - SName name; + SName name; int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName)); if (TSDB_CODE_SUCCESS != code) return code; char dbFname[TSDB_DB_FNAME_LEN] = {0}; @@ -725,7 +725,7 @@ static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pNam } if (TSDB_CODE_SUCCESS == code) { if (pParCxt->async) { - if(pCxt->withOpt) { + if (pCxt->withOpt) { code = getDbTableVgroupFromCache(pCxt->pMetaCache, pName, pInfo); } else { code = getTableVgroupFromCache(pCxt->pMetaCache, pName, pInfo); @@ -777,7 +777,7 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo SParseContext* pParCxt = pCxt->pParseCxt; SName name; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName)); if (TSDB_CODE_SUCCESS != code) return code; char dbFname[TSDB_DB_FNAME_LEN] = {0}; (void)tNameGetFullDbName(&name, dbFname); @@ -1019,8 +1019,7 @@ static uint8_t getPrecisionFromCurrStmt(SNode* pCurrStmt, uint8_t defaultVal) { if (isDeleteStmt(pCurrStmt)) { return ((SDeleteStmt*)pCurrStmt)->precision; } - if (pCurrStmt && 
nodeType(pCurrStmt) == QUERY_NODE_CREATE_TSMA_STMT) - return ((SCreateTSMAStmt*)pCurrStmt)->precision; + if (pCurrStmt && nodeType(pCurrStmt) == QUERY_NODE_CREATE_TSMA_STMT) return ((SCreateTSMAStmt*)pCurrStmt)->precision; return defaultVal; } @@ -1168,7 +1167,7 @@ static bool isBlockTimeLineAlignedQuery(SNode* pStmt) { return false; } -int32_t buildPartitionListFromOrderList(SNodeList* pOrderList, int32_t nodesNum, SNodeList**ppOut) { +int32_t buildPartitionListFromOrderList(SNodeList* pOrderList, int32_t nodesNum, SNodeList** ppOut) { *ppOut = NULL; SNodeList* pPartitionList = NULL; SNode* pNode = NULL; @@ -1194,8 +1193,7 @@ int32_t buildPartitionListFromOrderList(SNodeList* pOrderList, int32_t nodesNum, break; } } - if(TSDB_CODE_SUCCESS == code) - *ppOut = pPartitionList; + if (TSDB_CODE_SUCCESS == code) *ppOut = pPartitionList; return code; } @@ -1229,7 +1227,8 @@ static int32_t isTimeLineAlignedQuery(SNode* pStmt, bool* pRes) { } } } - if (TSDB_CODE_SUCCESS == code && QUERY_NODE_SET_OPERATOR == nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { + if (TSDB_CODE_SUCCESS == code && + QUERY_NODE_SET_OPERATOR == nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { SSetOperator* pSub = (SSetOperator*)((STempTableNode*)pSelect->pFromTable)->pSubquery; if (pSelect->pPartitionByList && pSub->timeLineFromOrderBy && pSub->pOrderByList->length > 1) { SNodeList* pPartitionList = NULL; @@ -1397,12 +1396,16 @@ static void setColumnPrimTs(STranslateContext* pCxt, SColumnNode* pCol, const ST } } -static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* pTable, bool igTags, SNodeList* pList, bool skipProjRef) { +static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* pTable, bool igTags, SNodeList* pList, + bool skipProjRef) { int32_t code = 0; if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) { const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta; int32_t nums = pMeta->tableInfo.numOfColumns + - (igTags ? 
0 : ((TSDB_SUPER_TABLE == pMeta->tableType || ((SRealTableNode*)pTable)->stbRewrite) ? pMeta->tableInfo.numOfTags : 0)); + (igTags ? 0 + : ((TSDB_SUPER_TABLE == pMeta->tableType || ((SRealTableNode*)pTable)->stbRewrite) + ? pMeta->tableInfo.numOfTags + : 0)); for (int32_t i = 0; i < nums; ++i) { if (invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) { pCxt->pParseCxt->hasInvisibleCol = true; @@ -1433,7 +1436,8 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p code = setColumnInfoByExpr(pTempTable, (SExprNode*)pNode, (SColumnNode**)&pCell->pNode); } if (TSDB_CODE_SUCCESS == code) { - if (!skipProjRef) pCol->projRefIdx = ((SExprNode*)pNode)->projIdx; // only set proj ref when select * from (select ...) + if (!skipProjRef) + pCol->projRefIdx = ((SExprNode*)pNode)->projIdx; // only set proj ref when select * from (select ...) } else { break; } @@ -1603,26 +1607,7 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p } } if (*pFound) { - if (QUERY_NODE_FUNCTION == nodeType(pFoundNode) && (SQL_CLAUSE_GROUP_BY == pCxt->currClause || SQL_CLAUSE_PARTITION_BY == pCxt->currClause)) { - pCxt->errCode = getFuncInfo(pCxt, (SFunctionNode*)pFoundNode); - if (TSDB_CODE_SUCCESS == pCxt->errCode) { - if (fmIsVectorFunc(((SFunctionNode*)pFoundNode)->funcId)) { - pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION, (*pCol)->colName); - return DEAL_RES_ERROR; - } else if (fmIsPseudoColumnFunc(((SFunctionNode*)pFoundNode)->funcId)) { - if ('\0' != (*pCol)->tableAlias[0]) { - return translateColumnWithPrefix(pCxt, pCol); - } else { - return translateColumnWithoutPrefix(pCxt, pCol); - } - } else { - /* Do nothing and replace old node with found node. 
*/ - } - } else { - return DEAL_RES_ERROR; - } - } - SNode* pNew = NULL; + SNode* pNew = NULL; int32_t code = nodesCloneNode(pFoundNode, &pNew); if (NULL == pNew) { pCxt->errCode = code; @@ -1630,14 +1615,6 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p } nodesDestroyNode(*(SNode**)pCol); *(SNode**)pCol = (SNode*)pNew; - if (QUERY_NODE_COLUMN == nodeType(pFoundNode)) { - pCxt->errCode = TSDB_CODE_SUCCESS; - if ('\0' != (*pCol)->tableAlias[0]) { - return translateColumnWithPrefix(pCxt, pCol); - } else { - return translateColumnWithoutPrefix(pCxt, pCol); - } - } } return DEAL_RES_CONTINUE; } @@ -1687,7 +1664,7 @@ static int32_t biMakeTbnameProjectAstNode(char* funcName, char* tableAlias, SNod } if (TSDB_CODE_SUCCESS == code) { snprintf(tbNameFunc->node.userAlias, sizeof(tbNameFunc->node.userAlias), (tableAlias) ? "%s.tbname" : "%stbname", - (tableAlias) ? tableAlias : ""); + (tableAlias) ? tableAlias : ""); strncpy(tbNameFunc->node.aliasName, tbNameFunc->functionName, TSDB_COL_NAME_LEN); if (funcName == NULL) { *pOutNode = (SNode*)tbNameFunc; @@ -1705,13 +1682,13 @@ static int32_t biMakeTbnameProjectAstNode(char* funcName, char* tableAlias, SNod if (TSDB_CODE_SUCCESS == code) { if (tsKeepColumnName) { snprintf(multiResFunc->node.userAlias, sizeof(tbNameFunc->node.userAlias), - (tableAlias) ? "%s.tbname" : "%stbname", (tableAlias) ? tableAlias : ""); + (tableAlias) ? "%s.tbname" : "%stbname", (tableAlias) ? tableAlias : ""); strcpy(multiResFunc->node.aliasName, tbNameFunc->functionName); } else { snprintf(multiResFunc->node.userAlias, sizeof(multiResFunc->node.userAlias), - tableAlias ? "%s(%s.tbname)" : "%s(%stbname)", funcName, tableAlias ? tableAlias : ""); + tableAlias ? "%s(%s.tbname)" : "%s(%stbname)", funcName, tableAlias ? 
tableAlias : ""); biMakeAliasNameInMD5(multiResFunc->node.userAlias, strlen(multiResFunc->node.userAlias), - multiResFunc->node.aliasName); + multiResFunc->node.aliasName); } *pOutNode = (SNode*)multiResFunc; } else { @@ -1726,7 +1703,7 @@ static int32_t biMakeTbnameProjectAstNode(char* funcName, char* tableAlias, SNod static int32_t biRewriteSelectFuncParamStar(STranslateContext* pCxt, SSelectStmt* pSelect, SNode* pNode, SListCell* pSelectListCell) { SNodeList* pTbnameNodeList = NULL; - int32_t code = nodesMakeList(&pTbnameNodeList); + int32_t code = nodesMakeList(&pTbnameNodeList); if (!pTbnameNodeList) return code; SFunctionNode* pFunc = (SFunctionNode*)pNode; @@ -1744,8 +1721,7 @@ static int32_t biRewriteSelectFuncParamStar(STranslateContext* pCxt, SSelectStmt ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) { SNode* pTbnameNode = NULL; code = biMakeTbnameProjectAstNode(pFunc->functionName, NULL, &pTbnameNode); - if (TSDB_CODE_SUCCESS == code) - code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); } } if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pTbnameNodeList) > 0) { @@ -1761,8 +1737,7 @@ static int32_t biRewriteSelectFuncParamStar(STranslateContext* pCxt, SSelectStmt ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) { SNode* pTbnameNode = NULL; code = biMakeTbnameProjectAstNode(pFunc->functionName, pTableAlias, &pTbnameNode); - if (TSDB_CODE_SUCCESS == code) - code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); } if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pTbnameNodeList) > 0) { nodesListInsertListAfterPos(pSelect->pProjectionList, pSelectListCell, pTbnameNodeList); @@ -1794,8 +1769,7 @@ int32_t biRewriteSelectStar(STranslateContext* pCxt, SSelectStmt* pSelect) { ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) { 
SNode* pTbnameNode = NULL; code = biMakeTbnameProjectAstNode(NULL, NULL, &pTbnameNode); - if (TSDB_CODE_SUCCESS == code) - code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); } } if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pTbnameNodeList) > 0) { @@ -1810,8 +1784,7 @@ int32_t biRewriteSelectStar(STranslateContext* pCxt, SSelectStmt* pSelect) { ((SRealTableNode*)pTable)->pMeta != NULL && ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) { SNode* pTbnameNode = NULL; code = biMakeTbnameProjectAstNode(NULL, pTableAlias, &pTbnameNode); - if (TSDB_CODE_SUCCESS ==code) - code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); } if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pTbnameNodeList) > 0) { nodesListInsertListAfterPos(pSelect->pProjectionList, cell, pTbnameNodeList); @@ -1877,9 +1850,40 @@ int32_t biCheckCreateTableTbnameCol(STranslateContext* pCxt, SCreateTableStmt* p } static bool clauseSupportAlias(ESqlClause clause) { - return SQL_CLAUSE_GROUP_BY == clause || - SQL_CLAUSE_PARTITION_BY == clause || - SQL_CLAUSE_ORDER_BY == clause; + return SQL_CLAUSE_GROUP_BY == clause || SQL_CLAUSE_PARTITION_BY == clause || SQL_CLAUSE_ORDER_BY == clause; +} + +static EDealRes translateColumnInGroupByClause(STranslateContext* pCxt, SColumnNode** pCol, bool *translateAsAlias) { + *translateAsAlias = false; + // count(*)/first(*)/last(*) and so on + if (0 == strcmp((*pCol)->colName, "*")) { + return DEAL_RES_CONTINUE; + } + + if (pCxt->pParseCxt->biMode) { + SNode** ppNode = (SNode**)pCol; + bool ret; + pCxt->errCode = biRewriteToTbnameFunc(pCxt, ppNode, &ret); + if (TSDB_CODE_SUCCESS != pCxt->errCode) return DEAL_RES_ERROR; + if (ret) { + return translateFunction(pCxt, (SFunctionNode**)ppNode); + } + } + + EDealRes res = DEAL_RES_CONTINUE; + if ('\0' != 
(*pCol)->tableAlias[0]) { + res = translateColumnWithPrefix(pCxt, pCol); + } else { + bool found = false; + res = translateColumnWithoutPrefix(pCxt, pCol); + if (!(*pCol)->node.asParam && + res != DEAL_RES_CONTINUE && + res != DEAL_RES_END && pCxt->errCode != TSDB_CODE_PAR_AMBIGUOUS_COLUMN) { + res = translateColumnUseAlias(pCxt, pCol, &found); + *translateAsAlias = true; + } + } + return res; } static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { @@ -1895,7 +1899,7 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { if (pCxt->pParseCxt->biMode) { SNode** ppNode = (SNode**)pCol; - bool ret; + bool ret; pCxt->errCode = biRewriteToTbnameFunc(pCxt, ppNode, &ret); if (TSDB_CODE_SUCCESS != pCxt->errCode) return DEAL_RES_ERROR; if (ret) { @@ -1908,8 +1912,7 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { res = translateColumnWithPrefix(pCxt, pCol); } else { bool found = false; - if ((pCxt->currClause == SQL_CLAUSE_ORDER_BY) && - !(*pCol)->node.asParam) { + if ((pCxt->currClause == SQL_CLAUSE_ORDER_BY) && !(*pCol)->node.asParam) { res = translateColumnUseAlias(pCxt, pCol, &found); } if (DEAL_RES_ERROR != res && !found) { @@ -1919,9 +1922,7 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { res = translateColumnWithoutPrefix(pCxt, pCol); } } - if (clauseSupportAlias(pCxt->currClause) && - !(*pCol)->node.asParam && - res != DEAL_RES_CONTINUE && + if (clauseSupportAlias(pCxt->currClause) && !(*pCol)->node.asParam && res != DEAL_RES_CONTINUE && res != DEAL_RES_END) { res = translateColumnUseAlias(pCxt, pCol, &found); } @@ -2478,8 +2479,8 @@ static int32_t translateIndefiniteRowsFunc(STranslateContext* pCxt, SFunctionNod return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } if (pSelect->hasIndefiniteRowsFunc && - (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc)) && - 
(pSelect->lastProcessByRowFuncId == -1 || !fmIsProcessByRowFunc(pFunc->funcId))) { + (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc)) && + (pSelect->lastProcessByRowFuncId == -1 || !fmIsProcessByRowFunc(pFunc->funcId))) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } if (pSelect->lastProcessByRowFuncId != -1 && pSelect->lastProcessByRowFuncId != pFunc->funcId) { @@ -2651,14 +2652,14 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu "%s function must be used in select statements", pFunc->functionName); } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - bool isTimelineAlignedQuery = false; + bool isTimelineAlignedQuery = false; if ((NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery))) { int32_t code = isTimeLineAlignedQuery(pCxt->pCurrStmt, &isTimelineAlignedQuery); if (TSDB_CODE_SUCCESS != code) return code; if (!isTimelineAlignedQuery) return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s function requires valid time series input", pFunc->functionName); + "%s function requires valid time series input", pFunc->functionName); } if (NULL != pSelect->pFromTable && QUERY_NODE_JOIN_TABLE == nodeType(pSelect->pFromTable) && (TIME_LINE_GLOBAL != pSelect->timeLineCurMode && TIME_LINE_MULTI != pSelect->timeLineCurMode)) { @@ -2750,9 +2751,10 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p } if (NULL != pSelect->pWindow) { - if (QUERY_NODE_EVENT_WINDOW == nodeType(pSelect->pWindow) || QUERY_NODE_COUNT_WINDOW == nodeType(pSelect->pWindow)) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s function is not supported in count/event window", pFunc->functionName); + if (QUERY_NODE_EVENT_WINDOW == nodeType(pSelect->pWindow) || + 
QUERY_NODE_COUNT_WINDOW == nodeType(pSelect->pWindow)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, + "%s function is not supported in count/event window", pFunc->functionName); } } return TSDB_CODE_SUCCESS; @@ -2844,6 +2846,9 @@ static void setFuncClassification(STranslateContext* pCxt, SFunctionNode* pFunc) pSelect->hasUniqueFunc = pSelect->hasUniqueFunc ? true : (FUNCTION_TYPE_UNIQUE == pFunc->funcType); pSelect->hasTailFunc = pSelect->hasTailFunc ? true : (FUNCTION_TYPE_TAIL == pFunc->funcType); pSelect->hasInterpFunc = pSelect->hasInterpFunc ? true : (FUNCTION_TYPE_INTERP == pFunc->funcType); + pSelect->hasTwaOrElapsedFunc = pSelect->hasTwaOrElapsedFunc ? true + : (FUNCTION_TYPE_TWA == pFunc->funcType || + FUNCTION_TYPE_ELAPSED == pFunc->funcType); pSelect->hasInterpPseudoColFunc = pSelect->hasInterpPseudoColFunc ? true : fmIsInterpPseudoColumnFunc(pFunc->funcId); pSelect->hasForecastFunc = pSelect->hasForecastFunc ? true : (FUNCTION_TYPE_FORECAST == pFunc->funcType); @@ -2861,7 +2866,7 @@ static void setFuncClassification(STranslateContext* pCxt, SFunctionNode* pFunc) static int32_t rewriteFuncToValue(STranslateContext* pCxt, char** pLiteral, SNode** pNode) { SValueNode* pVal = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -2900,7 +2905,7 @@ static int32_t rewriteDatabaseFunc(STranslateContext* pCxt, SNode** pNode) { } static int32_t rewriteClentVersionFunc(STranslateContext* pCxt, SNode** pNode) { - char* pVer = taosStrdup((void*)version); + char* pVer = taosStrdup((void*)td_version); if (NULL == pVer) { return terrno; } @@ -2970,7 +2975,7 @@ static int32_t rewriteSystemInfoFunc(STranslateContext* pCxt, SNode** pNode) { static int32_t replacePsedudoColumnFuncWithColumn(STranslateContext* pCxt, SNode** ppNode) { SColumnNode* pCol = NULL; - int32_t code = 
nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -3222,8 +3227,7 @@ static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode** pFunc pCxt->errCode = getFuncInfo(pCxt, *pFunc); if (TSDB_CODE_SUCCESS == pCxt->errCode) { - if ((SQL_CLAUSE_GROUP_BY == pCxt->currClause || - SQL_CLAUSE_PARTITION_BY == pCxt->currClause) && + if ((SQL_CLAUSE_GROUP_BY == pCxt->currClause || SQL_CLAUSE_PARTITION_BY == pCxt->currClause) && fmIsVectorFunc((*pFunc)->funcId)) { pCxt->errCode = TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION; } @@ -3246,7 +3250,7 @@ static EDealRes translateLogicCond(STranslateContext* pCxt, SLogicConditionNode* static int32_t createCastFunc(STranslateContext* pCxt, SNode* pExpr, SDataType dt, SNode** pCast) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -3281,7 +3285,7 @@ static bool isCondition(const SNode* pNode) { static int32_t rewriteIsTrue(SNode* pSrc, SNode** pIsTrue) { SOperatorNode* pOp = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOp); + int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOp); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -3293,11 +3297,11 @@ static int32_t rewriteIsTrue(SNode* pSrc, SNode** pIsTrue) { return TSDB_CODE_SUCCESS; } -extern int8_t gDisplyTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX]; +extern int8_t gDisplyTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX]; static int32_t selectCommonType(SDataType* commonType, const SDataType* newType) { - if (commonType->type < TSDB_DATA_TYPE_NULL || commonType->type >= TSDB_DATA_TYPE_MAX || + if (commonType->type < TSDB_DATA_TYPE_NULL || commonType->type >= TSDB_DATA_TYPE_MAX || newType->type < TSDB_DATA_TYPE_NULL || newType->type >= TSDB_DATA_TYPE_MAX) { - 
return TSDB_CODE_INVALID_PARA; + return TSDB_CODE_INVALID_PARA; } int8_t type1 = commonType->type; int8_t type2 = newType->type; @@ -3307,27 +3311,27 @@ static int32_t selectCommonType(SDataType* commonType, const SDataType* newType) } else { resultType = gDisplyTypes[type2][type1]; } + if (resultType == -1) { - return TSDB_CODE_SCALAR_CONVERT_ERROR; + return TSDB_CODE_SCALAR_CONVERT_ERROR; } + if (commonType->type == newType->type) { commonType->bytes = TMAX(commonType->bytes, newType->bytes); return TSDB_CODE_SUCCESS; } - if (resultType == commonType->type){ - return TSDB_CODE_SUCCESS; - } - if(resultType == newType->type) { - *commonType = *newType; - return TSDB_CODE_SUCCESS; - } - commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), TYPE_BYTES[resultType]); - if(resultType == TSDB_DATA_TYPE_VARCHAR && (IS_FLOAT_TYPE(commonType->type) || IS_FLOAT_TYPE(newType->type))) { - commonType->bytes += TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE]; + + if ((resultType == TSDB_DATA_TYPE_VARCHAR) && (IS_MATHABLE_TYPE(commonType->type) || IS_MATHABLE_TYPE(newType->type))) { + commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), QUERY_NUMBER_MAX_DISPLAY_LEN); + } else if ((resultType == TSDB_DATA_TYPE_NCHAR) && (IS_MATHABLE_TYPE(commonType->type) || IS_MATHABLE_TYPE(newType->type))) { + commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), QUERY_NUMBER_MAX_DISPLAY_LEN * TSDB_NCHAR_SIZE); + } else { + commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), TYPE_BYTES[resultType]); } + commonType->type = resultType; + return TSDB_CODE_SUCCESS; - } static EDealRes translateCaseWhen(STranslateContext* pCxt, SCaseWhenNode* pCaseWhen) { @@ -3350,7 +3354,7 @@ static EDealRes translateCaseWhen(STranslateContext* pCxt, SCaseWhenNode* pCaseW } allNullThen = false; pCxt->errCode = selectCommonType(&pCaseWhen->node.resType, &pThenExpr->resType); - if(TSDB_CODE_SUCCESS != pCxt->errCode){ + if (TSDB_CODE_SUCCESS != pCxt->errCode) { return 
DEAL_RES_ERROR; } } @@ -3358,7 +3362,7 @@ static EDealRes translateCaseWhen(STranslateContext* pCxt, SCaseWhenNode* pCaseW SExprNode* pElseExpr = (SExprNode*)pCaseWhen->pElse; if (NULL != pElseExpr) { pCxt->errCode = selectCommonType(&pCaseWhen->node.resType, &pElseExpr->resType); - if(TSDB_CODE_SUCCESS != pCxt->errCode) { + if (TSDB_CODE_SUCCESS != pCxt->errCode) { return DEAL_RES_ERROR; } } @@ -3459,7 +3463,7 @@ static int32_t getGroupByErrorCode(STranslateContext* pCxt) { static EDealRes rewriteColToSelectValFunc(STranslateContext* pCxt, SNode** pNode) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (TSDB_CODE_SUCCESS != code) { pCxt->errCode = code; return DEAL_RES_ERROR; @@ -3482,7 +3486,7 @@ static EDealRes rewriteColToSelectValFunc(STranslateContext* pCxt, SNode** pNode static EDealRes rewriteExprToGroupKeyFunc(STranslateContext* pCxt, SNode** pNode) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (TSDB_CODE_SUCCESS != code) { pCxt->errCode = code; return DEAL_RES_ERROR; @@ -3757,7 +3761,7 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { bool partionByTbname = hasTbnameFunction(pSelect->pPartitionByList); FOREACH(pPartKey, pSelect->pPartitionByList) { if (nodesEqualNode(pPartKey, *pNode)) { - return pCxt->currClause == SQL_CLAUSE_HAVING ? DEAL_RES_IGNORE_CHILD : rewriteExprToGroupKeyFunc(pCxt, pNode); + return pSelect->hasAggFuncs ? 
rewriteExprToGroupKeyFunc(pCxt, pNode) : DEAL_RES_IGNORE_CHILD; } if ((partionByTbname) && QUERY_NODE_COLUMN == nodeType(*pNode) && ((SColumnNode*)*pNode)->colType == COLUMN_TYPE_TAG) { @@ -3866,7 +3870,7 @@ static EDealRes doCheckAggColCoexist(SNode** pNode, void* pContext) { static EDealRes doCheckGetAggColCoexist(SNode** pNode, void* pContext) { CheckAggColCoexistCxt* pCxt = (CheckAggColCoexistCxt*)pContext; - int32_t code = 0; + int32_t code = 0; if (isVectorFunc(*pNode)) { return DEAL_RES_IGNORE_CHILD; } @@ -3893,7 +3897,7 @@ static int32_t resetSelectFuncNumWithoutDup(SSelectStmt* pSelect) { pSelect->selectFuncNum = 0; pSelect->lastProcessByRowFuncId = -1; SNodeList* pNodeList = NULL; - int32_t code = nodesMakeList(&pNodeList); + int32_t code = nodesMakeList(&pNodeList); if (TSDB_CODE_SUCCESS != code) return code; code = nodesCollectSelectFuncs(pSelect, SQL_CLAUSE_FROM, NULL, fmIsSelectFunc, pNodeList); if (TSDB_CODE_SUCCESS != code) { @@ -4278,8 +4282,8 @@ static int32_t setTableTsmas(STranslateContext* pCxt, SName* pName, SRealTableNo SVgroupInfo vgInfo = {0}; bool exists = false; toName(pCxt->pParseCxt->acctId, pRealTable->table.dbName, "", &tsmaTargetTbName); - int32_t len = tsnprintf(buf, TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN, "%s.%s_%s", pTsma->dbFName, pTsma->name, - pRealTable->table.tableName); + int32_t len = tsnprintf(buf, TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN, "%s.%s_%s", pTsma->dbFName, + pTsma->name, pRealTable->table.tableName); len = taosCreateMD5Hash(buf, len); strncpy(tsmaTargetTbName.tname, buf, MD5_OUTPUT_LEN); code = collectUseTable(&tsmaTargetTbName, pCxt->pTargetTables); @@ -4364,7 +4368,7 @@ static EDealRes doTranslateTbName(SNode** pNode, void* pContext) { if (FUNCTION_TYPE_TBNAME == pFunc->funcType) { SRewriteTbNameContext* pCxt = (SRewriteTbNameContext*)pContext; SValueNode* pVal = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); if 
(TSDB_CODE_SUCCESS != code) { pCxt->errCode = code; return DEAL_RES_ERROR; @@ -4957,8 +4961,7 @@ int32_t translateTable(STranslateContext* pCxt, SNode** pTable, SNode* pJoinPare } code = translateAudit(pCxt, pRealTable, &name); #endif - if (TSDB_CODE_SUCCESS == code) - code = setTableVgroupList(pCxt, &name, pRealTable); + if (TSDB_CODE_SUCCESS == code) code = setTableVgroupList(pCxt, &name, pRealTable); if (TSDB_CODE_SUCCESS == code) { code = setTableIndex(pCxt, &name, pRealTable); } @@ -5063,7 +5066,7 @@ static int32_t createAllColumns(STranslateContext* pCxt, bool igTags, SNodeList* static int32_t createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr, SNode** ppNodeOut) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -5153,12 +5156,12 @@ static int32_t createMultiResFuncsParas(STranslateContext* pCxt, SNodeList* pSrc static int32_t createMultiResFuncs(SFunctionNode* pSrcFunc, SNodeList* pExprs, SNodeList** pOutput) { SNodeList* pFuncs = NULL; - int32_t code = nodesMakeList(&pFuncs); + int32_t code = nodesMakeList(&pFuncs); if (NULL == pFuncs) { return code; } - SNode* pExpr = NULL; + SNode* pExpr = NULL; FOREACH(pExpr, pExprs) { SNode* pNode = NULL; code = createMultiResFunc(pSrcFunc, (SExprNode*)pExpr, &pNode); @@ -5205,7 +5208,7 @@ static int32_t createTags(STranslateContext* pCxt, SNodeList** pOutput) { SSchema* pTagsSchema = getTableTagSchema(pMeta); for (int32_t i = 0; i < pMeta->tableInfo.numOfTags; ++i) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (TSDB_CODE_SUCCESS != code) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY); } @@ -5291,7 +5294,7 @@ static int32_t getPositionValue(const SValueNode* pVal) { } static int32_t 
translateClausePosition(STranslateContext* pCxt, SNodeList* pProjectionList, SNodeList* pClauseList, - bool* pOther) { + bool* pOther) { *pOther = false; SNode* pNode = NULL; WHERE_EACH(pNode, pClauseList) { @@ -5350,8 +5353,8 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) { } static EDealRes needFillImpl(SNode* pNode, void* pContext) { - if ((isAggFunc(pNode) || isInterpFunc(pNode)) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType - && FUNCTION_TYPE_GROUP_CONST_VALUE != ((SFunctionNode*)pNode)->funcType) { + if ((isAggFunc(pNode) || isInterpFunc(pNode)) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType && + FUNCTION_TYPE_GROUP_CONST_VALUE != ((SFunctionNode*)pNode)->funcType) { *(bool*)pContext = true; return DEAL_RES_END; } @@ -5435,8 +5438,8 @@ static int32_t checkProjectAlias(STranslateContext* pCxt, SNodeList* pProjection SHashObj* pUserAliasSet = taosHashInit(LIST_LENGTH(pProjectionList), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); if (!pUserAliasSet) return terrno; - SNode* pProject = NULL; - int32_t code = TSDB_CODE_SUCCESS; + SNode* pProject = NULL; + int32_t code = TSDB_CODE_SUCCESS; FOREACH(pProject, pProjectionList) { SExprNode* pExpr = (SExprNode*)pProject; if (NULL != taosHashGet(pUserAliasSet, pExpr->userAlias, strlen(pExpr->userAlias))) { @@ -5458,11 +5461,9 @@ static int32_t translateProjectionList(STranslateContext* pCxt, SSelectStmt* pSe if (!pSelect->isSubquery) { return rewriteProjectAlias(pSelect->pProjectionList); } else { - SNode* pNode; + SNode* pNode; int32_t projIdx = 1; - FOREACH(pNode, pSelect->pProjectionList) { - ((SExprNode*)pNode)->projIdx = projIdx++; - } + FOREACH(pNode, pSelect->pProjectionList) { ((SExprNode*)pNode)->projIdx = projIdx++; } return TSDB_CODE_SUCCESS; } } @@ -5472,12 +5473,13 @@ typedef struct SReplaceGroupByAliasCxt { SNodeList* pProjectionList; } SReplaceGroupByAliasCxt; -static EDealRes replaceGroupByAliasImpl(SNode** 
pNode, void* pContext) { +static EDealRes translateGroupPartitionByImpl(SNode** pNode, void* pContext) { SReplaceGroupByAliasCxt* pCxt = pContext; SNodeList* pProjectionList = pCxt->pProjectionList; SNode* pProject = NULL; + int32_t code = TSDB_CODE_SUCCESS; + STranslateContext* pTransCxt = pCxt->pTranslateCxt; if (QUERY_NODE_VALUE == nodeType(*pNode)) { - STranslateContext* pTransCxt = pCxt->pTranslateCxt; SValueNode* pVal = (SValueNode*) *pNode; if (DEAL_RES_ERROR == translateValue(pTransCxt, pVal)) { return DEAL_RES_CONTINUE; @@ -5488,43 +5490,59 @@ static EDealRes replaceGroupByAliasImpl(SNode** pNode, void* pContext) { int32_t pos = getPositionValue(pVal); if (0 < pos && pos <= LIST_LENGTH(pProjectionList)) { SNode* pNew = NULL; - int32_t code = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1), (SNode**)&pNew); + code = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1), (SNode**)&pNew); if (TSDB_CODE_SUCCESS != code) { pCxt->pTranslateCxt->errCode = code; return DEAL_RES_ERROR; } nodesDestroyNode(*pNode); *pNode = pNew; - return DEAL_RES_CONTINUE; - } else { - return DEAL_RES_CONTINUE; } + code = translateExpr(pTransCxt, pNode); + if (TSDB_CODE_SUCCESS != code) { + pTransCxt->errCode = code; + return DEAL_RES_ERROR; + } + return DEAL_RES_CONTINUE; } else if (QUERY_NODE_COLUMN == nodeType(*pNode)) { - STranslateContext* pTransCxt = pCxt->pTranslateCxt; - return translateColumn(pTransCxt, (SColumnNode**)pNode); + bool asAlias = false; + EDealRes res = translateColumnInGroupByClause(pTransCxt, (SColumnNode**)pNode, &asAlias); + if (DEAL_RES_ERROR == res) { + return DEAL_RES_ERROR; + } + pTransCxt->errCode = TSDB_CODE_SUCCESS; + if (nodeType(*pNode) == QUERY_NODE_COLUMN && !asAlias) { + return DEAL_RES_CONTINUE; + } + code = translateExpr(pTransCxt, pNode); + if (TSDB_CODE_SUCCESS != code) { + pTransCxt->errCode = code; + return DEAL_RES_ERROR; + } + return DEAL_RES_CONTINUE; } - - return DEAL_RES_CONTINUE; + return doTranslateExpr(pNode, 
pTransCxt); } -static int32_t replaceGroupByAlias(STranslateContext* pCxt, SSelectStmt* pSelect) { +static int32_t translateGroupByList(STranslateContext* pCxt, SSelectStmt* pSelect) { if (NULL == pSelect->pGroupByList) { return TSDB_CODE_SUCCESS; } SReplaceGroupByAliasCxt cxt = { .pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList}; - nodesRewriteExprsPostOrder(pSelect->pGroupByList, replaceGroupByAliasImpl, &cxt); + nodesRewriteExprsPostOrder(pSelect->pGroupByList, translateGroupPartitionByImpl, &cxt); return pCxt->errCode; } -static int32_t replacePartitionByAlias(STranslateContext* pCxt, SSelectStmt* pSelect) { +static int32_t translatePartitionByList(STranslateContext* pCxt, SSelectStmt* pSelect) { if (NULL == pSelect->pPartitionByList) { return TSDB_CODE_SUCCESS; } + SReplaceGroupByAliasCxt cxt = { .pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList}; - nodesRewriteExprsPostOrder(pSelect->pPartitionByList, replaceGroupByAliasImpl, &cxt); + nodesRewriteExprsPostOrder(pSelect->pPartitionByList, translateGroupPartitionByImpl, &cxt); return pCxt->errCode; } @@ -5588,11 +5606,8 @@ static int32_t translateGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) { NODES_DESTORY_LIST(pSelect->pGroupByList); return TSDB_CODE_SUCCESS; } - code = replaceGroupByAlias(pCxt, pSelect); - } - if (TSDB_CODE_SUCCESS == code) { pSelect->timeLineResMode = TIME_LINE_NONE; - code = translateExprList(pCxt, pSelect->pGroupByList); + code = translateGroupByList(pCxt, pSelect); } return code; } @@ -5613,7 +5628,7 @@ static int32_t getQueryTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWi return TSDB_CODE_SUCCESS; } - SNode* pCond = NULL; + SNode* pCond = NULL; int32_t code = nodesCloneNode(pWhere, &pCond); if (NULL == pCond) { return code; @@ -5787,14 +5802,14 @@ static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode* return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_OFFSET_TOO_BIG); } if (!fixed) { - double 
offsetMonth = 0, intervalMonth = 0; + double offsetMonth = 0, intervalMonth = 0; int32_t code = getMonthsFromTimeVal(pOffset->datum.i, precision, pOffset->unit, &offsetMonth); if (TSDB_CODE_SUCCESS != code) { - return code; + return code; } code = getMonthsFromTimeVal(pInter->datum.i, precision, pInter->unit, &intervalMonth); if (TSDB_CODE_SUCCESS != code) { - return code; + return code; } if (offsetMonth > intervalMonth) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_OFFSET_TOO_BIG); @@ -5819,14 +5834,14 @@ static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode* return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_SLIDING_TOO_SMALL); } if (valInter) { - double slidingMonth = 0, intervalMonth = 0; + double slidingMonth = 0, intervalMonth = 0; int32_t code = getMonthsFromTimeVal(pSliding->datum.i, precision, pSliding->unit, &slidingMonth); if (TSDB_CODE_SUCCESS != code) { - return code; + return code; } code = getMonthsFromTimeVal(pInter->datum.i, precision, pInter->unit, &intervalMonth); if (TSDB_CODE_SUCCESS != code) { - return code; + return code; } if (slidingMonth > intervalMonth) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_SLIDING_TOO_BIG); @@ -6055,7 +6070,7 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) { int32_t code = 0; if (QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) { if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && - !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { + !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { bool isTimelineAlignedQuery = false; code = isTimeLineAlignedQuery(pCxt->pCurrStmt, &isTimelineAlignedQuery); if (TSDB_CODE_SUCCESS != code) return code; @@ -6094,7 +6109,7 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t createDefaultFillNode(STranslateContext* pCxt, SNode** 
pOutput) { SFillNode* pFill = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FILL, (SNode**)&pFill); + int32_t code = nodesMakeNode(QUERY_NODE_FILL, (SNode**)&pFill); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -6117,7 +6132,7 @@ static int32_t createDefaultFillNode(STranslateContext* pCxt, SNode** pOutput) { static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) { SValueNode* pEvery = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pEvery); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pEvery); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -6212,12 +6227,24 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { } } - if (NULL == pSelect->pRange || NULL == pSelect->pEvery || NULL == pSelect->pFill) { - if (pSelect->pRange != NULL && QUERY_NODE_OPERATOR == nodeType(pSelect->pRange) && pSelect->pEvery == NULL) { - // single point interp every can be omitted - } else { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE, - "Missing RANGE clause, EVERY clause or FILL clause"); + if (pCxt->createStream) { + if (NULL != pSelect->pRange) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "Stream Unsupported RANGE clause"); + } + + if (NULL == pSelect->pEvery || NULL == pSelect->pFill) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "Missing EVERY clause or FILL clause"); + } + } else { + if (NULL == pSelect->pRange || NULL == pSelect->pEvery || NULL == pSelect->pFill) { + if (pSelect->pRange != NULL && QUERY_NODE_OPERATOR == nodeType(pSelect->pRange) && pSelect->pEvery == NULL) { + // single point interp every can be omitted + } else { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE, + "Missing RANGE clause, EVERY clause or FILL clause"); + } } } @@ -6287,10 +6314,7 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, 
SSelectStmt* pSelec (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { pSelect->timeLineResMode = TIME_LINE_MULTI; } - code = replacePartitionByAlias(pCxt, pSelect); - if (TSDB_CODE_SUCCESS == code) { - code = translateExprList(pCxt, pSelect->pPartitionByList); - } + code = translatePartitionByList(pCxt, pSelect); } if (TSDB_CODE_SUCCESS == code) { code = translateExprList(pCxt, pSelect->pTags); @@ -6308,7 +6332,7 @@ typedef struct SEqCondTbNameTableInfo { //[tableAlias.]tbname = tbNamVal static int32_t isOperatorEqTbnameCond(STranslateContext* pCxt, SOperatorNode* pOperator, char** ppTableAlias, - SArray** ppTabNames, bool* pRet) { + SArray** ppTabNames, bool* pRet) { if (pOperator->opType != OP_TYPE_EQUAL) { *pRet = false; return TSDB_CODE_SUCCESS; @@ -6359,7 +6383,7 @@ static int32_t isOperatorEqTbnameCond(STranslateContext* pCxt, SOperatorNode* pO //[tableAlias.]tbname in (value1, value2, ...) static int32_t isOperatorTbnameInCond(STranslateContext* pCxt, SOperatorNode* pOperator, char** ppTableAlias, - SArray** ppTbNames, bool* pRet) { + SArray** ppTbNames, bool* pRet) { if (pOperator->opType != OP_TYPE_IN) return false; if (nodeType(pOperator->pLeft) != QUERY_NODE_FUNCTION || ((SFunctionNode*)(pOperator->pLeft))->funcType != FUNCTION_TYPE_TBNAME || @@ -6383,8 +6407,8 @@ static int32_t isOperatorTbnameInCond(STranslateContext* pCxt, SOperatorNode* pO SNodeListNode* pValueListNode = (SNodeListNode*)pOperator->pRight; *ppTbNames = taosArrayInit(LIST_LENGTH(pValueListNode->pNodeList), sizeof(void*)); if (!*ppTbNames) return terrno; - SNodeList* pValueNodeList = pValueListNode->pNodeList; - SNode* pValNode = NULL; + SNodeList* pValueNodeList = pValueListNode->pNodeList; + SNode* pValNode = NULL; FOREACH(pValNode, pValueNodeList) { if (nodeType(pValNode) != QUERY_NODE_VALUE) { *pRet = false; @@ -6400,7 +6424,8 @@ static int32_t isOperatorTbnameInCond(STranslateContext* pCxt, SOperatorNode* pO return 
TSDB_CODE_SUCCESS; } -static int32_t findEqCondTbNameInOperatorNode(STranslateContext* pCxt, SNode* pWhere, SEqCondTbNameTableInfo* pInfo, bool* pRet) { +static int32_t findEqCondTbNameInOperatorNode(STranslateContext* pCxt, SNode* pWhere, SEqCondTbNameTableInfo* pInfo, + bool* pRet) { int32_t code = TSDB_CODE_SUCCESS; char* pTableAlias = NULL; bool eqTbnameCond = false, tbnameInCond = false; @@ -6467,7 +6492,7 @@ static int32_t findEqualCondTbnameInLogicCondAnd(STranslateContext* pCxt, SNode* static int32_t unionEqualCondTbnamesOfSameTable(SArray* aTableTbnames, SEqCondTbNameTableInfo* pInfo) { int32_t code = TSDB_CODE_SUCCESS; - bool bFoundTable = false; + bool bFoundTable = false; for (int i = 0; i < taosArrayGetSize(aTableTbnames); ++i) { SEqCondTbNameTableInfo* info = taosArrayGet(aTableTbnames, i); if (info->pRealTable == pInfo->pRealTable) { @@ -6547,7 +6572,7 @@ static int32_t findEqualCondTbname(STranslateContext* pCxt, SNode* pWhere, SArra } static void findVgroupsFromEqualTbname(STranslateContext* pCxt, SArray* aTbnames, const char* dbName, - int32_t numOfVgroups, SVgroupsInfo* vgsInfo) { + int32_t numOfVgroups, SVgroupsInfo* vgsInfo) { int32_t nVgroups = 0; int32_t nTbls = taosArrayGetSize(aTbnames); @@ -6584,10 +6609,10 @@ static void findVgroupsFromEqualTbname(STranslateContext* pCxt, SArray* aTbnames } static int32_t replaceToChildTableQuery(STranslateContext* pCxt, SEqCondTbNameTableInfo* pInfo) { - SName snameTb = {0}; - int32_t code = 0; + SName snameTb = {0}; + int32_t code = 0; SRealTableNode* pRealTable = pInfo->pRealTable; - char* tbName = taosArrayGetP(pInfo->aTbnames, 0); + char* tbName = taosArrayGetP(pInfo->aTbnames, 0); toName(pCxt->pParseCxt->acctId, pRealTable->table.dbName, tbName, &snameTb); STableMeta* pMeta = NULL; @@ -6604,14 +6629,14 @@ static int32_t replaceToChildTableQuery(STranslateContext* pCxt, SEqCondTbNameTa pRealTable->stbRewrite = true; if (pRealTable->pTsmas) { - // if select from a child table, fetch it's corresponding 
tsma target child table infos + // if select from a child table, fetch it's corresponding tsma target child table infos char buf[TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN + 1]; for (int32_t i = 0; i < pRealTable->pTsmas->size; ++i) { STableTSMAInfo* pTsma = taosArrayGetP(pRealTable->pTsmas, i); SName tsmaTargetTbName = {0}; toName(pCxt->pParseCxt->acctId, pRealTable->table.dbName, "", &tsmaTargetTbName); int32_t len = tsnprintf(buf, TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN, "%s.%s_%s", pTsma->dbFName, pTsma->name, - pRealTable->table.tableName); + pRealTable->table.tableName); len = taosCreateMD5Hash(buf, len); strncpy(tsmaTargetTbName.tname, buf, MD5_OUTPUT_LEN); STsmaTargetTbInfo ctbInfo = {0}; @@ -6639,16 +6664,16 @@ static int32_t replaceToChildTableQuery(STranslateContext* pCxt, SEqCondTbNameTa } static int32_t setEqualTbnameTableVgroups(STranslateContext* pCxt, SSelectStmt* pSelect, SArray* aTables) { - int32_t code = TSDB_CODE_SUCCESS; - int32_t aTableNum = taosArrayGetSize(aTables); - int32_t nTbls = 0; - bool stableQuery = false; + int32_t code = TSDB_CODE_SUCCESS; + int32_t aTableNum = taosArrayGetSize(aTables); + int32_t nTbls = 0; + bool stableQuery = false; SEqCondTbNameTableInfo* pInfo = NULL; qDebug("start to update stable vg for tbname optimize, aTableNum:%d", aTableNum); for (int i = 0; i < aTableNum; ++i) { pInfo = taosArrayGet(aTables, i); - int32_t numOfVgs = pInfo->pRealTable->pVgroupList->numOfVgroups; + int32_t numOfVgs = pInfo->pRealTable->pVgroupList->numOfVgroups; nTbls = taosArrayGetSize(pInfo->aTbnames); SVgroupsInfo* vgsInfo = taosMemoryMalloc(sizeof(SVgroupsInfo) + nTbls * sizeof(SVgroupInfo)); @@ -6714,7 +6739,8 @@ static int32_t setEqualTbnameTableVgroups(STranslateContext* pCxt, SSelectStmt* } } - qDebug("before ctbname optimize, code:%d, aTableNum:%d, nTbls:%d, stableQuery:%d", code, aTableNum, nTbls, stableQuery); + qDebug("before ctbname optimize, code:%d, aTableNum:%d, nTbls:%d, stableQuery:%d", code, aTableNum, nTbls, + 
stableQuery); if (TSDB_CODE_SUCCESS == code && 1 == aTableNum && 1 == nTbls && stableQuery && NULL == pInfo->pRealTable->pTsmas) { code = replaceToChildTableQuery(pCxt, pInfo); @@ -6770,13 +6796,13 @@ static int32_t checkLimit(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t createPrimaryKeyColByTable(STranslateContext* pCxt, STableNode* pTable, SNode** pPrimaryKey) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (TSDB_CODE_SUCCESS != code) { return code; } pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; strcpy(pCol->colName, ROWTS_PSEUDO_COLUMN_NAME); - bool found = false; + bool found = false; code = findAndSetColumn(pCxt, &pCol, pTable, &found, true); if (TSDB_CODE_SUCCESS != code || !found) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_VALID_PRIM_TS_REQUIRED); @@ -6808,8 +6834,8 @@ static EDealRes collectTableAlias(SNode* pNode, void* pContext) { *(SSHashObj**)pContext = pHash; } - if (TSDB_CODE_SUCCESS != tSimpleHashPut(*(SSHashObj**)pContext, pCol->tableAlias, strlen(pCol->tableAlias), pCol->tableAlias, - sizeof(pCol->tableAlias))) { + if (TSDB_CODE_SUCCESS != tSimpleHashPut(*(SSHashObj**)pContext, pCol->tableAlias, strlen(pCol->tableAlias), + pCol->tableAlias, sizeof(pCol->tableAlias))) { return DEAL_RES_ERROR; } @@ -6869,13 +6895,13 @@ static int32_t appendTsForImplicitTsFunc(STranslateContext* pCxt, SSelectStmt* p static int32_t createPkColByTable(STranslateContext* pCxt, SRealTableNode* pTable, SNode** pPk) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (TSDB_CODE_SUCCESS != code) { return code; } pCol->colId = pTable->pMeta->schema[1].colId; strcpy(pCol->colName, pTable->pMeta->schema[1].name); - bool found = false; + bool found = false; code = findAndSetColumn(pCxt, &pCol, (STableNode*)pTable, 
&found, true); if (TSDB_CODE_SUCCESS != code || !found) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTERNAL_ERROR); @@ -6943,7 +6969,7 @@ static EDealRes replaceOrderByAliasImpl(SNode** pNode, void* pContext) { (QUERY_NODE_COLUMN == nodeType(pProject) && !nodesEqualNode(*pNode, pProject)))) { continue; } - SNode* pNew = NULL; + SNode* pNew = NULL; int32_t code = nodesCloneNode(pProject, &pNew); if (NULL == pNew) { pCxt->pTranslateCxt->errCode = code; @@ -6966,7 +6992,7 @@ static EDealRes replaceOrderByAliasImpl(SNode** pNode, void* pContext) { } int32_t pos = getPositionValue(pVal); if (0 < pos && pos <= LIST_LENGTH(pProjectionList)) { - SNode* pNew = NULL; + SNode* pNew = NULL; int32_t code = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1), &pNew); if (NULL == pNew) { pCxt->pTranslateCxt->errCode = code; @@ -7075,8 +7101,7 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect } if (TSDB_CODE_SUCCESS == code) { code = resetSelectFuncNumWithoutDup(pSelect); - if (TSDB_CODE_SUCCESS == code) - code = checkAggColCoexist(pCxt, pSelect); + if (TSDB_CODE_SUCCESS == code) code = checkAggColCoexist(pCxt, pSelect); } /* if (TSDB_CODE_SUCCESS == code) { @@ -7137,7 +7162,7 @@ static int32_t translateSelect(STranslateContext* pCxt, SSelectStmt* pSelect) { static SNode* createSetOperProject(const char* pTableAlias, SNode* pNode) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (TSDB_CODE_SUCCESS != code) { return NULL; } @@ -7350,7 +7375,7 @@ static int32_t translateInsertQuery(STranslateContext* pCxt, SInsertStmt* pInser static int32_t addOrderByPrimaryKeyToQueryImpl(STranslateContext* pCxt, SNode* pPrimaryKeyExpr, SNodeList** pOrderByList) { SOrderByExprNode* pOrderByExpr = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR, (SNode**)&pOrderByExpr); + int32_t code = 
nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR, (SNode**)&pOrderByExpr); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -7493,7 +7518,7 @@ static int32_t buildCreateDbRetentions(const SNodeList* pRetentions, SCreateDbRe } static int32_t buildCreateDbReq(STranslateContext* pCxt, SCreateDatabaseStmt* pStmt, SCreateDbReq* pReq) { - SName name = {0}; + SName name = {0}; int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, pReq->db); @@ -7532,6 +7557,8 @@ static int32_t buildCreateDbReq(STranslateContext* pCxt, SCreateDatabaseStmt* pS pReq->ignoreExist = pStmt->ignoreExists; pReq->withArbitrator = pStmt->pOptions->withArbitrator; pReq->encryptAlgorithm = pStmt->pOptions->encryptAlgorithm; + tstrncpy(pReq->dnodeListStr, pStmt->pOptions->dnodeListStr, TSDB_DNODE_LIST_LEN); + return buildCreateDbRetentions(pStmt->pOptions->pRetentions, pReq); } @@ -8005,8 +8032,8 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName code = checkOptionsDependency(pCxt, pDbName, pOptions); } if (TSDB_CODE_SUCCESS == code) { - code = - checkDbRangeOption(pCxt, "s3_chunksize", pOptions->s3ChunkSize, TSDB_MIN_S3_CHUNK_SIZE, TSDB_MAX_S3_CHUNK_SIZE); + code = checkDbRangeOption(pCxt, "s3_chunkpages", pOptions->s3ChunkSize, TSDB_MIN_S3_CHUNK_SIZE, + TSDB_MAX_S3_CHUNK_SIZE); } if (TSDB_CODE_SUCCESS == code) { code = checkDbRangeOption(pCxt, "s3_compact", pOptions->s3Compact, TSDB_MIN_S3_COMPACT, TSDB_MAX_S3_COMPACT); @@ -8026,7 +8053,7 @@ static int32_t checkCreateDatabase(STranslateContext* pCxt, SCreateDatabaseStmt* CMD_TYPE* pCmdReq = genericCmd; \ char* cmdSql = taosMemoryMalloc(sqlLen); \ if (cmdSql == NULL) { \ - return terrno; \ + return terrno; \ } \ memcpy(cmdSql, sql, sqlLen); \ pCmdReq->sqlLen = sqlLen; \ @@ -8210,7 +8237,7 @@ static int32_t translateCreateDatabase(STranslateContext* pCxt, SCreateDatabaseS static int32_t 
translateDropDatabase(STranslateContext* pCxt, SDropDatabaseStmt* pStmt) { SDropDbReq dropReq = {0}; SName name = {0}; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, dropReq.db); dropReq.ignoreNotExists = pStmt->ignoreNotExists; @@ -8292,7 +8319,7 @@ static int32_t translateAlterDatabase(STranslateContext* pCxt, SAlterDatabaseStm static int32_t translateTrimDatabase(STranslateContext* pCxt, STrimDatabaseStmt* pStmt) { STrimDbReq req = {.maxSpeed = pStmt->maxSpeed}; SName name = {0}; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, req.db); return buildCmdMsg(pCxt, TDMT_MND_TRIM_DB, (FSerializeFunc)tSerializeSTrimDbReq, &req); @@ -8329,7 +8356,7 @@ static int32_t columnDefNodeToField(SNodeList* pList, SArray** pArray, bool calB if (!pArray) return terrno; int32_t code = TSDB_CODE_SUCCESS; - SNode* pNode; + SNode* pNode; FOREACH(pNode, pList) { SColumnDefNode* pCol = (SColumnDefNode*)pNode; SFieldWithOptions field = {.type = pCol->dataType.type, .bytes = calcTypeBytes(pCol->dataType)}; @@ -8801,7 +8828,7 @@ typedef struct SSampleAstInfo { static int32_t buildTableForSampleAst(SSampleAstInfo* pInfo, SNode** pOutput) { SRealTableNode* pTable = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_REAL_TABLE, (SNode**)&pTable); + int32_t code = nodesMakeNode(QUERY_NODE_REAL_TABLE, (SNode**)&pTable); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -8815,7 +8842,7 @@ static int32_t buildTableForSampleAst(SSampleAstInfo* pInfo, SNode** pOutput) { static int32_t addWstartToSampleProjects(SNodeList* pProjectionList) { 
SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -8826,7 +8853,7 @@ static int32_t addWstartToSampleProjects(SNodeList* pProjectionList) { static int32_t addWendToSampleProjects(SNodeList* pProjectionList) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -8837,7 +8864,7 @@ static int32_t addWendToSampleProjects(SNodeList* pProjectionList) { static int32_t addWdurationToSampleProjects(SNodeList* pProjectionList) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -8874,7 +8901,7 @@ static int32_t buildProjectsForSampleAst(SSampleAstInfo* pInfo, SNodeList** pLis static int32_t buildIntervalForSampleAst(SSampleAstInfo* pInfo, SNode** pOutput) { SIntervalWindowNode* pInterval = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_INTERVAL_WINDOW, (SNode**)&pInterval); + int32_t code = nodesMakeNode(QUERY_NODE_INTERVAL_WINDOW, (SNode**)&pInterval); if (NULL == pInterval) { return code; } @@ -8896,7 +8923,7 @@ static int32_t buildIntervalForSampleAst(SSampleAstInfo* pInfo, SNode** pOutput) static int32_t buildSampleAst(STranslateContext* pCxt, SSampleAstInfo* pInfo, char** pAst, int32_t* pLen, char** pExpr, int32_t* pExprLen, int32_t* pProjectionTotalLen) { SSelectStmt* pSelect = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&pSelect); + int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&pSelect); if (NULL == pSelect) { return code; } @@ -8937,7 +8964,7 @@ static void clearSampleAstInfo(SSampleAstInfo* pInfo) { static int32_t makeIntervalVal(SRetention* pRetension, int8_t 
precision, SNode** ppNode) { SValueNode* pVal = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); if (NULL == pVal) { return code; } @@ -8964,7 +8991,7 @@ static int32_t makeIntervalVal(SRetention* pRetension, int8_t precision, SNode** static int32_t createColumnFromDef(SColumnDefNode* pDef, SNode** ppCol) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (NULL == pCol) { return code; } @@ -8975,7 +9002,7 @@ static int32_t createColumnFromDef(SColumnDefNode* pDef, SNode** ppCol) { static int32_t createRollupFunc(SNode* pSrcFunc, SColumnDefNode* pColDef, SNode** ppRollupFunc) { SFunctionNode* pFunc = NULL; - int32_t code = nodesCloneNode(pSrcFunc, (SNode**)&pFunc); + int32_t code = nodesCloneNode(pSrcFunc, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -8995,7 +9022,7 @@ static int32_t createRollupFunc(SNode* pSrcFunc, SColumnDefNode* pColDef, SNode* static int32_t createRollupFuncs(SCreateTableStmt* pStmt, SNodeList** ppList) { SNodeList* pFuncs = NULL; - int32_t code = nodesMakeList(&pFuncs); + int32_t code = nodesMakeList(&pFuncs); if (NULL == pFuncs) { return code; } @@ -9024,7 +9051,8 @@ static int32_t createRollupFuncs(SCreateTableStmt* pStmt, SNodeList** ppList) { } *ppList = pFuncs; - return code;; + return code; + ; } static int32_t createRollupTableMeta(SCreateTableStmt* pStmt, int8_t precision, STableMeta** ppTbMeta) { @@ -9056,7 +9084,7 @@ static int32_t createRollupTableMeta(SCreateTableStmt* pStmt, int8_t precision, static int32_t createTbnameFunction(SFunctionNode** ppFunc) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -9153,8 +9181,7 @@ static int32_t 
buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm // columnDefNodeToField(pStmt->pCols, &pReq->pColumns, true); // columnDefNodeToField(pStmt->pTags, &pReq->pTags, true); code = columnDefNodeToField(pStmt->pCols, &pReq->pColumns, true); - if (TSDB_CODE_SUCCESS == code) - code = tagDefNodeToField(pStmt->pTags, &pReq->pTags, true); + if (TSDB_CODE_SUCCESS == code) code = tagDefNodeToField(pStmt->pTags, &pReq->pTags, true); if (TSDB_CODE_SUCCESS == code) { pReq->numOfColumns = LIST_LENGTH(pStmt->pCols); pReq->numOfTags = LIST_LENGTH(pStmt->pTags); @@ -9174,8 +9201,7 @@ static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tableName); code = tNameExtractFullName(&tableName, pReq->name); } - if (TSDB_CODE_SUCCESS == code) - code = collectUseTable(&tableName, pCxt->pTables); + if (TSDB_CODE_SUCCESS == code) code = collectUseTable(&tableName, pCxt->pTables); if (TSDB_CODE_SUCCESS == code) { code = collectUseTable(&tableName, pCxt->pTargetTables); } @@ -9527,12 +9553,13 @@ static int32_t translateAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt static int32_t translateUseDatabase(STranslateContext* pCxt, SUseDatabaseStmt* pStmt) { SUseDbReq usedbReq = {0}; SName name = {0}; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS == code) { code = tNameExtractFullName(&name, usedbReq.db); } if (TSDB_CODE_SUCCESS == code) - code = getDBVgVersion(pCxt, usedbReq.db, &usedbReq.vgVersion, &usedbReq.dbId, &usedbReq.numOfTable, &usedbReq.stateTs); + code = + getDBVgVersion(pCxt, usedbReq.db, &usedbReq.vgVersion, &usedbReq.dbId, &usedbReq.numOfTable, &usedbReq.stateTs); if (TSDB_CODE_SUCCESS == code) { code = buildCmdMsg(pCxt, TDMT_MND_USE_DB, (FSerializeFunc)tSerializeSUseDbReq, &usedbReq); } @@ 
-9801,9 +9828,9 @@ static int32_t buildCreateSmaReq(STranslateContext* pCxt, SCreateIndexStmt* pStm pReq->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; pReq->offset = (NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0); pReq->sliding = - (NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pReq->interval); + (NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pReq->interval); pReq->slidingUnit = - (NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pReq->intervalUnit); + (NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pReq->intervalUnit); } if (TSDB_CODE_SUCCESS == code && NULL != pStmt->pOptions->pStreamOptions) { @@ -10112,8 +10139,7 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS } else if ('\0' != pStmt->subDbName[0]) { pReq->subType = TOPIC_SUB_TYPE__DB; code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->subDbName, strlen(pStmt->subDbName)); - if (TSDB_CODE_SUCCESS == code) - (void)tNameGetFullDbName(&name, pReq->subDbName); + if (TSDB_CODE_SUCCESS == code) (void)tNameGetFullDbName(&name, pReq->subDbName); } else { pReq->subType = TOPIC_SUB_TYPE__COLUMN; char* dbName = ((SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable))->table.dbName; @@ -10202,7 +10228,7 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* // for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { SSchema* column = &pMeta->schema[0]; SColumnNode* col = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&col); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&col); if (NULL == col) { return code; } @@ -10239,7 +10265,7 @@ static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Only 
supertable table can be used"); } - SNodeList* pProjection = NULL; + SNodeList* pProjection = NULL; SRealTableNode* realTable = NULL; code = checkCollectTopicTags(pCxt, pStmt, pMeta, &pProjection); if (TSDB_CODE_SUCCESS == code) { @@ -10495,15 +10521,14 @@ static void getSourceDatabase(SNode* pStmt, int32_t acctId, char* pDbFName) { (void)tNameGetFullDbName(&name, pDbFName); } -static void getStreamQueryFirstProjectAliasName(SHashObj* pUserAliasSet, char* aliasName, int32_t len) { - if (NULL == taosHashGet(pUserAliasSet, "_wstart", strlen("_wstart"))) { - snprintf(aliasName, len, "%s", "_wstart"); - return; - } - if (NULL == taosHashGet(pUserAliasSet, "ts", strlen("ts"))) { - snprintf(aliasName, len, "%s", "ts"); - return; +static void getStreamQueryFirstProjectAliasName(SHashObj* pUserAliasSet, char* aliasName, int32_t len, char* defaultName[]) { + for (int32_t i = 0; defaultName[i] != NULL; i++) { + if (NULL == taosHashGet(pUserAliasSet, defaultName[i], strlen(defaultName[i]))) { + snprintf(aliasName, len, "%s", defaultName[i]); + return; + } } + do { taosRandStr(aliasName, len - 1); aliasName[len - 1] = '\0'; @@ -10518,7 +10543,47 @@ static int32_t setColumnDefNodePrimaryKey(SColumnDefNode* pNode, bool isPk) { if (!pNode->pOptions) { code = nodesMakeNode(QUERY_NODE_COLUMN_OPTIONS, &pNode->pOptions); } - if (TSDB_CODE_SUCCESS ==code) ((SColumnOptions*)pNode->pOptions)->bPrimaryKey = isPk; + if (TSDB_CODE_SUCCESS == code) ((SColumnOptions*)pNode->pOptions)->bPrimaryKey = isPk; + return code; +} + +static int32_t addIrowTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSelectStmt* pSelect, + SHashObj* pUserAliasSet, SNodeList* pCols, SCMCreateStreamReq* pReq) { + SNode* pProj = nodesListGetNode(pSelect->pProjectionList, 0); + if (!pSelect->hasInterpFunc || + (QUERY_NODE_FUNCTION == nodeType(pProj) && 0 == strcmp("_irowts", ((SFunctionNode*)pProj)->functionName))) { + return TSDB_CODE_SUCCESS; + } + SFunctionNode* pFunc = NULL; + int32_t code = 
nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + if (NULL == pFunc) { + return code; + } + tstrncpy(pFunc->functionName, "_irowts", tListLen(pFunc->functionName)); + tstrncpy(pFunc->node.userAlias, "_irowts", tListLen(pFunc->node.userAlias)); + char* defaultName[] = {"_irowts", NULL}; + getStreamQueryFirstProjectAliasName(pUserAliasSet, pFunc->node.aliasName, sizeof(pFunc->node.aliasName), defaultName); + code = getFuncInfo(pCxt, pFunc); + if (TSDB_CODE_SUCCESS == code) { + code = nodesListPushFront(pSelect->pProjectionList, (SNode*)pFunc); + } + if (TSDB_CODE_SUCCESS != code) { + nodesDestroyNode((SNode*)pFunc); + } + + if (TSDB_CODE_SUCCESS == code && STREAM_CREATE_STABLE_TRUE == pReq->createStb) { + SColumnDefNode* pColDef = NULL; + code = nodesMakeNode(QUERY_NODE_COLUMN_DEF, (SNode**)&pColDef); + if (TSDB_CODE_SUCCESS == code) { + strcpy(pColDef->colName, pFunc->node.aliasName); + pColDef->dataType = pFunc->node.resType; + pColDef->sma = true; + code = setColumnDefNodePrimaryKey(pColDef, false); + } + if (TSDB_CODE_SUCCESS == code) code = nodesListPushFront(pCols, (SNode*)pColDef); + if (TSDB_CODE_SUCCESS != code) nodesDestroyNode((SNode*)pColDef); + } + return code; } @@ -10535,7 +10600,9 @@ static int32_t addWstartTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSele return code; } strcpy(pFunc->functionName, "_wstart"); - getStreamQueryFirstProjectAliasName(pUserAliasSet, pFunc->node.aliasName, sizeof(pFunc->node.aliasName)); + strcpy(pFunc->node.userAlias, "_irowts"); + char* defaultName[] = {"_wstart", "ts", NULL}; + getStreamQueryFirstProjectAliasName(pUserAliasSet, pFunc->node.aliasName, sizeof(pFunc->node.aliasName), defaultName); code = getFuncInfo(pCxt, pFunc); if (TSDB_CODE_SUCCESS == code) { code = nodesListPushFront(pSelect->pProjectionList, (SNode*)pFunc); @@ -10559,7 +10626,7 @@ static int32_t addWstartTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSele return code; } -static int32_t addWstartTsToCreateStreamQuery(STranslateContext* 
pCxt, SNode* pStmt, SNodeList* pCols, +static int32_t addTsKeyToCreateStreamQuery(STranslateContext* pCxt, SNode* pStmt, SNodeList* pCols, SCMCreateStreamReq* pReq) { SSelectStmt* pSelect = (SSelectStmt*)pStmt; SHashObj* pUserAliasSet = NULL; @@ -10567,6 +10634,9 @@ static int32_t addWstartTsToCreateStreamQuery(STranslateContext* pCxt, SNode* pS if (TSDB_CODE_SUCCESS == code) { code = addWstartTsToCreateStreamQueryImpl(pCxt, pSelect, pUserAliasSet, pCols, pReq); } + if (TSDB_CODE_SUCCESS == code) { + code = addIrowTsToCreateStreamQueryImpl(pCxt, pSelect, pUserAliasSet, pCols, pReq); + } taosHashCleanup(pUserAliasSet); return code; } @@ -10589,7 +10659,7 @@ static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStream SNode* pPart = NULL; FOREACH(pPart, pSelect->pPartitionByList) { if (0 == strcmp(getTagNameForCreateStreamTag(pTag), ((SExprNode*)pPart)->userAlias)) { - SNode* pNew = NULL; + SNode* pNew = NULL; int32_t code = nodesCloneNode(pPart, &pNew); if (TSDB_CODE_SUCCESS != code) return code; if (TSDB_CODE_SUCCESS != (code = nodesListMakeStrictAppend(&pSelect->pTags, pNew))) { @@ -10608,7 +10678,7 @@ static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStream static int32_t createNullValue(SNode** ppNode) { SValueNode* pValue = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); if (NULL == pValue) { return code; } @@ -10625,8 +10695,7 @@ static int32_t addNullTagsForExistTable(STranslateContext* pCxt, STableMeta* pMe for (int32_t i = 0; TSDB_CODE_SUCCESS == code && i < numOfTags; ++i) { SNode* pNull = NULL; code = createNullValue(&pNull); - if (TSDB_CODE_SUCCESS == code) - code = nodesListMakeStrictAppend(&pSelect->pTags, pNull); + if (TSDB_CODE_SUCCESS == code) code = nodesListMakeStrictAppend(&pSelect->pTags, pNull); } return code; } @@ -10643,7 +10712,7 @@ static EDealRes rewriteSubtable(SNode** pNode, void* pContext) { SNode* 
pPart = NULL; FOREACH(pPart, pCxt->pPartitionList) { if (0 == strcmp(((SColumnNode*)*pNode)->colName, ((SExprNode*)pPart)->userAlias)) { - SNode* pNew = NULL; + SNode* pNew = NULL; int32_t code = nodesCloneNode(pPart, &pNew); if (NULL == pNew) { pCxt->pCxt->errCode = code; @@ -10686,8 +10755,7 @@ static int32_t addNullTagsForCreateTable(STranslateContext* pCxt, SCreateStreamS for (int32_t i = 0; TSDB_CODE_SUCCESS == code && i < LIST_LENGTH(pStmt->pTags); ++i) { SNode* pNull = NULL; code = createNullValue(&pNull); - if (TSDB_CODE_SUCCESS == code) - code = nodesListMakeStrictAppend(&((SSelectStmt*)pStmt->pQuery)->pTags, pNull); + if (TSDB_CODE_SUCCESS == code) code = nodesListMakeStrictAppend(&((SSelectStmt*)pStmt->pQuery)->pTags, pNull); } return code; } @@ -10700,9 +10768,9 @@ static int32_t addNullTagsToCreateStreamQuery(STranslateContext* pCxt, STableMet } static int32_t addColDefNodeByProj(SNodeList** ppCols, const SNode* pProject, int8_t flags) { - const SExprNode* pExpr = (const SExprNode*)pProject; - SColumnDefNode* pColDef = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN_DEF, (SNode**)&pColDef); + const SExprNode* pExpr = (const SExprNode*)pProject; + SColumnDefNode* pColDef = NULL; + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN_DEF, (SNode**)&pColDef); if (TSDB_CODE_SUCCESS != code) return code; strcpy(pColDef->colName, pExpr->userAlias); pColDef->dataType = pExpr->resType; @@ -10825,7 +10893,7 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm "SUBTABLE expression must not has column when no partition by clause"); } - if (NULL == pSelect->pWindow && STREAM_TRIGGER_AT_ONCE != pStmt->pOptions->triggerType) { + if (NULL == pSelect->pWindow && !pSelect->hasInterpFunc && STREAM_TRIGGER_AT_ONCE != pStmt->pOptions->triggerType) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "The trigger mode of non window query can only be AT_ONCE"); } @@ -10861,6 +10929,104 @@ static int32_t 
checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm } } + if (pSelect->hasInterpFunc) { + // Temporary code + if (pStmt->pOptions->triggerType != STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "Stream interp function only support force window close"); + } + + if (pStmt->pOptions->triggerType == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + if (pStmt->pOptions->fillHistory) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger was force window close, Stream interp unsupported Fill history"); + } else if (pSelect->pFill != NULL) { + EFillMode mode = ((SFillNode*)(pSelect->pFill))->mode; + if (mode == FILL_MODE_NEXT) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger was force window close, Stream interp unsupported Fill(Next)"); + } else if (mode == FILL_MODE_LINEAR) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger was force window close, Stream interp unsupported Fill(Linear)"); + } + } + } + + if (pStmt->pOptions->triggerType == STREAM_TRIGGER_WINDOW_CLOSE) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "Stream interp unsupported window close"); + } + + if (pStmt->pOptions->triggerType == STREAM_TRIGGER_MAX_DELAY) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "Stream interp unsupported max delay"); + } + + if ((SRealTableNode*)pSelect->pFromTable && ((SRealTableNode*)pSelect->pFromTable)->pMeta && + TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType && + !hasTbnameFunction(pSelect->pPartitionByList)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "Interp for stream on super table must patitioned by table name"); + } + } + + if (pSelect->hasTwaOrElapsedFunc) { + if 
(pStmt->pOptions->triggerType != STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "Stream twa or elapsed function only support force window close"); + } + if (pSelect->pWindow->type != QUERY_NODE_INTERVAL_WINDOW) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "Stream twa or elapsed function only support interval"); + } + + if ((SRealTableNode*)pSelect->pFromTable && ((SRealTableNode*)pSelect->pFromTable)->pMeta && + TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType && + !hasTbnameFunction(pSelect->pPartitionByList)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "twa or elapsed on super table must patitioned by table name"); + } + } + + if (pStmt->pOptions->triggerType == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { + if (pStmt->pOptions->fillHistory) { + return generateSyntaxErrMsgExt( + &pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger was force window close, Stream unsupported Fill history"); + } + + if (pStmt->pOptions->ignoreExpired != 1) { + return generateSyntaxErrMsgExt( + &pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger was force window close, Stream must not set ignore expired 0"); + } + + if (pStmt->pOptions->ignoreUpdate != 1) { + return generateSyntaxErrMsgExt( + &pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger was force window close, Stream must not set ignore update 0"); + } + + if (pSelect->pWindow != NULL && QUERY_NODE_INTERVAL_WINDOW == nodeType(pSelect->pWindow)) { + SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow; + if (NULL != pWindow->pSliding) { + int64_t interval = ((SValueNode*)pWindow->pInterval)->datum.i; + int64_t sliding = ((SValueNode*)pWindow->pSliding)->datum.i; + if (interval != sliding) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger 
was force window close, Stream unsupported sliding"); + } + } + } + + if ((SRealTableNode*)pSelect->pFromTable && ((SRealTableNode*)pSelect->pFromTable)->pMeta && + TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType && + !hasTbnameFunction(pSelect->pPartitionByList)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, + "When trigger was force window close, Super table must patitioned by table name"); + } + } + if (NULL != pSelect->pGroupByList) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported Group by"); } @@ -10872,13 +11038,14 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm if (NULL != pStmt->pOptions->pDelay) { SValueNode* pVal = (SValueNode*)pStmt->pOptions->pDelay; - int64_t minDelay = 0; - char* str = "5s"; - if (DEAL_RES_ERROR != translateValue(pCxt, pVal) && TSDB_CODE_SUCCESS == - parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit, pVal->node.resType.precision, false)) { + int64_t minDelay = 0; + char* str = "5s"; + if (DEAL_RES_ERROR != translateValue(pCxt, pVal) && + TSDB_CODE_SUCCESS == + parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit, pVal->node.resType.precision, false)) { if (pVal->datum.i < minDelay) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, - "stream max delay must be bigger than 5 session"); + "stream max delay must be bigger than 5 seconds"); } } } @@ -10907,7 +11074,7 @@ static int32_t adjustDataTypeOfProjections(STranslateContext* pCxt, const STable REPLACE_NODE(pFunc); } SColumnDefNode* pColDef = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN_DEF, (SNode**)&pColDef); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN_DEF, (SNode**)&pColDef); if (TSDB_CODE_SUCCESS != code) return code; strcpy(pColDef->colName, pSchema->name); pColDef->dataType = dt; @@ -10942,7 +11109,7 @@ static int32_t projColPosCompar(const void* l, const void* 
r) { static void projColPosDelete(void* p) { nodesDestroyNode(((SProjColPos*)p)->pProj); } static int32_t addProjToProjColPos(STranslateContext* pCxt, const SSchema* pSchema, SNode* pProj, SArray* pProjColPos) { - SNode* pNewProj = NULL; + SNode* pNewProj = NULL; int32_t code = nodesCloneNode(pProj, &pNewProj); if (NULL == pNewProj) { return code; @@ -11170,8 +11337,7 @@ static int32_t adjustOrderOfTags(STranslateContext* pCxt, SNodeList* pTags, cons } SNode* pNull = NULL; code = createNullValue(&pNull); - if (TSDB_CODE_SUCCESS == code) - code = nodesListStrictAppend(pNewTagExprs, pNull); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pNewTagExprs, pNull); } } @@ -11275,7 +11441,7 @@ static int32_t translateStreamTargetTable(STranslateContext* pCxt, SCreateStream static int32_t createLastTsSelectStmt(char* pDb, const char* pTable, const char* pkColName, SNode** pQuery) { SColumnNode* col = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&col); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&col); if (NULL == col) { return code; } @@ -11425,7 +11591,7 @@ static int32_t checkAndAdjStreamDestTableSchema(STranslateContext* pCxt, SCreate .bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes}; } int32_t code = checkTableSchemaImpl(pCxt, pStmt->pTags, pStmt->pCols, NULL); - if (TSDB_CODE_SUCCESS == code && NULL == pSelect->pWindow && + if (TSDB_CODE_SUCCESS == code && NULL == pSelect->pWindow && !pSelect->hasInterpFunc && ((SRealTableNode*)pSelect->pFromTable && hasPkInTable(((SRealTableNode*)pSelect->pFromTable)->pMeta))) { if (1 >= LIST_LENGTH(pStmt->pCols) || 1 >= LIST_LENGTH(pSelect->pProjectionList)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY); @@ -11466,7 +11632,7 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt code = addColsToCreateStreamQuery(pCxt, pStmt, pReq); } if (TSDB_CODE_SUCCESS == code) { - code = addWstartTsToCreateStreamQuery(pCxt, 
pStmt->pQuery, pStmt->pCols, pReq); + code = addTsKeyToCreateStreamQuery(pCxt, pStmt->pQuery, pStmt->pCols, pReq); } if (TSDB_CODE_SUCCESS == code) { code = checkStreamQuery(pCxt, pStmt); @@ -11623,7 +11789,7 @@ static int32_t createStreamReqVersionInfo(SSDataBlock* pBlock, SArray** pArray, for (int32_t i = 0; i < pBlock->info.rows; ++i) { SVgroupVer v = {.vgId = *(int32_t*)colDataGetData(pCol1, i), .ver = *(int64_t*)colDataGetData(pCol2, i)}; - if((taosArrayPush(*pArray, &v)) == NULL) { + if ((taosArrayPush(*pArray, &v)) == NULL) { taosArrayDestroy(*pArray); return terrno; } @@ -11678,7 +11844,7 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, SSDa static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) { SMDropStreamReq dropReq = {0}; SName name; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, dropReq.name); dropReq.igNotExists = pStmt->ignoreNotExists; @@ -11690,7 +11856,7 @@ static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pSt static int32_t translatePauseStream(STranslateContext* pCxt, SPauseStreamStmt* pStmt) { SMPauseStreamReq req = {0}; SName name; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, req.name); req.igNotExists = pStmt->ignoreNotExists; @@ -11700,7 +11866,7 @@ static int32_t translatePauseStream(STranslateContext* pCxt, SPauseStreamStmt* p static int32_t translateResumeStream(STranslateContext* pCxt, SResumeStreamStmt* pStmt) { SMResumeStreamReq req = {0}; SName name; - 
int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, req.name); req.igNotExists = pStmt->ignoreNotExists; @@ -11775,7 +11941,7 @@ static int32_t translateDropView(STranslateContext* pCxt, SDropViewStmt* pStmt) SCMDropViewReq dropReq = {0}; SName name = {0}; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS == code) { (void)tNameGetFullDbName(&name, dropReq.dbFName); strncpy(dropReq.name, pStmt->viewName, sizeof(dropReq.name) - 1); @@ -11871,7 +12037,7 @@ static int32_t translateDropFunction(STranslateContext* pCxt, SDropFunctionStmt* static int32_t createRealTableForGrantTable(SGrantStmt* pStmt, SRealTableNode** pTable) { SRealTableNode* pRealTable = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_REAL_TABLE, (SNode**)&pRealTable); + int32_t code = nodesMakeNode(QUERY_NODE_REAL_TABLE, (SNode**)&pRealTable); if (NULL == pRealTable) { return code; } @@ -12074,11 +12240,10 @@ static int32_t translateShowCreateDatabase(STranslateContext* pCxt, SShowCreateD return terrno; } - SName name; + SName name; int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); (void)tNameGetFullDbName(&name, pStmt->dbFName); - if (TSDB_CODE_SUCCESS == code) - return getDBCfg(pCxt, pStmt->dbName, (SDbCfgInfo*)pStmt->pCfg); + if (TSDB_CODE_SUCCESS == code) return getDBCfg(pCxt, pStmt->dbName, (SDbCfgInfo*)pStmt->pCfg); return code; } @@ -12108,7 +12273,7 @@ static int32_t translateShowCreateView(STranslateContext* pCxt, SShowCreateViewS static int32_t createColumnNodeWithName(const char* name, SNode** ppCol) { SColumnNode* pCol = NULL; 
- int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (!pCol) return code; tstrncpy(pCol->colName, name, TSDB_COL_NAME_LEN); tstrncpy(pCol->node.aliasName, name, TSDB_COL_NAME_LEN); @@ -12170,7 +12335,7 @@ static int32_t buildTSMAAstStreamSubTable(SCreateTSMAStmt* pStmt, SMCreateSmaReq SFunctionNode* pConcatFunc = NULL; code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pConcatFunc); if (TSDB_CODE_SUCCESS != code) goto _end; - SValueNode* pVal = NULL; + SValueNode* pVal = NULL; code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); if (TSDB_CODE_SUCCESS != code) goto _end; @@ -12218,8 +12383,7 @@ static int32_t buildTSMAAst(STranslateContext* pCxt, SCreateTSMAStmt* pStmt, SMC info.pDbName = pStmt->dbName; info.pTableName = tbName; code = nodesCloneList(pStmt->pOptions->pFuncs, &info.pFuncs); - if (TSDB_CODE_SUCCESS == code) - code = nodesCloneNode(pStmt->pOptions->pInterval, &info.pInterval); + if (TSDB_CODE_SUCCESS == code) code = nodesCloneNode(pStmt->pOptions->pInterval, &info.pInterval); SFunctionNode* pTbnameFunc = NULL; if (TSDB_CODE_SUCCESS == code) { @@ -12244,10 +12408,9 @@ static int32_t buildTSMAAst(STranslateContext* pCxt, SCreateTSMAStmt* pStmt, SMC } code = nodesListAppend(info.pPartitionByList, pTagCol); if (TSDB_CODE_SUCCESS == code) { - SNode*pNew = NULL; + SNode* pNew = NULL; code = nodesCloneNode(pTagCol, &pNew); - if (TSDB_CODE_SUCCESS == code) - code = nodesListMakeStrictAppend(&info.pTags, pNew); + if (TSDB_CODE_SUCCESS == code) code = nodesListMakeStrictAppend(&info.pTags, pNew); } } @@ -12394,7 +12557,7 @@ static int32_t buildCreateTSMAReq(STranslateContext* pCxt, SCreateTSMAStmt* pStm pReq->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i; pReq->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; -#define TSMA_MIN_INTERVAL_MS 1000 * 60 // 1m +#define TSMA_MIN_INTERVAL_MS 1000 * 60 // 1m #define TSMA_MAX_INTERVAL_MS (60UL * 60UL * 
1000UL * 24UL * 365UL) // 1y if (!IS_CALENDAR_TIME_DURATION(pReq->intervalUnit)) { @@ -12405,8 +12568,7 @@ static int32_t buildCreateTSMAReq(STranslateContext* pCxt, SCreateTSMAStmt* pStm } else { if (pReq->intervalUnit == TIME_UNIT_MONTH && (pReq->interval < 1 || pReq->interval > 12)) return TSDB_CODE_TSMA_INVALID_INTERVAL; - if (pReq->intervalUnit == TIME_UNIT_YEAR && (pReq->interval != 1)) - return TSDB_CODE_TSMA_INVALID_INTERVAL; + if (pReq->intervalUnit == TIME_UNIT_YEAR && (pReq->interval != 1)) return TSDB_CODE_TSMA_INVALID_INTERVAL; } STableMeta* pTableMeta = NULL; @@ -12422,7 +12584,7 @@ static int32_t buildCreateTSMAReq(STranslateContext* pCxt, SCreateTSMAStmt* pStm if (TSDB_CODE_SUCCESS == code) { SValueNode* pInterval = (SValueNode*)pStmt->pOptions->pInterval; if (checkRecursiveTsmaInterval(pRecursiveTsma->interval, pRecursiveTsma->unit, pInterval->datum.i, - pInterval->unit, pDbInfo.precision, true)) { + pInterval->unit, pDbInfo.precision, true)) { } else { code = TSDB_CODE_TSMA_INVALID_RECURSIVE_INTERVAL; } @@ -13044,7 +13206,7 @@ int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pS static int32_t createStarCol(SNode** ppNode) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (NULL == pCol) { return code; } @@ -13055,7 +13217,7 @@ static int32_t createStarCol(SNode** ppNode) { static int32_t createProjectCol(const char* pProjCol, SNode** ppNode) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (NULL == pCol) { return code; } @@ -13098,7 +13260,7 @@ static int32_t createProjectCols(int32_t ncols, const char* const pCols[], SNode static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { SSelectStmt* pSelect = NULL; - int32_t 
code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&pSelect); + int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&pSelect); if (NULL == pSelect) { return code; } @@ -13153,7 +13315,7 @@ static int32_t createOperatorNode(EOperatorType opType, const char* pColName, co } SOperatorNode* pOper = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); + int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); if (NULL == pOper) { return code; } @@ -13178,7 +13340,7 @@ static int32_t createOperatorNode(EOperatorType opType, const char* pColName, co static int32_t createParOperatorNode(EOperatorType opType, const char* pLeftCol, const char* pRightCol, SNode** ppResOp) { SOperatorNode* pOper = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); + int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); if (TSDB_CODE_SUCCESS != code) return code; pOper->opType = opType; @@ -13201,7 +13363,7 @@ static int32_t createParOperatorNode(EOperatorType opType, const char* pLeftCol, static int32_t createIsOperatorNode(EOperatorType opType, const char* pColName, SNode** pOp) { SOperatorNode* pOper = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); + int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); if (NULL == pOper) { return code; } @@ -13323,7 +13485,7 @@ static int32_t addShowUserDatabasesCond(SSelectStmt* pSelect) { SNode* pNameCond1 = NULL; SNode* pNameCond2 = NULL; SNode* pNameCond = NULL; - SValueNode* pValNode1 = NULL, *pValNode2 = NULL; + SValueNode *pValNode1 = NULL, *pValNode2 = NULL; code = nodesMakeValueNodeFromString(TSDB_INFORMATION_SCHEMA_DB, &pValNode1); if (TSDB_CODE_SUCCESS == code) { @@ -13337,11 +13499,9 @@ static int32_t addShowUserDatabasesCond(SSelectStmt* pSelect) { } nodesDestroyNode((SNode*)pValNode2); nodesDestroyNode((SNode*)pValNode1); - if (TSDB_CODE_SUCCESS == code) - code = createLogicCondNode(&pNameCond1, &pNameCond2, 
&pNameCond, LOGIC_COND_TYPE_AND); + if (TSDB_CODE_SUCCESS == code) code = createLogicCondNode(&pNameCond1, &pNameCond2, &pNameCond, LOGIC_COND_TYPE_AND); - if (TSDB_CODE_SUCCESS == code) - code = insertCondIntoSelectStmt(pSelect, &pNameCond); + if (TSDB_CODE_SUCCESS == code) code = insertCondIntoSelectStmt(pSelect, &pNameCond); if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode(pNameCond1); @@ -13355,8 +13515,8 @@ static int32_t addShowSystemDatabasesCond(SSelectStmt* pSelect) { int32_t code = TSDB_CODE_SUCCESS; SNode* pNameCond1 = NULL; SNode* pNameCond2 = NULL; - SValueNode* pValNode1 = NULL, * pValNode2 = NULL; - SNode* pNameCond = NULL; + SValueNode *pValNode1 = NULL, *pValNode2 = NULL; + SNode* pNameCond = NULL; code = nodesMakeValueNodeFromString(TSDB_INFORMATION_SCHEMA_DB, &pValNode1); if (TSDB_CODE_SUCCESS == code) { code = nodesMakeValueNodeFromString(TSDB_PERFORMANCE_SCHEMA_DB, &pValNode2); @@ -13373,8 +13533,7 @@ static int32_t addShowSystemDatabasesCond(SSelectStmt* pSelect) { code = createLogicCondNode(&pNameCond1, &pNameCond2, &pNameCond, LOGIC_COND_TYPE_OR); } - if (TSDB_CODE_SUCCESS == code) - code = insertCondIntoSelectStmt(pSelect, &pNameCond); + if (TSDB_CODE_SUCCESS == code) code = insertCondIntoSelectStmt(pSelect, &pNameCond); if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode(pNameCond1); @@ -13390,8 +13549,7 @@ static int32_t addShowNormalTablesCond(SSelectStmt* pSelect) { SValueNode* pValNode1 = NULL; code = nodesMakeValueNodeFromString("NORMAL_TABLE", &pValNode1); - if (TSDB_CODE_SUCCESS == code) - code = createOperatorNode(OP_TYPE_EQUAL, "type", (SNode*)pValNode1, &pTypeCond); + if (TSDB_CODE_SUCCESS == code) code = createOperatorNode(OP_TYPE_EQUAL, "type", (SNode*)pValNode1, &pTypeCond); nodesDestroyNode((SNode*)pValNode1); @@ -13406,8 +13564,7 @@ static int32_t addShowChildTablesCond(SSelectStmt* pSelect) { SValueNode* pValNode1 = NULL; code = nodesMakeValueNodeFromString("CHILD_TABLE", &pValNode1); - if (TSDB_CODE_SUCCESS == code) - code 
= createOperatorNode(OP_TYPE_EQUAL, "type", (SNode*)pValNode1, &pTypeCond); + if (TSDB_CODE_SUCCESS == code) code = createOperatorNode(OP_TYPE_EQUAL, "type", (SNode*)pValNode1, &pTypeCond); nodesDestroyNode((SNode*)pValNode1); @@ -13506,7 +13663,8 @@ static int32_t checkShowTags(STranslateContext* pCxt, const SShowStmt* pShow) { int32_t code = 0; SName name = {0}; STableMeta* pTableMeta = NULL; - toName(pCxt->pParseCxt->acctId, ((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, &name); + toName(pCxt->pParseCxt->acctId, ((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, + &name); code = getTargetMeta(pCxt, &name, &pTableMeta, true); if (TSDB_CODE_SUCCESS != code) { code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GET_META_ERROR, tstrerror(code)); @@ -13533,7 +13691,7 @@ static int32_t rewriteShowTags(STranslateContext* pCxt, SQuery* pQuery) { static int32_t createTagsFunction(SFunctionNode** ppNode) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -13548,7 +13706,7 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis return TSDB_CODE_SUCCESS; } SFunctionNode* pTbNameFunc = NULL; - int32_t code = createTbnameFunction(&pTbNameFunc); + int32_t code = createTbnameFunction(&pTbNameFunc); if (TSDB_CODE_SUCCESS == code) { code = nodesListMakeStrictAppend(pProjections, (SNode*)pTbNameFunc); } @@ -13630,7 +13788,7 @@ static int32_t rewriteShowVnodes(STranslateContext* pCxt, SQuery* pQuery) { static int32_t createBlockDistInfoFunc(SFunctionNode** ppNode) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -13643,7 +13801,7 @@ static int32_t 
createBlockDistInfoFunc(SFunctionNode** ppNode) { static int32_t createBlockDistFunc(SFunctionNode** ppNode) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -13670,8 +13828,7 @@ static int32_t rewriteShowTableDist(STranslateContext* pCxt, SQuery* pQuery) { NODES_DESTORY_LIST(pStmt->pProjectionList); SFunctionNode* pFuncNew = NULL; code = createBlockDistFunc(&pFuncNew); - if (TSDB_CODE_SUCCESS == code) - code = nodesListMakeStrictAppend(&pStmt->pProjectionList, (SNode*)pFuncNew); + if (TSDB_CODE_SUCCESS == code) code = nodesListMakeStrictAppend(&pStmt->pProjectionList, (SNode*)pFuncNew); } if (TSDB_CODE_SUCCESS == code) { pCxt->showRewrite = true; @@ -13725,7 +13882,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* } SNode* pCol; col_id_t index = 0; - int32_t code = tInitDefaultSColCmprWrapperByCols(&req.colCmpr, req.ntb.schemaRow.nCols); + int32_t code = tInitDefaultSColCmprWrapperByCols(&req.colCmpr, req.ntb.schemaRow.nCols); if (TSDB_CODE_SUCCESS != code) { tdDestroySVCreateTbReq(&req); return code; @@ -13823,7 +13980,7 @@ static void destroyCreateTbReqBatch(void* data) { int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray) { SVnodeModifyOpStmt* pNewStmt = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pNewStmt); + int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pNewStmt); if (pNewStmt == NULL) { return code; } @@ -13952,7 +14109,7 @@ static int32_t addCreateTbReqIntoVgroup(SHashObj* pVgroupHashmap, const char* db } static int32_t createCastFuncForTag(STranslateContext* pCxt, SNode* pNode, SDataType dt, SNode** pCast) { - SNode* pExpr = NULL; + SNode* pExpr = NULL; int32_t code = nodesCloneNode(pNode, (SNode**)&pExpr); if (NULL == pExpr) { return code; @@ -14636,7 +14793,7 @@ static int32_t 
rewriteCreateMultiTable(STranslateContext* pCxt, SQuery* pQuery) static int32_t rewriteCreateTableFromFile(STranslateContext* pCxt, SQuery* pQuery) { SVnodeModifyOpStmt* pModifyStmt = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pModifyStmt); + int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pModifyStmt); if (pModifyStmt == NULL) { return code; } @@ -15535,7 +15692,7 @@ static int32_t rewriteShowCompactDetailsStmt(STranslateContext* pCxt, SQuery* pQ static int32_t createParWhenThenNode(SNode* pWhen, SNode* pThen, SNode** ppResWhenThen) { SWhenThenNode* pWThen = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_WHEN_THEN, (SNode**)&pWThen); + int32_t code = nodesMakeNode(QUERY_NODE_WHEN_THEN, (SNode**)&pWThen); if (TSDB_CODE_SUCCESS != code) return code; pWThen->pWhen = pWhen; @@ -15547,7 +15704,7 @@ static int32_t createParWhenThenNode(SNode* pWhen, SNode* pThen, SNode** ppResWh static int32_t createParCaseWhenNode(SNode* pCase, SNodeList* pWhenThenList, SNode* pElse, const char* pAias, SNode** ppResCaseWhen) { SCaseWhenNode* pCaseWhen = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_CASE_WHEN, (SNode**)&pCaseWhen); + int32_t code = nodesMakeNode(QUERY_NODE_CASE_WHEN, (SNode**)&pCaseWhen); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -15566,7 +15723,7 @@ static int32_t createParCaseWhenNode(SNode* pCase, SNodeList* pWhenThenList, SNo static int32_t createParFunctionNode(const char* pFunName, const char* pAias, SNodeList* pParameterList, SNode** ppResFunc) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -15580,7 +15737,7 @@ static int32_t createParFunctionNode(const char* pFunName, const char* pAias, SN static int32_t createParListNode(SNode* pItem, SNodeList** ppResList) { SNodeList* pList = NULL; - int32_t code = nodesMakeList(&pList); 
+ int32_t code = nodesMakeList(&pList); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -15591,7 +15748,7 @@ static int32_t createParListNode(SNode* pItem, SNodeList** ppResList) { static int32_t createParTempTableNode(SSelectStmt* pSubquery, SNode** ppResTempTable) { STempTableNode* pTempTable = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_TEMP_TABLE, (SNode**)&pTempTable); + int32_t code = nodesMakeNode(QUERY_NODE_TEMP_TABLE, (SNode**)&pTempTable); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -15623,12 +15780,9 @@ static int32_t rewriteShowAliveStmt(STranslateContext* pCxt, SQuery* pQuery) { SNode* pCond3 = NULL; SNode* pCond4 = NULL; code = createOperatorNode(OP_TYPE_EQUAL, "v1_status", (SNode*)pValNode, &pCond1); - if (TSDB_CODE_SUCCESS == code) - code = createOperatorNode(OP_TYPE_EQUAL, "v2_status", (SNode*)pValNode, &pCond2); - if (TSDB_CODE_SUCCESS == code) - code = createOperatorNode(OP_TYPE_EQUAL, "v3_status", (SNode*)pValNode, &pCond3); - if (TSDB_CODE_SUCCESS == code) - code = createOperatorNode(OP_TYPE_EQUAL, "v4_status", (SNode*)pValNode, &pCond4); + if (TSDB_CODE_SUCCESS == code) code = createOperatorNode(OP_TYPE_EQUAL, "v2_status", (SNode*)pValNode, &pCond2); + if (TSDB_CODE_SUCCESS == code) code = createOperatorNode(OP_TYPE_EQUAL, "v3_status", (SNode*)pValNode, &pCond3); + if (TSDB_CODE_SUCCESS == code) code = createOperatorNode(OP_TYPE_EQUAL, "v4_status", (SNode*)pValNode, &pCond4); nodesDestroyNode((SNode*)pValNode); if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode(pCond1); @@ -15643,10 +15797,8 @@ static int32_t rewriteShowAliveStmt(STranslateContext* pCxt, SQuery* pQuery) { SNode* pTemp2 = NULL; SNode* pFullCond = NULL; code = createLogicCondNode(&pCond1, &pCond2, &pTemp1, LOGIC_COND_TYPE_OR); - if (TSDB_CODE_SUCCESS == code) - code = createLogicCondNode(&pTemp1, &pCond3, &pTemp2, LOGIC_COND_TYPE_OR); - if (TSDB_CODE_SUCCESS == code) - code = createLogicCondNode(&pTemp2, &pCond4, &pFullCond, LOGIC_COND_TYPE_OR); + if 
(TSDB_CODE_SUCCESS == code) code = createLogicCondNode(&pTemp1, &pCond3, &pTemp2, LOGIC_COND_TYPE_OR); + if (TSDB_CODE_SUCCESS == code) code = createLogicCondNode(&pTemp2, &pCond4, &pFullCond, LOGIC_COND_TYPE_OR); if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode(pCond1); nodesDestroyNode(pCond2); @@ -15969,8 +16121,8 @@ static int32_t rewriteShowAliveStmt(STranslateContext* pCxt, SQuery* pQuery) { } // pSubSelect, pWhenThenlist need to free - // case when leader_col = count_col and leader_col > 0 then 1 when leader_col < count_col and leader_col > 0 then 2 else - // 0 end as status + // case when leader_col = count_col and leader_col > 0 then 1 when leader_col < count_col and leader_col > 0 then 2 + // else 0 end as status pElse = NULL; code = nodesMakeValueNodeFromInt32(0, &pElse); if (TSDB_CODE_SUCCESS != code) { diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c deleted file mode 100644 index 5849977a3a2..00000000000 --- a/source/libs/parser/src/sql.c +++ /dev/null @@ -1,8100 +0,0 @@ -/* -** 2000-05-29 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Driver template for the LEMON parser generator. -** -** The "lemon" program processes an LALR(1) input grammar file, then uses -** this template to construct a parser. The "lemon" program inserts text -** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the -** interstitial "-" characters) contained in this template is changed into -** the value of the %name directive from the grammar. Otherwise, the content -** of this template is copied straight through into the generate parser -** source file. 
-** -** The following is the concatenation of all %include directives from the -** input grammar file: -*/ -#include -#include -/************ Begin %include sections from the grammar ************************/ - -#include -#include -#include -#include -#include - -#define ALLOW_FORBID_FUNC - -#include "functionMgt.h" -#include "nodes.h" -#include "parToken.h" -#include "ttokendef.h" -#include "parAst.h" - -#define YYSTACKDEPTH 0 -/**************** End of %include directives **********************************/ -/* These constants specify the various numeric values for terminal symbols -** in a format understandable to "makeheaders". This section is blank unless -** "lemon" is run with the "-m" command-line option. -***************** Begin makeheaders token definitions *************************/ -/**************** End makeheaders token definitions ***************************/ - -/* The next sections is a series of control #defines. -** various aspects of the generated parser. -** YYCODETYPE is the data type used to store the integer codes -** that represent terminal and non-terminal symbols. -** "unsigned char" is used if there are fewer than -** 256 symbols. Larger types otherwise. -** YYNOCODE is a number of type YYCODETYPE that is not used for -** any terminal or nonterminal symbol. -** YYFALLBACK If defined, this indicates that one or more tokens -** (also known as: "terminal symbols") have fall-back -** values which should be used if the original symbol -** would not parse. This permits keywords to sometimes -** be used as identifiers, for example. -** YYACTIONTYPE is the data type used for "action codes" - numbers -** that indicate what to do in response to the next -** token. -** ParseTOKENTYPE is the data type used for minor type for terminal -** symbols. Background: A "minor type" is a semantic -** value associated with a terminal or non-terminal -** symbols. For example, for an "ID" terminal symbol, -** the minor type might be the name of the identifier. 
-** Each non-terminal can have a different minor type. -** Terminal symbols all have the same minor type, though. -** This macros defines the minor type for terminal -** symbols. -** YYMINORTYPE is the data type used for all minor types. -** This is typically a union of many types, one of -** which is ParseTOKENTYPE. The entry in the union -** for terminal symbols is called "yy0". -** YYSTACKDEPTH is the maximum depth of the parser's stack. If -** zero the stack is dynamically sized using realloc() -** ParseARG_SDECL A static variable declaration for the %extra_argument -** ParseARG_PDECL A parameter declaration for the %extra_argument -** ParseARG_PARAM Code to pass %extra_argument as a subroutine parameter -** ParseARG_STORE Code to store %extra_argument into yypParser -** ParseARG_FETCH Code to extract %extra_argument from yypParser -** ParseCTX_* As ParseARG_ except for %extra_context -** YYERRORSYMBOL is the code number of the error symbol. If not -** defined, then do no error processing. -** YYNSTATE the combined number of states. 
-** YYNRULE the number of rules in the grammar -** YYNTOKEN Number of terminal symbols -** YY_MAX_SHIFT Maximum value for shift actions -** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions -** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions -** YY_ERROR_ACTION The yy_action[] code for syntax error -** YY_ACCEPT_ACTION The yy_action[] code for accept -** YY_NO_ACTION The yy_action[] code for no-op -** YY_MIN_REDUCE Minimum value for reduce actions -** YY_MAX_REDUCE Maximum value for reduce actions -*/ -#ifndef INTERFACE -# define INTERFACE 1 -#endif -/************* Begin control #defines *****************************************/ -#define YYCODETYPE unsigned short int -#define YYNOCODE 573 -#define YYACTIONTYPE unsigned short int -#define ParseTOKENTYPE SToken -typedef union { - int yyinit; - ParseTOKENTYPE yy0; - EFillMode yy102; - EOperatorType yy140; - ETrimType yy300; - ENullOrder yy307; - EOrder yy410; - SDataType yy424; - int64_t yy483; - SToken yy557; - bool yy569; - SShowTablesOption yy595; - SAlterOption yy683; - STokenPair yy723; - EShowKind yy741; - EJoinSubType yy744; - EJoinType yy792; - int8_t yy815; - int32_t yy904; - SNodeList* yy946; - SNode* yy974; -} YYMINORTYPE; -#ifndef YYSTACKDEPTH -#define YYSTACKDEPTH 100 -#endif -#define ParseARG_SDECL SAstCreateContext* pCxt ; -#define ParseARG_PDECL , SAstCreateContext* pCxt -#define ParseARG_PARAM ,pCxt -#define ParseARG_FETCH SAstCreateContext* pCxt =yypParser->pCxt ; -#define ParseARG_STORE yypParser->pCxt =pCxt ; -#define ParseCTX_SDECL -#define ParseCTX_PDECL -#define ParseCTX_PARAM -#define ParseCTX_FETCH -#define ParseCTX_STORE -#define YYFALLBACK 1 -#define YYNSTATE 1025 -#define YYNRULE 784 -#define YYNRULE_WITH_ACTION 784 -#define YYNTOKEN 389 -#define YY_MAX_SHIFT 1024 -#define YY_MIN_SHIFTREDUCE 1516 -#define YY_MAX_SHIFTREDUCE 2299 -#define YY_ERROR_ACTION 2300 -#define YY_ACCEPT_ACTION 2301 -#define YY_NO_ACTION 2302 -#define YY_MIN_REDUCE 2303 -#define YY_MAX_REDUCE 3086 
-/************* End control #defines *******************************************/ -#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) - -/* Define the yytestcase() macro to be a no-op if is not already defined -** otherwise. -** -** Applications can choose to define yytestcase() in the %include section -** to a macro that can assist in verifying code coverage. For production -** code the yytestcase() macro should be turned off. But it is useful -** for testing. -*/ -#ifndef yytestcase -# define yytestcase(X) -#endif - - -/* Next are the tables used to determine what action to take based on the -** current state and lookahead token. These tables are used to implement -** functions that take a state number and lookahead value and return an -** action integer. -** -** Suppose the action integer is N. Then the action is determined as -** follows -** -** 0 <= N <= YY_MAX_SHIFT Shift N. That is, push the lookahead -** token onto the stack and goto state N. -** -** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then -** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE. -** -** N == YY_ERROR_ACTION A syntax error has occurred. -** -** N == YY_ACCEPT_ACTION The parser accepts its input. -** -** N == YY_NO_ACTION No such action. Denotes unused -** slots in the yy_action[] table. -** -** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE -** and YY_MAX_REDUCE -** -** The action table is constructed as a single large table named yy_action[]. -** Given state S and lookahead X, the action is computed as either: -** -** (A) N = yy_action[ yy_shift_ofst[S] + X ] -** (B) N = yy_default[S] -** -** The (A) formula is preferred. The B formula is used instead if -** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X. -** -** The formulas above are for computing the action when the lookahead is -** a terminal symbol. 
If the lookahead is a non-terminal (as occurs after -** a reduce action) then the yy_reduce_ofst[] array is used in place of -** the yy_shift_ofst[] array. -** -** The following are the tables generated in this section: -** -** yy_action[] A single table containing all actions. -** yy_lookahead[] A table containing the lookahead for each entry in -** yy_action. Used to detect hash collisions. -** yy_shift_ofst[] For each state, the offset into yy_action for -** shifting terminals. -** yy_reduce_ofst[] For each state, the offset into yy_action for -** shifting non-terminals after a reduce. -** yy_default[] Default action for each state. -** -*********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (4475) -static const YYACTIONTYPE yy_action[] = { - /* 0 */ 839, 683, 3059, 691, 684, 2351, 684, 2351, 3054, 2577, - /* 10 */ 3054, 2816, 60, 58, 678, 59, 57, 56, 55, 54, - /* 20 */ 506, 2304, 2016, 2521, 915, 2517, 838, 231, 858, 3058, - /* 30 */ 2816, 3055, 840, 3055, 3057, 205, 2014, 332, 2124, 2399, - /* 40 */ 2041, 2820, 151, 2841, 530, 150, 149, 148, 147, 146, - /* 50 */ 145, 144, 143, 142, 914, 851, 170, 466, 854, 389, - /* 60 */ 2820, 53, 52, 220, 680, 59, 57, 56, 55, 54, - /* 70 */ 2303, 2119, 151, 2045, 884, 150, 149, 148, 147, 146, - /* 80 */ 145, 144, 143, 142, 782, 2022, 2041, 2674, 2859, 829, - /* 90 */ 2647, 2822, 2824, 501, 160, 159, 158, 157, 156, 155, - /* 100 */ 154, 153, 152, 688, 2806, 919, 896, 2672, 901, 685, - /* 110 */ 2822, 2825, 1592, 2759, 1591, 1021, 851, 170, 61, 990, - /* 120 */ 989, 988, 987, 536, 919, 986, 985, 175, 980, 979, - /* 130 */ 978, 977, 976, 975, 974, 174, 968, 967, 966, 535, - /* 140 */ 534, 963, 962, 961, 211, 210, 960, 531, 959, 958, - /* 150 */ 957, 2840, 63, 1593, 2891, 2127, 2128, 197, 134, 2842, - /* 160 */ 900, 2844, 2845, 895, 75, 2448, 883, 2892, 919, 2041, - /* 170 */ 802, 772, 1592, 213, 1591, 2954, 2042, 251, 3054, 500, - /* 180 */ 2950, 207, 2962, 850, 75, 162, 849, 
766, 166, 770, - /* 190 */ 768, 303, 302, 3054, 2077, 2087, 3060, 231, 198, 232, - /* 200 */ 2315, 3055, 840, 3059, 2126, 2129, 528, 3001, 509, 2570, - /* 210 */ 2572, 838, 231, 1593, 1862, 1863, 3055, 840, 195, 2017, - /* 220 */ 2044, 2015, 824, 107, 914, 9, 882, 2220, 106, 2522, - /* 230 */ 53, 52, 914, 2041, 59, 57, 56, 55, 54, 676, - /* 240 */ 64, 853, 200, 2962, 2963, 2213, 168, 2967, 674, 227, - /* 250 */ 318, 670, 666, 581, 2020, 2021, 2074, 2841, 2076, 2079, - /* 260 */ 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2088, 2089, 2090, - /* 270 */ 892, 885, 894, 914, 917, 916, 881, 2111, 2112, 2113, - /* 280 */ 2114, 2115, 2118, 2120, 2121, 2122, 2123, 2125, 2, 60, - /* 290 */ 58, 2216, 700, 2841, 445, 105, 2039, 506, 75, 2016, - /* 300 */ 458, 118, 2859, 625, 644, 642, 469, 441, 897, 2046, - /* 310 */ 245, 1889, 1890, 2014, 646, 2124, 915, 2517, 2806, 2286, - /* 320 */ 896, 2616, 53, 52, 127, 2074, 59, 57, 56, 55, - /* 330 */ 54, 510, 468, 604, 2599, 648, 236, 220, 2859, 2191, - /* 340 */ 446, 606, 830, 825, 818, 814, 810, 339, 2119, 2674, - /* 350 */ 2154, 884, 584, 785, 2806, 19, 896, 701, 2667, 339, - /* 360 */ 1888, 1891, 2022, 498, 2647, 2840, 2042, 2326, 2891, 2671, - /* 370 */ 901, 63, 436, 2842, 900, 2844, 2845, 895, 893, 339, - /* 380 */ 883, 2892, 919, 874, 2919, 954, 187, 186, 951, 950, - /* 390 */ 949, 184, 1021, 467, 973, 15, 337, 2474, 572, 2044, - /* 400 */ 571, 2840, 2251, 887, 2891, 592, 2674, 2078, 134, 2842, - /* 410 */ 900, 2844, 2845, 895, 886, 2155, 883, 2892, 919, 922, - /* 420 */ 508, 172, 224, 181, 2925, 2954, 2671, 901, 2806, 500, - /* 430 */ 2950, 570, 2127, 2128, 801, 339, 2654, 2633, 2564, 634, - /* 440 */ 633, 631, 630, 629, 624, 623, 622, 621, 450, 590, - /* 450 */ 2643, 611, 610, 609, 608, 607, 601, 600, 599, 267, - /* 460 */ 594, 593, 465, 686, 699, 2359, 585, 1850, 1851, 2075, - /* 470 */ 220, 2077, 2087, 1869, 821, 820, 2249, 2250, 2252, 2253, - /* 480 */ 2254, 2126, 2129, 2974, 2188, 2189, 2190, 2974, 2974, 2974, - /* 490 */ 
2974, 2974, 620, 339, 2160, 618, 2017, 2648, 2015, 41, - /* 500 */ 617, 1786, 1787, 882, 247, 53, 52, 619, 616, 59, - /* 510 */ 57, 56, 55, 54, 44, 502, 2149, 2150, 2151, 2152, - /* 520 */ 2153, 2157, 2158, 2159, 79, 2293, 2193, 2194, 2195, 2196, - /* 530 */ 2197, 2020, 2021, 2074, 2841, 2076, 2079, 2080, 2081, 2082, - /* 540 */ 2083, 2084, 2085, 2086, 2088, 2089, 2090, 892, 885, 854, - /* 550 */ 3059, 917, 916, 881, 2111, 2112, 2212, 786, 3054, 2118, - /* 560 */ 2120, 2121, 2122, 2123, 2125, 2, 60, 58, 2841, 2191, - /* 570 */ 2016, 828, 533, 532, 506, 337, 2016, 3058, 73, 2859, - /* 580 */ 2577, 3055, 3056, 897, 2014, 2135, 45, 355, 1747, 799, - /* 590 */ 2014, 2041, 2124, 702, 269, 2806, 2023, 896, 686, 866, - /* 600 */ 2359, 2859, 1561, 1738, 946, 945, 944, 1742, 943, 1744, - /* 610 */ 1745, 942, 939, 2859, 1753, 936, 1755, 1756, 933, 930, - /* 620 */ 927, 1568, 2499, 473, 786, 2119, 195, 2969, 884, 2806, - /* 630 */ 923, 896, 19, 2022, 137, 596, 2643, 2523, 75, 2022, - /* 640 */ 693, 2713, 2840, 526, 2519, 2891, 1563, 1566, 1567, 134, - /* 650 */ 2842, 900, 2844, 2845, 895, 2292, 2966, 883, 2892, 919, - /* 660 */ 915, 2517, 706, 1021, 213, 196, 2954, 725, 724, 1021, - /* 670 */ 500, 2950, 15, 827, 526, 2519, 2840, 915, 2517, 2891, - /* 680 */ 161, 2841, 2045, 135, 2842, 900, 2844, 2845, 895, 731, - /* 690 */ 249, 883, 2892, 919, 487, 2721, 897, 161, 3002, 2489, - /* 700 */ 2954, 56, 55, 54, 2953, 2950, 736, 788, 2713, 2127, - /* 710 */ 2128, 627, 2643, 2974, 2188, 2189, 2190, 2974, 2974, 2974, - /* 720 */ 2974, 2974, 518, 53, 52, 956, 2859, 59, 57, 56, - /* 730 */ 55, 54, 752, 751, 750, 851, 170, 14, 13, 742, - /* 740 */ 167, 746, 2806, 783, 896, 745, 1683, 521, 2077, 2087, - /* 750 */ 744, 749, 480, 479, 2191, 12, 743, 558, 2126, 2129, - /* 760 */ 478, 739, 738, 737, 2969, 2078, 254, 2017, 2022, 2015, - /* 770 */ 2571, 2572, 43, 2017, 1695, 2015, 956, 2045, 53, 52, - /* 780 */ 882, 2026, 59, 57, 56, 55, 54, 2799, 1694, 2840, - /* 790 */ 523, 703, 2891, 2965, 
2041, 1685, 199, 2842, 900, 2844, - /* 800 */ 2845, 895, 2020, 2021, 883, 2892, 919, 2301, 2020, 2021, - /* 810 */ 2074, 2841, 2076, 2079, 2080, 2081, 2082, 2083, 2084, 2085, - /* 820 */ 2086, 2088, 2089, 2090, 892, 885, 897, 2075, 917, 916, - /* 830 */ 881, 2111, 2112, 339, 2262, 2325, 2118, 2120, 2121, 2122, - /* 840 */ 2123, 2125, 2, 12, 60, 58, 803, 3012, 851, 170, - /* 850 */ 512, 2661, 506, 704, 2016, 2168, 2859, 947, 226, 1699, - /* 860 */ 1747, 140, 2962, 2963, 919, 168, 2967, 649, 2014, 2969, - /* 870 */ 2124, 117, 2806, 1698, 896, 1738, 946, 945, 944, 1742, - /* 880 */ 943, 1744, 1745, 891, 890, 2577, 1753, 889, 1755, 1756, - /* 890 */ 888, 930, 927, 464, 2324, 562, 2806, 539, 2964, 2188, - /* 900 */ 2189, 2190, 538, 2119, 2575, 1917, 884, 1985, 113, 396, - /* 910 */ 19, 1595, 1596, 915, 2517, 1568, 307, 2022, 2046, 2840, - /* 920 */ 2506, 126, 2891, 564, 560, 638, 135, 2842, 900, 2844, - /* 930 */ 2845, 895, 1984, 68, 883, 2892, 919, 2511, 517, 516, - /* 940 */ 553, 1566, 1567, 2954, 2841, 915, 2517, 1021, 2951, 2508, - /* 950 */ 15, 2800, 651, 234, 802, 2806, 225, 915, 2517, 897, - /* 960 */ 2760, 2361, 3054, 520, 519, 578, 190, 238, 2078, 486, - /* 970 */ 2721, 409, 533, 532, 201, 2962, 2963, 579, 168, 2967, - /* 980 */ 3060, 231, 2030, 512, 839, 3055, 840, 2127, 2128, 2859, - /* 990 */ 407, 89, 3054, 256, 88, 2323, 2023, 919, 2124, 727, - /* 1000 */ 726, 1943, 1944, 470, 2491, 2806, 2504, 896, 447, 2322, - /* 1010 */ 838, 231, 2502, 2046, 512, 3055, 840, 802, 637, 255, - /* 1020 */ 265, 661, 659, 656, 654, 3054, 2077, 2087, 919, 97, - /* 1030 */ 2075, 2119, 635, 3, 53, 52, 2126, 2129, 59, 57, - /* 1040 */ 56, 55, 54, 3060, 231, 2022, 173, 66, 3055, 840, - /* 1050 */ 2321, 2017, 2840, 2015, 113, 2891, 2806, 2507, 882, 134, - /* 1060 */ 2842, 900, 2844, 2845, 895, 2243, 75, 883, 2892, 919, - /* 1070 */ 2806, 748, 747, 471, 3074, 879, 2954, 346, 347, 2244, - /* 1080 */ 500, 2950, 345, 2510, 574, 3058, 2020, 2021, 2074, 573, - /* 1090 */ 2076, 2079, 2080, 
2081, 2082, 2083, 2084, 2085, 2086, 2088, - /* 1100 */ 2089, 2090, 892, 885, 970, 76, 917, 916, 881, 2111, - /* 1110 */ 2112, 2806, 984, 982, 2118, 2120, 2121, 2122, 2123, 2125, - /* 1120 */ 2, 60, 58, 2130, 915, 2517, 2320, 2319, 2242, 506, - /* 1130 */ 2318, 2016, 53, 52, 2331, 1014, 59, 57, 56, 55, - /* 1140 */ 54, 802, 306, 2577, 598, 2014, 305, 2124, 2449, 3054, - /* 1150 */ 759, 496, 954, 187, 186, 951, 950, 949, 184, 101, - /* 1160 */ 100, 577, 2575, 846, 244, 773, 2577, 3060, 231, 2488, - /* 1170 */ 2317, 40, 3055, 840, 511, 395, 972, 569, 567, 2031, - /* 1180 */ 2119, 2026, 403, 884, 304, 2575, 2554, 2806, 2806, 444, - /* 1190 */ 2402, 2806, 556, 314, 2022, 552, 548, 544, 541, 570, - /* 1200 */ 2727, 53, 52, 762, 402, 59, 57, 56, 55, 54, - /* 1210 */ 756, 754, 915, 2517, 2034, 2036, 12, 301, 10, 2314, - /* 1220 */ 116, 2841, 2521, 196, 1021, 453, 34, 61, 485, 2577, - /* 1230 */ 774, 2806, 612, 2520, 917, 916, 897, 527, 3009, 185, - /* 1240 */ 915, 2517, 2118, 2120, 2121, 2122, 2123, 2125, 2575, 915, - /* 1250 */ 2517, 53, 52, 915, 2517, 59, 57, 56, 55, 54, - /* 1260 */ 613, 339, 308, 85, 2127, 2128, 2859, 2045, 84, 614, - /* 1270 */ 752, 751, 750, 705, 2041, 2746, 809, 742, 167, 746, - /* 1280 */ 2806, 396, 2806, 745, 896, 915, 2517, 2313, 744, 749, - /* 1290 */ 480, 479, 333, 2312, 743, 915, 2517, 2577, 478, 739, - /* 1300 */ 738, 737, 2386, 2077, 2087, 2512, 915, 2517, 875, 877, - /* 1310 */ 2926, 2926, 2263, 2126, 2129, 309, 2576, 954, 187, 186, - /* 1320 */ 951, 950, 949, 184, 753, 67, 317, 2311, 2017, 2840, - /* 1330 */ 2015, 2156, 2891, 833, 843, 882, 134, 2842, 900, 2844, - /* 1340 */ 2845, 895, 2722, 2316, 883, 2892, 919, 948, 2806, 952, - /* 1350 */ 2568, 3074, 2568, 2954, 2806, 2310, 735, 500, 2950, 2309, - /* 1360 */ 734, 205, 2238, 2020, 2021, 2074, 2841, 2076, 2079, 2080, - /* 1370 */ 2081, 2082, 2083, 2084, 2085, 2086, 2088, 2089, 2090, 892, - /* 1380 */ 885, 897, 2626, 917, 916, 881, 2111, 2112, 2806, 915, - /* 1390 */ 2517, 2118, 2120, 
2121, 2122, 2123, 2125, 2, 60, 58, - /* 1400 */ 2841, 915, 2517, 915, 2517, 2202, 506, 2750, 2016, 857, - /* 1410 */ 2161, 2859, 915, 2517, 2308, 897, 2806, 3022, 842, 2307, - /* 1420 */ 2806, 350, 2014, 871, 2124, 171, 588, 2806, 2925, 896, - /* 1430 */ 42, 2306, 357, 53, 52, 915, 2517, 59, 57, 56, - /* 1440 */ 55, 54, 53, 52, 529, 2859, 59, 57, 56, 55, - /* 1450 */ 54, 775, 915, 2517, 195, 908, 1570, 2119, 915, 2517, - /* 1460 */ 884, 2806, 2040, 896, 546, 2522, 953, 915, 2517, 2568, - /* 1470 */ 847, 2022, 909, 2588, 2840, 2806, 3015, 2891, 913, 163, - /* 1480 */ 2806, 413, 2842, 900, 2844, 2845, 895, 385, 32, 883, - /* 1490 */ 2892, 919, 2806, 103, 91, 2492, 294, 296, 2841, 292, - /* 1500 */ 295, 1021, 177, 2046, 61, 2109, 2295, 2296, 2840, 2579, - /* 1510 */ 2075, 2891, 740, 897, 1678, 134, 2842, 900, 2844, 2845, - /* 1520 */ 895, 222, 177, 883, 2892, 919, 298, 2384, 176, 297, - /* 1530 */ 3074, 300, 2954, 812, 299, 1676, 500, 2950, 2375, 741, - /* 1540 */ 185, 2127, 2128, 2859, 2101, 2373, 62, 822, 104, 755, - /* 1550 */ 53, 52, 62, 2232, 59, 57, 56, 55, 54, 2806, - /* 1560 */ 757, 896, 1674, 1679, 14, 13, 880, 760, 53, 52, - /* 1570 */ 364, 363, 59, 57, 56, 55, 54, 2025, 53, 52, - /* 1580 */ 2077, 2087, 59, 57, 56, 55, 54, 325, 1933, 47, - /* 1590 */ 2126, 2129, 53, 52, 214, 344, 59, 57, 56, 55, - /* 1600 */ 54, 90, 62, 366, 365, 2017, 2840, 2015, 1941, 2891, - /* 1610 */ 77, 852, 882, 134, 2842, 900, 2844, 2845, 895, 2236, - /* 1620 */ 62, 883, 2892, 919, 2827, 2024, 856, 165, 2929, 2860, - /* 1630 */ 2954, 632, 2248, 776, 500, 2950, 368, 367, 2247, 844, - /* 1640 */ 2020, 2021, 2074, 62, 2076, 2079, 2080, 2081, 2082, 2083, - /* 1650 */ 2084, 2085, 2086, 2088, 2089, 2090, 892, 885, 62, 2441, - /* 1660 */ 917, 916, 881, 2111, 2112, 62, 90, 647, 2118, 2120, - /* 1670 */ 2121, 2122, 2123, 2125, 2, 60, 58, 2841, 182, 2097, - /* 1680 */ 323, 348, 779, 506, 2440, 2016, 163, 863, 2100, 2099, - /* 1690 */ 185, 2829, 897, 132, 816, 129, 2162, 2352, 46, 2014, - /* 
1700 */ 87, 2124, 2367, 2102, 370, 369, 2110, 53, 52, 3005, - /* 1710 */ 39, 59, 57, 56, 55, 54, 372, 371, 1656, 53, - /* 1720 */ 52, 819, 2859, 59, 57, 56, 55, 54, 2841, 2103, - /* 1730 */ 374, 373, 376, 375, 2119, 378, 377, 884, 2806, 802, - /* 1740 */ 896, 925, 2146, 897, 1886, 380, 379, 3054, 2022, 382, - /* 1750 */ 381, 2091, 1876, 384, 383, 1629, 964, 965, 48, 492, - /* 1760 */ 1008, 826, 2028, 183, 360, 3060, 231, 1657, 185, 164, - /* 1770 */ 3055, 840, 912, 2859, 488, 182, 1729, 860, 1021, 1648, - /* 1780 */ 1646, 61, 2652, 555, 2565, 2840, 401, 537, 2891, 2806, - /* 1790 */ 2358, 896, 134, 2842, 900, 2844, 2845, 895, 795, 3006, - /* 1800 */ 883, 2892, 919, 3016, 1630, 834, 835, 3074, 330, 2954, - /* 1810 */ 2027, 338, 335, 500, 2950, 2653, 53, 52, 2127, 2128, - /* 1820 */ 59, 57, 56, 55, 54, 2475, 5, 1760, 540, 545, - /* 1830 */ 2096, 462, 2039, 554, 2049, 566, 2840, 565, 239, 2891, - /* 1840 */ 568, 240, 394, 135, 2842, 900, 2844, 2845, 895, 1768, - /* 1850 */ 242, 883, 2892, 919, 1775, 1773, 1910, 2077, 2087, 582, - /* 1860 */ 2954, 188, 2040, 589, 878, 2950, 253, 2126, 2129, 53, - /* 1870 */ 52, 591, 595, 59, 57, 56, 55, 54, 597, 640, - /* 1880 */ 602, 615, 2017, 626, 2015, 2645, 53, 52, 636, 882, - /* 1890 */ 59, 57, 56, 55, 54, 628, 639, 641, 652, 477, - /* 1900 */ 475, 653, 650, 259, 258, 657, 2047, 655, 658, 262, - /* 1910 */ 660, 662, 4, 681, 682, 689, 2042, 2020, 2021, 2074, - /* 1920 */ 690, 2076, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, - /* 1930 */ 2088, 2089, 2090, 892, 885, 692, 270, 917, 916, 881, - /* 1940 */ 2111, 2112, 694, 787, 109, 2118, 2120, 2121, 2122, 2123, - /* 1950 */ 2125, 2, 60, 58, 273, 2048, 695, 2050, 696, 276, - /* 1960 */ 506, 698, 2016, 2051, 855, 278, 2668, 110, 2052, 111, - /* 1970 */ 112, 49, 2662, 1727, 2841, 730, 2014, 707, 2124, 284, - /* 1980 */ 2098, 287, 474, 472, 114, 733, 732, 763, 764, 897, - /* 1990 */ 2736, 3047, 139, 2505, 291, 439, 2501, 2095, 778, 115, - /* 2000 */ 802, 293, 191, 780, 138, 2841, 
735, 310, 3054, 136, - /* 2010 */ 734, 2119, 2043, 178, 884, 2503, 2498, 192, 193, 2859, - /* 2020 */ 897, 802, 397, 2714, 790, 2022, 3060, 231, 791, 3054, - /* 2030 */ 789, 3055, 840, 315, 313, 2806, 2733, 896, 2732, 823, - /* 2040 */ 797, 861, 3021, 794, 3020, 8, 832, 3060, 231, 326, - /* 2050 */ 2859, 806, 3055, 840, 796, 1021, 2841, 2993, 61, 807, - /* 2060 */ 805, 837, 804, 320, 204, 2973, 2806, 328, 896, 324, - /* 2070 */ 322, 897, 327, 2986, 836, 329, 331, 845, 848, 169, - /* 2080 */ 493, 2044, 2840, 2210, 2208, 2891, 217, 340, 179, 134, - /* 2090 */ 2842, 900, 2844, 2845, 895, 2127, 2128, 883, 2892, 919, - /* 2100 */ 3077, 2859, 3053, 398, 3074, 2970, 2954, 859, 334, 2682, - /* 2110 */ 500, 2950, 2681, 898, 1, 2680, 2891, 2806, 497, 896, - /* 2120 */ 135, 2842, 900, 2844, 2845, 895, 864, 872, 883, 2892, - /* 2130 */ 919, 399, 869, 74, 2077, 2087, 865, 2954, 180, 2935, - /* 2140 */ 353, 457, 2950, 904, 2126, 2129, 902, 233, 906, 907, - /* 2150 */ 2798, 2797, 128, 400, 2793, 2518, 2792, 404, 2784, 2017, - /* 2160 */ 358, 2015, 2783, 125, 2840, 387, 882, 2891, 2775, 2774, - /* 2170 */ 921, 134, 2842, 900, 2844, 2845, 895, 1540, 1016, 883, - /* 2180 */ 2892, 919, 2790, 1017, 2789, 2781, 3074, 2780, 2954, 1018, - /* 2190 */ 189, 1013, 500, 2950, 2020, 2021, 2074, 391, 2076, 2079, - /* 2200 */ 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2088, 2089, 2090, - /* 2210 */ 892, 885, 1020, 2769, 917, 916, 881, 2111, 2112, 2768, - /* 2220 */ 2787, 2786, 2118, 2120, 2121, 2122, 2123, 2125, 2, 60, - /* 2230 */ 58, 2778, 2777, 2766, 2765, 390, 2763, 506, 2762, 2016, - /* 2240 */ 2569, 65, 476, 454, 427, 782, 440, 455, 524, 438, - /* 2250 */ 428, 2841, 406, 2014, 408, 2124, 2758, 2757, 2756, 98, - /* 2260 */ 2751, 542, 543, 1967, 1968, 547, 897, 237, 2749, 549, - /* 2270 */ 550, 551, 1966, 2748, 2747, 463, 2745, 557, 2744, 559, - /* 2280 */ 2743, 561, 2742, 563, 1954, 2718, 241, 2717, 2119, 243, - /* 2290 */ 1913, 884, 99, 1912, 2695, 2694, 2859, 2693, 575, 576, - /* 2300 */ 
2692, 2691, 2022, 2635, 580, 1849, 2632, 583, 2631, 2625, - /* 2310 */ 2622, 586, 2806, 246, 896, 248, 102, 2620, 587, 2621, - /* 2320 */ 2619, 2624, 2623, 2618, 2617, 2615, 2614, 449, 448, 2613, - /* 2330 */ 2612, 2610, 1021, 2841, 603, 15, 250, 513, 605, 2609, - /* 2340 */ 2608, 2607, 2606, 2630, 2605, 2604, 2603, 2628, 897, 2611, - /* 2350 */ 2602, 522, 2601, 2124, 2600, 2598, 2597, 2596, 2595, 2840, - /* 2360 */ 2594, 2593, 2891, 252, 2592, 108, 134, 2842, 900, 2844, - /* 2370 */ 2845, 895, 2127, 2128, 883, 2892, 919, 2591, 2859, 257, - /* 2380 */ 2582, 2927, 2590, 2954, 2589, 2587, 2119, 500, 2950, 2586, - /* 2390 */ 2660, 2629, 2627, 2585, 2806, 2584, 896, 1855, 2583, 643, - /* 2400 */ 2581, 645, 2580, 2578, 2406, 1696, 260, 2405, 1700, 261, - /* 2410 */ 1692, 2077, 2087, 2404, 451, 452, 2403, 2401, 2398, 263, - /* 2420 */ 665, 2126, 2129, 663, 264, 664, 2397, 668, 2390, 672, - /* 2430 */ 667, 2377, 669, 671, 2365, 675, 2017, 677, 2015, 2364, - /* 2440 */ 673, 2840, 679, 882, 2891, 2347, 1569, 212, 134, 2842, - /* 2450 */ 900, 2844, 2845, 895, 94, 266, 883, 2892, 919, 2346, - /* 2460 */ 2826, 2716, 223, 876, 268, 2954, 687, 95, 2712, 500, - /* 2470 */ 2950, 2020, 2021, 2074, 2702, 2076, 2079, 2080, 2081, 2082, - /* 2480 */ 2083, 2084, 2085, 2086, 2088, 2089, 2090, 892, 885, 784, - /* 2490 */ 2690, 917, 916, 881, 2111, 2112, 275, 2689, 277, 2118, - /* 2500 */ 2120, 2121, 2122, 2123, 2125, 2, 280, 1024, 2666, 282, - /* 2510 */ 2659, 2493, 1622, 2400, 2396, 2841, 708, 709, 2394, 710, - /* 2520 */ 712, 713, 714, 2392, 717, 716, 393, 718, 2389, 720, - /* 2530 */ 897, 722, 2372, 721, 2007, 2370, 1983, 2371, 2369, 2366, - /* 2540 */ 728, 1012, 1010, 2343, 2495, 221, 1779, 290, 86, 2494, - /* 2550 */ 2387, 1780, 1682, 1681, 1006, 1002, 998, 994, 1680, 388, - /* 2560 */ 2859, 1677, 1675, 1673, 981, 1672, 983, 515, 514, 2008, - /* 2570 */ 1671, 2385, 1670, 481, 1664, 482, 2806, 1669, 896, 2376, - /* 2580 */ 483, 1666, 1665, 1663, 2374, 484, 2342, 2341, 2340, 917, - /* 
2590 */ 916, 761, 765, 2339, 2338, 767, 769, 2118, 2120, 2121, - /* 2600 */ 2122, 2123, 2125, 2337, 771, 133, 1948, 758, 1950, 141, - /* 2610 */ 361, 1947, 2715, 1952, 33, 312, 80, 2711, 69, 1919, - /* 2620 */ 1921, 2841, 2701, 2840, 792, 70, 2891, 1923, 2688, 316, - /* 2630 */ 202, 2842, 900, 2844, 2845, 895, 897, 1938, 883, 2892, - /* 2640 */ 919, 867, 793, 2687, 2841, 1898, 798, 194, 800, 1897, - /* 2650 */ 3059, 22, 17, 808, 25, 781, 811, 489, 35, 897, - /* 2660 */ 6, 2265, 7, 23, 228, 24, 2859, 216, 38, 229, - /* 2670 */ 2827, 78, 319, 2205, 26, 2280, 2239, 18, 2279, 36, - /* 2680 */ 817, 2237, 2806, 813, 896, 815, 359, 494, 873, 2859, - /* 2690 */ 2203, 342, 841, 3075, 2284, 321, 341, 2246, 203, 215, - /* 2700 */ 2283, 495, 72, 37, 336, 2806, 503, 896, 208, 2231, - /* 2710 */ 96, 2686, 2665, 2664, 2201, 311, 2285, 230, 120, 2286, - /* 2720 */ 2841, 121, 351, 2185, 2658, 2184, 343, 119, 122, 2840, - /* 2730 */ 2241, 27, 2891, 218, 349, 897, 437, 2842, 900, 2844, - /* 2740 */ 2845, 895, 82, 71, 883, 2892, 919, 11, 868, 13, - /* 2750 */ 2032, 209, 2840, 2147, 219, 2891, 352, 2067, 21, 199, - /* 2760 */ 2842, 900, 2844, 2845, 895, 2859, 862, 883, 2892, 919, - /* 2770 */ 2137, 2841, 870, 28, 29, 354, 2657, 2136, 2094, 2490, - /* 2780 */ 20, 2806, 2093, 896, 50, 932, 897, 123, 935, 938, - /* 2790 */ 941, 51, 910, 362, 2092, 2059, 16, 30, 31, 83, - /* 2800 */ 905, 903, 2106, 356, 2841, 490, 124, 129, 2299, 92, - /* 2810 */ 3013, 911, 2904, 2903, 918, 81, 2859, 924, 920, 897, - /* 2820 */ 1761, 525, 926, 386, 1758, 928, 929, 931, 2840, 1757, - /* 2830 */ 934, 2891, 2806, 1754, 896, 437, 2842, 900, 2844, 2845, - /* 2840 */ 895, 1748, 937, 883, 2892, 919, 2298, 940, 899, 2859, - /* 2850 */ 1746, 130, 131, 1774, 93, 1752, 491, 1751, 1750, 1749, - /* 2860 */ 1770, 1620, 955, 1660, 1659, 2806, 1658, 896, 1655, 2841, - /* 2870 */ 1652, 1651, 1650, 1649, 969, 1647, 1645, 1690, 1644, 2840, - /* 2880 */ 1643, 1689, 2891, 971, 897, 235, 437, 2842, 900, 2844, - /* 2890 */ 2845, 
895, 2841, 1641, 883, 2892, 919, 1640, 1639, 1638, - /* 2900 */ 1637, 1636, 1635, 1686, 1684, 1632, 1631, 894, 1628, 1627, - /* 2910 */ 1626, 1625, 2840, 992, 2859, 2891, 2395, 991, 993, 430, - /* 2920 */ 2842, 900, 2844, 2845, 895, 2393, 995, 883, 2892, 919, - /* 2930 */ 2806, 997, 896, 996, 2391, 999, 1000, 2859, 1001, 2388, - /* 2940 */ 1003, 1004, 1005, 2368, 1007, 2363, 1009, 2362, 1011, 2336, - /* 2950 */ 1558, 1541, 1015, 2806, 1546, 896, 1548, 392, 1019, 1022, - /* 2960 */ 2841, 2302, 2018, 405, 1023, 2302, 2302, 2302, 2302, 2302, - /* 2970 */ 2302, 2302, 2302, 831, 2302, 897, 2302, 2840, 2302, 2302, - /* 2980 */ 2891, 2302, 2302, 2302, 202, 2842, 900, 2844, 2845, 895, - /* 2990 */ 2302, 2302, 883, 2892, 919, 2302, 2302, 2302, 2302, 2302, - /* 3000 */ 2840, 2302, 2302, 2891, 2302, 2859, 2302, 436, 2842, 900, - /* 3010 */ 2844, 2845, 895, 2302, 2302, 883, 2892, 919, 2302, 2920, - /* 3020 */ 2302, 2806, 2302, 896, 2302, 2841, 2302, 289, 2302, 2302, - /* 3030 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3040 */ 897, 2302, 2302, 729, 2302, 504, 206, 3076, 2302, 2302, - /* 3050 */ 2302, 2302, 2841, 2302, 2302, 723, 719, 715, 711, 2302, - /* 3060 */ 288, 2302, 2302, 2302, 2302, 2302, 2302, 897, 2840, 2302, - /* 3070 */ 2859, 2891, 2302, 2302, 2302, 437, 2842, 900, 2844, 2845, - /* 3080 */ 895, 2302, 2302, 883, 2892, 919, 2806, 2302, 896, 2302, - /* 3090 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2859, 2302, 2302, - /* 3100 */ 2302, 2302, 2302, 2302, 2302, 2302, 286, 2302, 2302, 2302, - /* 3110 */ 499, 285, 2302, 2806, 2302, 896, 2302, 2302, 2302, 2302, - /* 3120 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3130 */ 2302, 2302, 2302, 2840, 2302, 2302, 2891, 505, 2841, 2302, - /* 3140 */ 422, 2842, 900, 2844, 2845, 895, 2302, 2302, 883, 2892, - /* 3150 */ 919, 2302, 2302, 897, 2302, 2302, 2302, 2302, 2841, 2302, - /* 3160 */ 2840, 2302, 2302, 2891, 2302, 2302, 2302, 437, 2842, 900, - /* 3170 */ 2844, 2845, 895, 897, 
2302, 883, 2892, 919, 272, 2841, - /* 3180 */ 2302, 2302, 2302, 2859, 2302, 2302, 2302, 283, 2302, 2302, - /* 3190 */ 2302, 274, 281, 2302, 897, 2302, 2302, 279, 697, 2806, - /* 3200 */ 2302, 896, 2302, 2859, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3210 */ 2302, 2302, 2302, 2302, 2302, 2302, 271, 2302, 2302, 2806, - /* 3220 */ 2302, 896, 2302, 507, 2859, 2302, 2302, 2302, 2302, 2302, - /* 3230 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3240 */ 2806, 2302, 896, 2302, 2302, 2302, 2840, 2302, 2302, 2891, - /* 3250 */ 2302, 2302, 2302, 437, 2842, 900, 2844, 2845, 895, 2302, - /* 3260 */ 2302, 883, 2892, 919, 2302, 2302, 2840, 2302, 2302, 2891, - /* 3270 */ 2302, 2302, 2302, 418, 2842, 900, 2844, 2845, 895, 2302, - /* 3280 */ 2302, 883, 2892, 919, 2302, 2841, 2302, 777, 2302, 2302, - /* 3290 */ 2891, 2302, 2302, 2302, 432, 2842, 900, 2844, 2845, 895, - /* 3300 */ 897, 2302, 883, 2892, 919, 2841, 2302, 2302, 2302, 2302, - /* 3310 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3320 */ 897, 2302, 2302, 2302, 2302, 2841, 2302, 2302, 2302, 2302, - /* 3330 */ 2859, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3340 */ 897, 2302, 2302, 2302, 2302, 2302, 2806, 2302, 896, 2302, - /* 3350 */ 2859, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3360 */ 2302, 2302, 2302, 2302, 2302, 2302, 2806, 2302, 896, 2302, - /* 3370 */ 2859, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3380 */ 2302, 2302, 2302, 2302, 2302, 2302, 2806, 2302, 896, 2302, - /* 3390 */ 2302, 2302, 2302, 2840, 2302, 2302, 2891, 2302, 2302, 2302, - /* 3400 */ 414, 2842, 900, 2844, 2845, 895, 2302, 2302, 883, 2892, - /* 3410 */ 919, 2302, 2841, 2840, 2302, 2302, 2891, 2302, 2302, 2302, - /* 3420 */ 410, 2842, 900, 2844, 2845, 895, 2302, 897, 883, 2892, - /* 3430 */ 919, 2302, 2302, 2840, 2302, 2302, 2891, 2302, 2841, 2302, - /* 3440 */ 411, 2842, 900, 2844, 2845, 895, 2302, 2302, 883, 2892, - /* 3450 */ 919, 2302, 2302, 897, 2302, 
2302, 2302, 2859, 2302, 2302, - /* 3460 */ 2302, 2841, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3470 */ 2302, 2302, 2302, 2806, 2302, 896, 897, 2302, 2302, 2302, - /* 3480 */ 2302, 2841, 2302, 2859, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3490 */ 2302, 2302, 2302, 2302, 2302, 2302, 897, 2302, 2302, 2806, - /* 3500 */ 2302, 896, 2302, 2841, 2302, 2302, 2859, 2302, 2302, 2302, - /* 3510 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 897, 2302, - /* 3520 */ 2840, 2302, 2806, 2891, 896, 2302, 2859, 415, 2842, 900, - /* 3530 */ 2844, 2845, 895, 2302, 2302, 883, 2892, 919, 2302, 2302, - /* 3540 */ 2302, 2302, 2806, 2302, 896, 2302, 2840, 2302, 2859, 2891, - /* 3550 */ 2302, 2302, 2302, 429, 2842, 900, 2844, 2845, 895, 2302, - /* 3560 */ 2302, 883, 2892, 919, 2806, 2302, 896, 2302, 2841, 2840, - /* 3570 */ 2302, 2302, 2891, 2302, 2302, 2302, 416, 2842, 900, 2844, - /* 3580 */ 2845, 895, 2302, 897, 883, 2892, 919, 2841, 2302, 2840, - /* 3590 */ 2302, 2302, 2891, 2302, 2302, 2302, 417, 2842, 900, 2844, - /* 3600 */ 2845, 895, 897, 2302, 883, 2892, 919, 2302, 2302, 2841, - /* 3610 */ 2302, 2840, 2302, 2859, 2891, 2302, 2302, 2302, 433, 2842, - /* 3620 */ 900, 2844, 2845, 895, 897, 2302, 883, 2892, 919, 2806, - /* 3630 */ 2302, 896, 2859, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3640 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2806, 2302, - /* 3650 */ 896, 2302, 2302, 2302, 2859, 2302, 2302, 2302, 2302, 2302, - /* 3660 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3670 */ 2806, 2302, 896, 2302, 2841, 2302, 2840, 2302, 2302, 2891, - /* 3680 */ 2302, 2302, 2302, 419, 2842, 900, 2844, 2845, 895, 897, - /* 3690 */ 2302, 883, 2892, 919, 2302, 2840, 2302, 2302, 2891, 2302, - /* 3700 */ 2302, 2302, 434, 2842, 900, 2844, 2845, 895, 2302, 2302, - /* 3710 */ 883, 2892, 919, 2302, 2302, 2302, 2302, 2840, 2302, 2859, - /* 3720 */ 2891, 2302, 2302, 2841, 420, 2842, 900, 2844, 2845, 895, - /* 3730 */ 2302, 2302, 883, 2892, 919, 2806, 
2302, 896, 897, 2302, - /* 3740 */ 2302, 2302, 2302, 2841, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3750 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 897, 2302, - /* 3760 */ 2302, 2302, 2302, 2841, 2302, 2302, 2302, 2302, 2859, 2302, - /* 3770 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 897, 2302, - /* 3780 */ 2302, 2302, 2840, 2302, 2806, 2891, 896, 2302, 2859, 435, - /* 3790 */ 2842, 900, 2844, 2845, 895, 2302, 2302, 883, 2892, 919, - /* 3800 */ 2302, 2302, 2302, 2302, 2806, 2302, 896, 2302, 2859, 2302, - /* 3810 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3820 */ 2302, 2302, 2302, 2302, 2806, 2302, 896, 2302, 2302, 2302, - /* 3830 */ 2302, 2840, 2302, 2302, 2891, 2302, 2302, 2302, 421, 2842, - /* 3840 */ 900, 2844, 2845, 895, 2302, 2302, 883, 2892, 919, 2302, - /* 3850 */ 2841, 2840, 2302, 2302, 2891, 2302, 2302, 2302, 412, 2842, - /* 3860 */ 900, 2844, 2845, 895, 2302, 897, 883, 2892, 919, 2841, - /* 3870 */ 2302, 2840, 2302, 2302, 2891, 2302, 2302, 2302, 423, 2842, - /* 3880 */ 900, 2844, 2845, 895, 897, 2302, 883, 2892, 919, 2841, - /* 3890 */ 2302, 2302, 2302, 2302, 2302, 2859, 2302, 2302, 2302, 2302, - /* 3900 */ 2302, 2302, 2302, 2302, 897, 2302, 2302, 2302, 2302, 2302, - /* 3910 */ 2302, 2806, 2302, 896, 2859, 2302, 2302, 2302, 2302, 2302, - /* 3920 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3930 */ 2806, 2302, 896, 2302, 2859, 2302, 2302, 2302, 2302, 2302, - /* 3940 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 3950 */ 2806, 2302, 896, 2302, 2302, 2302, 2841, 2302, 2840, 2302, - /* 3960 */ 2302, 2891, 2302, 2302, 2302, 424, 2842, 900, 2844, 2845, - /* 3970 */ 895, 897, 2302, 883, 2892, 919, 2841, 2840, 2302, 2302, - /* 3980 */ 2891, 2302, 2302, 2302, 425, 2842, 900, 2844, 2845, 895, - /* 3990 */ 2302, 897, 883, 2892, 919, 2841, 2302, 2840, 2302, 2302, - /* 4000 */ 2891, 2859, 2302, 2302, 426, 2842, 900, 2844, 2845, 895, - /* 4010 */ 897, 2302, 883, 2892, 919, 2302, 2302, 
2806, 2302, 896, - /* 4020 */ 2302, 2859, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 4030 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2806, 2302, 896, - /* 4040 */ 2859, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 4050 */ 2302, 2302, 2302, 2302, 2302, 2302, 2806, 2302, 896, 2302, - /* 4060 */ 2302, 2302, 2302, 2841, 2840, 2302, 2302, 2891, 2302, 2302, - /* 4070 */ 2302, 442, 2842, 900, 2844, 2845, 895, 2302, 897, 883, - /* 4080 */ 2892, 919, 2841, 2302, 2840, 2302, 2302, 2891, 2302, 2302, - /* 4090 */ 2302, 443, 2842, 900, 2844, 2845, 895, 897, 2302, 883, - /* 4100 */ 2892, 919, 2302, 2840, 2302, 2302, 2891, 2302, 2859, 2302, - /* 4110 */ 2853, 2842, 900, 2844, 2845, 895, 2302, 2302, 883, 2892, - /* 4120 */ 919, 2302, 2302, 2302, 2806, 2302, 896, 2859, 2302, 2302, - /* 4130 */ 2302, 2841, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 4140 */ 2302, 2302, 2302, 2806, 2302, 896, 897, 2302, 2302, 2302, - /* 4150 */ 2302, 2841, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 4160 */ 2302, 2302, 2302, 2302, 2302, 2302, 897, 2302, 2302, 2302, - /* 4170 */ 2302, 2840, 2302, 2302, 2891, 2302, 2859, 2302, 2852, 2842, - /* 4180 */ 900, 2844, 2845, 895, 2302, 2302, 883, 2892, 919, 2302, - /* 4190 */ 2840, 2302, 2806, 2891, 896, 2302, 2859, 2851, 2842, 900, - /* 4200 */ 2844, 2845, 895, 2302, 2302, 883, 2892, 919, 2302, 2302, - /* 4210 */ 2302, 2302, 2806, 2302, 896, 2302, 2302, 2302, 2302, 2841, - /* 4220 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 4230 */ 2302, 2302, 2302, 2302, 897, 2302, 2302, 2302, 2302, 2840, - /* 4240 */ 2302, 2302, 2891, 2841, 2302, 2302, 459, 2842, 900, 2844, - /* 4250 */ 2845, 895, 2302, 2302, 883, 2892, 919, 2302, 897, 2840, - /* 4260 */ 2302, 2302, 2891, 2302, 2859, 2302, 460, 2842, 900, 2844, - /* 4270 */ 2845, 895, 2302, 2302, 883, 2892, 919, 2302, 2302, 2302, - /* 4280 */ 2806, 2302, 896, 2302, 2302, 2302, 2302, 2302, 2859, 2302, - /* 4290 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 
2302, 2302, 2302, - /* 4300 */ 2302, 2302, 2302, 2302, 2806, 2302, 896, 2302, 2302, 2302, - /* 4310 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 4320 */ 2302, 2302, 2302, 2302, 2302, 2841, 2302, 2840, 2302, 2302, - /* 4330 */ 2891, 2302, 2302, 2302, 456, 2842, 900, 2844, 2845, 895, - /* 4340 */ 897, 2302, 883, 2892, 919, 2302, 2302, 2302, 2302, 2841, - /* 4350 */ 2302, 2840, 2302, 2302, 2891, 2302, 2302, 2302, 461, 2842, - /* 4360 */ 900, 2844, 2845, 895, 897, 2302, 883, 2892, 919, 2302, - /* 4370 */ 2859, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 4380 */ 2302, 2302, 2302, 2302, 2302, 2302, 2806, 2302, 896, 2302, - /* 4390 */ 2302, 2302, 2302, 2302, 2859, 2302, 2302, 2302, 2302, 2302, - /* 4400 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 4410 */ 2806, 2302, 896, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 4420 */ 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, 2302, - /* 4430 */ 2302, 2302, 2302, 898, 2302, 2302, 2891, 2302, 2302, 2302, - /* 4440 */ 432, 2842, 900, 2844, 2845, 895, 2302, 2302, 883, 2892, - /* 4450 */ 919, 2302, 2302, 2302, 2302, 2302, 2302, 2840, 2302, 2302, - /* 4460 */ 2891, 2302, 2302, 2302, 431, 2842, 900, 2844, 2845, 895, - /* 4470 */ 2302, 2302, 883, 2892, 919, -}; -static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 536, 401, 536, 401, 404, 405, 404, 405, 544, 437, - /* 10 */ 544, 423, 12, 13, 4, 12, 13, 14, 15, 16, - /* 20 */ 20, 0, 22, 438, 406, 407, 562, 563, 456, 563, - /* 30 */ 423, 567, 568, 567, 568, 538, 36, 540, 38, 0, - /* 40 */ 20, 453, 21, 392, 426, 24, 25, 26, 27, 28, - /* 50 */ 29, 30, 31, 32, 20, 406, 407, 431, 407, 34, - /* 60 */ 453, 8, 9, 437, 54, 12, 13, 14, 15, 16, - /* 70 */ 0, 71, 21, 20, 74, 24, 25, 26, 27, 28, - /* 80 */ 29, 30, 31, 32, 499, 85, 20, 455, 437, 20, - /* 90 */ 464, 503, 504, 505, 24, 25, 26, 27, 28, 29, - /* 100 */ 30, 31, 32, 14, 453, 517, 455, 475, 476, 20, - /* 110 */ 503, 504, 20, 479, 22, 115, 406, 407, 118, 80, - /* 120 */ 
81, 82, 83, 84, 517, 86, 87, 88, 89, 90, - /* 130 */ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, - /* 140 */ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 150 */ 111, 500, 118, 61, 503, 155, 156, 416, 507, 508, - /* 160 */ 509, 510, 511, 512, 118, 424, 515, 516, 517, 20, - /* 170 */ 536, 21, 20, 522, 22, 524, 20, 71, 544, 528, - /* 180 */ 529, 532, 533, 534, 118, 536, 537, 37, 36, 39, - /* 190 */ 40, 41, 42, 544, 194, 195, 562, 563, 391, 548, - /* 200 */ 393, 567, 568, 3, 204, 205, 451, 556, 427, 454, - /* 210 */ 455, 562, 563, 61, 194, 195, 567, 568, 437, 219, - /* 220 */ 20, 221, 199, 117, 20, 44, 226, 14, 122, 448, - /* 230 */ 8, 9, 20, 20, 12, 13, 14, 15, 16, 57, - /* 240 */ 118, 531, 532, 533, 534, 4, 536, 537, 66, 196, - /* 250 */ 71, 69, 70, 406, 254, 255, 256, 392, 258, 259, - /* 260 */ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, - /* 270 */ 270, 271, 407, 20, 274, 275, 276, 277, 278, 279, - /* 280 */ 280, 281, 282, 283, 284, 285, 286, 287, 288, 12, - /* 290 */ 13, 14, 406, 392, 18, 189, 20, 20, 118, 22, - /* 300 */ 78, 122, 437, 27, 457, 458, 30, 460, 407, 256, - /* 310 */ 463, 155, 156, 36, 38, 38, 406, 407, 453, 119, - /* 320 */ 455, 0, 8, 9, 120, 256, 12, 13, 14, 15, - /* 330 */ 16, 431, 56, 57, 0, 59, 426, 437, 437, 158, - /* 340 */ 64, 65, 319, 320, 321, 322, 323, 313, 71, 455, - /* 350 */ 128, 74, 76, 20, 453, 78, 455, 471, 472, 313, - /* 360 */ 204, 205, 85, 469, 464, 500, 20, 392, 503, 475, - /* 370 */ 476, 118, 507, 508, 509, 510, 511, 512, 513, 313, - /* 380 */ 515, 516, 517, 518, 519, 148, 149, 150, 151, 152, - /* 390 */ 153, 154, 115, 117, 422, 118, 196, 425, 218, 20, - /* 400 */ 220, 500, 254, 438, 503, 129, 455, 194, 507, 508, - /* 410 */ 509, 510, 511, 512, 449, 193, 515, 516, 517, 20, - /* 420 */ 469, 520, 436, 522, 523, 524, 475, 476, 453, 528, - /* 430 */ 529, 251, 155, 156, 52, 313, 160, 161, 452, 163, - /* 440 */ 164, 165, 166, 167, 168, 169, 170, 171, 172, 406, - /* 450 */ 407, 175, 176, 177, 178, 179, 180, 181, 182, 402, - 
/* 460 */ 184, 185, 186, 406, 20, 408, 190, 191, 192, 256, - /* 470 */ 437, 194, 195, 197, 326, 327, 328, 329, 330, 331, - /* 480 */ 332, 204, 205, 302, 303, 304, 305, 306, 307, 308, - /* 490 */ 309, 310, 158, 313, 272, 174, 219, 464, 221, 2, - /* 500 */ 179, 155, 156, 226, 461, 8, 9, 173, 187, 12, - /* 510 */ 13, 14, 15, 16, 292, 293, 294, 295, 296, 297, - /* 520 */ 298, 299, 300, 301, 4, 211, 306, 307, 308, 309, - /* 530 */ 310, 254, 255, 256, 392, 258, 259, 260, 261, 262, - /* 540 */ 263, 264, 265, 266, 267, 268, 269, 270, 271, 407, - /* 550 */ 536, 274, 275, 276, 277, 278, 315, 406, 544, 282, - /* 560 */ 283, 284, 285, 286, 287, 288, 12, 13, 392, 158, - /* 570 */ 22, 407, 12, 13, 20, 196, 22, 563, 196, 437, - /* 580 */ 437, 567, 568, 407, 36, 14, 525, 526, 115, 207, - /* 590 */ 36, 20, 38, 20, 402, 453, 36, 455, 406, 456, - /* 600 */ 408, 437, 4, 130, 131, 132, 133, 134, 135, 136, - /* 610 */ 137, 138, 139, 437, 141, 142, 143, 144, 145, 146, - /* 620 */ 147, 23, 438, 439, 406, 71, 437, 506, 74, 453, - /* 630 */ 231, 455, 78, 85, 437, 406, 407, 448, 118, 85, - /* 640 */ 489, 490, 500, 446, 447, 503, 48, 49, 50, 507, - /* 650 */ 508, 509, 510, 511, 512, 341, 535, 515, 516, 517, - /* 660 */ 406, 407, 77, 115, 522, 437, 524, 411, 412, 115, - /* 670 */ 528, 529, 118, 509, 446, 447, 500, 406, 407, 503, - /* 680 */ 426, 392, 20, 507, 508, 509, 510, 511, 512, 435, - /* 690 */ 461, 515, 516, 517, 497, 498, 407, 426, 556, 0, - /* 700 */ 524, 14, 15, 16, 528, 529, 435, 489, 490, 155, - /* 710 */ 156, 406, 407, 302, 303, 304, 305, 306, 307, 308, - /* 720 */ 309, 310, 36, 8, 9, 77, 437, 12, 13, 14, - /* 730 */ 15, 16, 80, 81, 82, 406, 407, 1, 2, 87, - /* 740 */ 88, 89, 453, 129, 455, 93, 36, 36, 194, 195, - /* 750 */ 98, 99, 100, 101, 158, 289, 104, 76, 204, 205, - /* 760 */ 108, 109, 110, 111, 506, 194, 461, 219, 85, 221, - /* 770 */ 454, 455, 2, 219, 22, 221, 77, 20, 8, 9, - /* 780 */ 226, 221, 12, 13, 14, 15, 16, 440, 36, 500, - /* 790 */ 443, 406, 503, 535, 20, 85, 507, 
508, 509, 510, - /* 800 */ 511, 512, 254, 255, 515, 516, 517, 389, 254, 255, - /* 810 */ 256, 392, 258, 259, 260, 261, 262, 263, 264, 265, - /* 820 */ 266, 267, 268, 269, 270, 271, 407, 256, 274, 275, - /* 830 */ 276, 277, 278, 313, 119, 392, 282, 283, 284, 285, - /* 840 */ 286, 287, 288, 289, 12, 13, 557, 558, 406, 407, - /* 850 */ 503, 466, 20, 468, 22, 119, 437, 129, 196, 22, - /* 860 */ 115, 532, 533, 534, 517, 536, 537, 115, 36, 506, - /* 870 */ 38, 189, 453, 36, 455, 130, 131, 132, 133, 134, - /* 880 */ 135, 136, 137, 138, 139, 437, 141, 142, 143, 144, - /* 890 */ 145, 146, 147, 445, 392, 214, 453, 479, 535, 303, - /* 900 */ 304, 305, 484, 71, 456, 223, 74, 221, 415, 437, - /* 910 */ 78, 62, 63, 406, 407, 23, 149, 85, 256, 500, - /* 920 */ 440, 413, 503, 242, 243, 94, 507, 508, 509, 510, - /* 930 */ 511, 512, 221, 426, 515, 516, 517, 444, 252, 253, - /* 940 */ 44, 49, 50, 524, 392, 406, 407, 115, 529, 441, - /* 950 */ 118, 440, 115, 196, 536, 453, 485, 406, 407, 407, - /* 960 */ 479, 409, 544, 252, 253, 426, 18, 71, 194, 497, - /* 970 */ 498, 23, 12, 13, 532, 533, 534, 426, 536, 537, - /* 980 */ 562, 563, 22, 503, 536, 567, 568, 155, 156, 437, - /* 990 */ 42, 43, 544, 162, 46, 392, 36, 517, 38, 411, - /* 1000 */ 412, 234, 235, 55, 0, 453, 438, 455, 60, 392, - /* 1010 */ 562, 563, 438, 256, 503, 567, 568, 536, 187, 188, - /* 1020 */ 72, 73, 74, 75, 76, 544, 194, 195, 517, 413, - /* 1030 */ 256, 71, 201, 33, 8, 9, 204, 205, 12, 13, - /* 1040 */ 14, 15, 16, 562, 563, 85, 430, 47, 567, 568, - /* 1050 */ 392, 219, 500, 221, 415, 503, 453, 441, 226, 507, - /* 1060 */ 508, 509, 510, 511, 512, 22, 118, 515, 516, 517, - /* 1070 */ 453, 420, 421, 434, 522, 115, 524, 149, 150, 36, - /* 1080 */ 528, 529, 154, 444, 479, 3, 254, 255, 256, 484, - /* 1090 */ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, - /* 1100 */ 268, 269, 270, 271, 13, 157, 274, 275, 276, 277, - /* 1110 */ 278, 453, 420, 421, 282, 283, 284, 285, 286, 287, - /* 1120 */ 288, 12, 13, 14, 406, 407, 392, 
392, 85, 20, - /* 1130 */ 392, 22, 8, 9, 395, 396, 12, 13, 14, 15, - /* 1140 */ 16, 536, 150, 437, 426, 36, 154, 38, 424, 544, - /* 1150 */ 4, 445, 148, 149, 150, 151, 152, 153, 154, 211, - /* 1160 */ 212, 213, 456, 33, 216, 19, 437, 562, 563, 0, - /* 1170 */ 392, 47, 567, 568, 445, 438, 85, 229, 230, 219, - /* 1180 */ 71, 221, 428, 74, 38, 456, 432, 453, 453, 241, - /* 1190 */ 0, 453, 244, 438, 85, 247, 248, 249, 250, 251, - /* 1200 */ 432, 8, 9, 57, 438, 12, 13, 14, 15, 16, - /* 1210 */ 64, 65, 406, 407, 254, 255, 289, 71, 291, 392, - /* 1220 */ 228, 392, 438, 437, 115, 233, 33, 118, 236, 437, - /* 1230 */ 238, 453, 426, 447, 274, 275, 407, 445, 409, 33, - /* 1240 */ 406, 407, 282, 283, 284, 285, 286, 287, 456, 406, - /* 1250 */ 407, 8, 9, 406, 407, 12, 13, 14, 15, 16, - /* 1260 */ 426, 313, 494, 117, 155, 156, 437, 20, 122, 426, - /* 1270 */ 80, 81, 82, 426, 20, 0, 33, 87, 88, 89, - /* 1280 */ 453, 437, 453, 93, 455, 406, 407, 392, 98, 99, - /* 1290 */ 100, 101, 571, 392, 104, 406, 407, 437, 108, 109, - /* 1300 */ 110, 111, 0, 194, 195, 426, 406, 407, 521, 521, - /* 1310 */ 523, 523, 119, 204, 205, 426, 456, 148, 149, 150, - /* 1320 */ 151, 152, 153, 154, 22, 119, 426, 392, 219, 500, - /* 1330 */ 221, 193, 503, 13, 33, 226, 507, 508, 509, 510, - /* 1340 */ 511, 512, 498, 393, 515, 516, 517, 450, 453, 450, - /* 1350 */ 453, 522, 453, 524, 453, 392, 148, 528, 529, 392, - /* 1360 */ 152, 538, 119, 254, 255, 256, 392, 258, 259, 260, - /* 1370 */ 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, - /* 1380 */ 271, 407, 0, 274, 275, 276, 277, 278, 453, 406, - /* 1390 */ 407, 282, 283, 284, 285, 286, 287, 288, 12, 13, - /* 1400 */ 392, 406, 407, 406, 407, 85, 20, 0, 22, 426, - /* 1410 */ 272, 437, 406, 407, 392, 407, 453, 409, 336, 392, - /* 1420 */ 453, 426, 36, 426, 38, 520, 44, 453, 523, 455, - /* 1430 */ 292, 392, 426, 8, 9, 406, 407, 12, 13, 14, - /* 1440 */ 15, 16, 8, 9, 427, 437, 12, 13, 14, 15, - /* 1450 */ 16, 55, 406, 407, 437, 426, 14, 71, 406, 407, - /* 1460 
*/ 74, 453, 20, 455, 57, 448, 450, 406, 407, 453, - /* 1470 */ 340, 85, 426, 0, 500, 453, 465, 503, 426, 33, - /* 1480 */ 453, 507, 508, 509, 510, 511, 512, 426, 514, 515, - /* 1490 */ 516, 517, 453, 47, 129, 0, 123, 123, 392, 126, - /* 1500 */ 126, 115, 33, 256, 118, 119, 155, 156, 500, 0, - /* 1510 */ 256, 503, 13, 407, 36, 507, 508, 509, 510, 511, - /* 1520 */ 512, 246, 33, 515, 516, 517, 123, 0, 33, 126, - /* 1530 */ 522, 123, 524, 33, 126, 36, 528, 529, 0, 13, - /* 1540 */ 33, 155, 156, 437, 119, 0, 33, 560, 183, 22, - /* 1550 */ 8, 9, 33, 119, 12, 13, 14, 15, 16, 453, - /* 1560 */ 22, 455, 36, 85, 1, 2, 78, 22, 8, 9, - /* 1570 */ 12, 13, 12, 13, 14, 15, 16, 36, 8, 9, - /* 1580 */ 194, 195, 12, 13, 14, 15, 16, 553, 119, 47, - /* 1590 */ 204, 205, 8, 9, 33, 33, 12, 13, 14, 15, - /* 1600 */ 16, 33, 33, 12, 13, 219, 500, 221, 119, 503, - /* 1610 */ 33, 539, 226, 507, 508, 509, 510, 511, 512, 119, - /* 1620 */ 33, 515, 516, 517, 51, 36, 119, 410, 522, 437, - /* 1630 */ 524, 158, 119, 237, 528, 529, 12, 13, 119, 338, - /* 1640 */ 254, 255, 256, 33, 258, 259, 260, 261, 262, 263, - /* 1650 */ 264, 265, 266, 267, 268, 269, 270, 271, 33, 423, - /* 1660 */ 274, 275, 276, 277, 278, 33, 33, 158, 282, 283, - /* 1670 */ 284, 285, 286, 287, 288, 12, 13, 392, 33, 119, - /* 1680 */ 119, 119, 479, 20, 423, 22, 33, 119, 119, 119, - /* 1690 */ 33, 118, 407, 118, 409, 120, 119, 405, 273, 36, - /* 1700 */ 33, 38, 0, 119, 12, 13, 119, 8, 9, 465, - /* 1710 */ 47, 12, 13, 14, 15, 16, 12, 13, 36, 8, - /* 1720 */ 9, 559, 437, 12, 13, 14, 15, 16, 392, 119, - /* 1730 */ 12, 13, 12, 13, 71, 12, 13, 74, 453, 536, - /* 1740 */ 455, 33, 254, 407, 119, 12, 13, 544, 85, 12, - /* 1750 */ 13, 119, 119, 12, 13, 36, 13, 13, 47, 559, - /* 1760 */ 58, 559, 221, 33, 119, 562, 563, 85, 33, 33, - /* 1770 */ 567, 568, 119, 437, 478, 33, 119, 559, 115, 36, - /* 1780 */ 36, 118, 465, 501, 452, 500, 119, 410, 503, 453, - /* 1790 */ 407, 455, 507, 508, 509, 510, 511, 512, 486, 465, - /* 1800 */ 515, 516, 
517, 465, 85, 543, 543, 522, 530, 524, - /* 1810 */ 221, 546, 564, 528, 529, 465, 8, 9, 155, 156, - /* 1820 */ 12, 13, 14, 15, 16, 425, 316, 119, 480, 57, - /* 1830 */ 119, 502, 20, 406, 20, 491, 500, 236, 496, 503, - /* 1840 */ 491, 415, 482, 507, 508, 509, 510, 511, 512, 119, - /* 1850 */ 415, 515, 516, 517, 119, 119, 217, 194, 195, 406, - /* 1860 */ 524, 119, 20, 407, 528, 529, 47, 204, 205, 8, - /* 1870 */ 9, 462, 407, 12, 13, 14, 15, 16, 462, 193, - /* 1880 */ 459, 406, 219, 407, 221, 406, 8, 9, 459, 226, - /* 1890 */ 12, 13, 14, 15, 16, 462, 459, 459, 116, 41, - /* 1900 */ 42, 419, 114, 406, 418, 113, 20, 406, 417, 406, - /* 1910 */ 406, 406, 52, 399, 403, 399, 20, 254, 255, 256, - /* 1920 */ 403, 258, 259, 260, 261, 262, 263, 264, 265, 266, - /* 1930 */ 267, 268, 269, 270, 271, 491, 415, 274, 275, 276, - /* 1940 */ 277, 278, 455, 479, 415, 282, 283, 284, 285, 286, - /* 1950 */ 287, 288, 12, 13, 415, 20, 408, 20, 481, 415, - /* 1960 */ 20, 408, 22, 20, 479, 415, 472, 415, 20, 415, - /* 1970 */ 415, 272, 466, 52, 392, 399, 36, 406, 38, 433, - /* 1980 */ 119, 415, 124, 125, 433, 127, 437, 395, 395, 407, - /* 1990 */ 453, 409, 406, 437, 437, 399, 437, 119, 239, 118, - /* 2000 */ 536, 437, 437, 495, 196, 392, 148, 413, 544, 437, - /* 2010 */ 152, 71, 20, 493, 74, 437, 437, 437, 437, 437, - /* 2020 */ 407, 536, 491, 490, 225, 85, 562, 563, 488, 544, - /* 2030 */ 224, 567, 568, 413, 487, 453, 453, 455, 453, 325, - /* 2040 */ 406, 324, 552, 455, 552, 333, 210, 562, 563, 551, - /* 2050 */ 437, 453, 567, 568, 480, 115, 392, 555, 118, 335, - /* 2060 */ 334, 312, 317, 473, 552, 542, 453, 549, 455, 554, - /* 2070 */ 473, 407, 550, 409, 311, 480, 541, 337, 339, 407, - /* 2080 */ 342, 20, 500, 129, 314, 503, 408, 413, 413, 507, - /* 2090 */ 508, 509, 510, 511, 512, 155, 156, 515, 516, 517, - /* 2100 */ 572, 437, 566, 473, 522, 506, 524, 453, 565, 453, - /* 2110 */ 528, 529, 453, 500, 547, 453, 503, 453, 453, 455, - /* 2120 */ 507, 508, 509, 510, 511, 512, 202, 466, 515, 
516, - /* 2130 */ 517, 473, 453, 118, 194, 195, 470, 524, 413, 527, - /* 2140 */ 413, 528, 529, 453, 204, 205, 202, 545, 467, 466, - /* 2150 */ 453, 453, 118, 432, 453, 407, 453, 406, 453, 219, - /* 2160 */ 413, 221, 453, 413, 500, 413, 226, 503, 453, 453, - /* 2170 */ 442, 507, 508, 509, 510, 511, 512, 22, 394, 515, - /* 2180 */ 516, 517, 453, 35, 453, 453, 522, 453, 524, 37, - /* 2190 */ 397, 40, 528, 529, 254, 255, 256, 400, 258, 259, - /* 2200 */ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, - /* 2210 */ 270, 271, 399, 453, 274, 275, 276, 277, 278, 453, - /* 2220 */ 453, 453, 282, 283, 284, 285, 286, 287, 288, 12, - /* 2230 */ 13, 453, 453, 453, 453, 398, 453, 20, 453, 22, - /* 2240 */ 453, 483, 439, 474, 429, 499, 492, 474, 439, 429, - /* 2250 */ 429, 392, 414, 36, 390, 38, 0, 0, 0, 47, - /* 2260 */ 0, 36, 245, 36, 36, 245, 407, 36, 0, 36, - /* 2270 */ 36, 245, 36, 0, 0, 245, 0, 36, 0, 36, - /* 2280 */ 0, 22, 0, 36, 240, 0, 227, 0, 71, 227, - /* 2290 */ 221, 74, 228, 219, 0, 0, 437, 0, 215, 214, - /* 2300 */ 0, 0, 85, 161, 51, 51, 0, 36, 0, 0, - /* 2310 */ 0, 36, 453, 51, 455, 51, 47, 0, 57, 0, - /* 2320 */ 0, 0, 0, 0, 0, 0, 0, 12, 13, 0, - /* 2330 */ 0, 0, 115, 392, 36, 118, 179, 22, 179, 0, - /* 2340 */ 0, 0, 0, 0, 0, 0, 0, 0, 407, 0, - /* 2350 */ 0, 36, 0, 38, 0, 0, 0, 0, 0, 500, - /* 2360 */ 0, 0, 503, 51, 0, 47, 507, 508, 509, 510, - /* 2370 */ 511, 512, 155, 156, 515, 516, 517, 0, 437, 161, - /* 2380 */ 0, 522, 0, 524, 0, 0, 71, 528, 529, 0, - /* 2390 */ 0, 0, 0, 0, 453, 0, 455, 22, 0, 160, - /* 2400 */ 0, 159, 0, 0, 0, 22, 71, 0, 22, 71, - /* 2410 */ 36, 194, 195, 0, 52, 52, 0, 0, 0, 71, - /* 2420 */ 44, 204, 205, 36, 71, 57, 0, 57, 0, 57, - /* 2430 */ 36, 0, 44, 36, 0, 36, 219, 56, 221, 0, - /* 2440 */ 44, 500, 36, 226, 503, 0, 14, 33, 507, 508, - /* 2450 */ 509, 510, 511, 512, 44, 47, 515, 516, 517, 0, - /* 2460 */ 51, 0, 51, 522, 45, 524, 51, 44, 0, 528, - /* 2470 */ 529, 254, 255, 256, 0, 258, 259, 260, 261, 262, - /* 2480 */ 263, 264, 265, 266, 
267, 268, 269, 270, 271, 1, - /* 2490 */ 0, 274, 275, 276, 277, 278, 44, 0, 210, 282, - /* 2500 */ 283, 284, 285, 286, 287, 288, 51, 19, 0, 51, - /* 2510 */ 0, 0, 79, 0, 0, 392, 36, 57, 0, 44, - /* 2520 */ 36, 57, 44, 0, 57, 36, 38, 44, 0, 36, - /* 2530 */ 407, 44, 0, 57, 219, 0, 221, 0, 0, 0, - /* 2540 */ 36, 53, 54, 0, 0, 57, 22, 126, 128, 0, - /* 2550 */ 0, 36, 22, 36, 66, 67, 68, 69, 36, 71, - /* 2560 */ 437, 36, 36, 36, 33, 36, 33, 252, 253, 254, - /* 2570 */ 36, 0, 36, 22, 22, 22, 453, 36, 455, 0, - /* 2580 */ 22, 36, 36, 36, 0, 22, 0, 0, 0, 274, - /* 2590 */ 275, 36, 36, 0, 0, 36, 36, 282, 283, 284, - /* 2600 */ 285, 286, 287, 0, 22, 117, 36, 59, 36, 20, - /* 2610 */ 122, 36, 0, 119, 118, 51, 118, 0, 196, 36, - /* 2620 */ 22, 392, 0, 500, 22, 196, 503, 226, 0, 202, - /* 2630 */ 507, 508, 509, 510, 511, 512, 407, 232, 515, 516, - /* 2640 */ 517, 153, 196, 0, 392, 196, 206, 222, 206, 196, - /* 2650 */ 3, 33, 318, 22, 318, 231, 36, 36, 118, 407, - /* 2660 */ 52, 119, 52, 33, 51, 33, 437, 33, 33, 33, - /* 2670 */ 51, 3, 118, 36, 33, 36, 119, 318, 36, 118, - /* 2680 */ 114, 119, 453, 118, 455, 116, 198, 36, 200, 437, - /* 2690 */ 85, 203, 569, 570, 36, 119, 208, 119, 118, 118, - /* 2700 */ 36, 36, 33, 118, 51, 453, 477, 455, 51, 119, - /* 2710 */ 118, 0, 0, 0, 119, 227, 119, 118, 44, 119, - /* 2720 */ 392, 44, 203, 119, 0, 119, 119, 118, 44, 500, - /* 2730 */ 119, 33, 503, 118, 118, 407, 507, 508, 509, 510, - /* 2740 */ 511, 512, 118, 302, 515, 516, 517, 290, 119, 2, - /* 2750 */ 22, 51, 500, 254, 51, 503, 118, 22, 118, 507, - /* 2760 */ 508, 509, 510, 511, 512, 437, 199, 515, 516, 517, - /* 2770 */ 116, 392, 199, 118, 118, 198, 0, 116, 119, 0, - /* 2780 */ 118, 453, 119, 455, 118, 118, 407, 44, 118, 118, - /* 2790 */ 118, 118, 22, 51, 119, 119, 118, 118, 118, 118, - /* 2800 */ 199, 119, 119, 118, 392, 477, 118, 120, 22, 118, - /* 2810 */ 558, 121, 118, 118, 118, 118, 437, 36, 129, 407, - /* 2820 */ 119, 36, 118, 33, 119, 36, 118, 36, 500, 119, - /* 2830 */ 36, 
503, 453, 119, 455, 507, 508, 509, 510, 511, - /* 2840 */ 512, 119, 36, 515, 516, 517, 232, 36, 257, 437, - /* 2850 */ 119, 118, 118, 36, 118, 140, 477, 140, 140, 140, - /* 2860 */ 22, 79, 78, 22, 36, 453, 36, 455, 36, 392, - /* 2870 */ 36, 36, 36, 36, 112, 36, 36, 85, 36, 500, - /* 2880 */ 36, 85, 503, 112, 407, 33, 507, 508, 509, 510, - /* 2890 */ 511, 512, 392, 36, 515, 516, 517, 36, 36, 22, - /* 2900 */ 36, 36, 36, 85, 36, 36, 36, 407, 36, 36, - /* 2910 */ 22, 36, 500, 57, 437, 503, 0, 36, 44, 507, - /* 2920 */ 508, 509, 510, 511, 512, 0, 36, 515, 516, 517, - /* 2930 */ 453, 44, 455, 57, 0, 36, 57, 437, 44, 0, - /* 2940 */ 36, 57, 44, 0, 36, 0, 22, 0, 22, 0, - /* 2950 */ 36, 22, 33, 453, 36, 455, 36, 22, 21, 21, - /* 2960 */ 392, 573, 22, 22, 20, 573, 573, 573, 573, 573, - /* 2970 */ 573, 573, 573, 561, 573, 407, 573, 500, 573, 573, - /* 2980 */ 503, 573, 573, 573, 507, 508, 509, 510, 511, 512, - /* 2990 */ 573, 573, 515, 516, 517, 573, 573, 573, 573, 573, - /* 3000 */ 500, 573, 573, 503, 573, 437, 573, 507, 508, 509, - /* 3010 */ 510, 511, 512, 573, 573, 515, 516, 517, 573, 519, - /* 3020 */ 573, 453, 573, 455, 573, 392, 573, 38, 573, 573, - /* 3030 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3040 */ 407, 573, 573, 54, 573, 477, 57, 570, 573, 573, - /* 3050 */ 573, 573, 392, 573, 573, 66, 67, 68, 69, 573, - /* 3060 */ 71, 573, 573, 573, 573, 573, 573, 407, 500, 573, - /* 3070 */ 437, 503, 573, 573, 573, 507, 508, 509, 510, 511, - /* 3080 */ 512, 573, 573, 515, 516, 517, 453, 573, 455, 573, - /* 3090 */ 573, 573, 573, 573, 573, 573, 573, 437, 573, 573, - /* 3100 */ 573, 573, 573, 573, 573, 573, 117, 573, 573, 573, - /* 3110 */ 477, 122, 573, 453, 573, 455, 573, 573, 573, 573, - /* 3120 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3130 */ 573, 573, 573, 500, 573, 573, 503, 477, 392, 573, - /* 3140 */ 507, 508, 509, 510, 511, 512, 573, 573, 515, 516, - /* 3150 */ 517, 573, 573, 407, 573, 573, 573, 573, 392, 573, - /* 3160 */ 500, 573, 
573, 503, 573, 573, 573, 507, 508, 509, - /* 3170 */ 510, 511, 512, 407, 573, 515, 516, 517, 189, 392, - /* 3180 */ 573, 573, 573, 437, 573, 573, 573, 198, 573, 573, - /* 3190 */ 573, 202, 203, 573, 407, 573, 573, 208, 209, 453, - /* 3200 */ 573, 455, 573, 437, 573, 573, 573, 573, 573, 573, - /* 3210 */ 573, 573, 573, 573, 573, 573, 227, 573, 573, 453, - /* 3220 */ 573, 455, 573, 477, 437, 573, 573, 573, 573, 573, - /* 3230 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3240 */ 453, 573, 455, 573, 573, 573, 500, 573, 573, 503, - /* 3250 */ 573, 573, 573, 507, 508, 509, 510, 511, 512, 573, - /* 3260 */ 573, 515, 516, 517, 573, 573, 500, 573, 573, 503, - /* 3270 */ 573, 573, 573, 507, 508, 509, 510, 511, 512, 573, - /* 3280 */ 573, 515, 516, 517, 573, 392, 573, 500, 573, 573, - /* 3290 */ 503, 573, 573, 573, 507, 508, 509, 510, 511, 512, - /* 3300 */ 407, 573, 515, 516, 517, 392, 573, 573, 573, 573, - /* 3310 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3320 */ 407, 573, 573, 573, 573, 392, 573, 573, 573, 573, - /* 3330 */ 437, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3340 */ 407, 573, 573, 573, 573, 573, 453, 573, 455, 573, - /* 3350 */ 437, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3360 */ 573, 573, 573, 573, 573, 573, 453, 573, 455, 573, - /* 3370 */ 437, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3380 */ 573, 573, 573, 573, 573, 573, 453, 573, 455, 573, - /* 3390 */ 573, 573, 573, 500, 573, 573, 503, 573, 573, 573, - /* 3400 */ 507, 508, 509, 510, 511, 512, 573, 573, 515, 516, - /* 3410 */ 517, 573, 392, 500, 573, 573, 503, 573, 573, 573, - /* 3420 */ 507, 508, 509, 510, 511, 512, 573, 407, 515, 516, - /* 3430 */ 517, 573, 573, 500, 573, 573, 503, 573, 392, 573, - /* 3440 */ 507, 508, 509, 510, 511, 512, 573, 573, 515, 516, - /* 3450 */ 517, 573, 573, 407, 573, 573, 573, 437, 573, 573, - /* 3460 */ 573, 392, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3470 */ 573, 573, 573, 453, 573, 455, 407, 573, 573, 573, - /* 
3480 */ 573, 392, 573, 437, 573, 573, 573, 573, 573, 573, - /* 3490 */ 573, 573, 573, 573, 573, 573, 407, 573, 573, 453, - /* 3500 */ 573, 455, 573, 392, 573, 573, 437, 573, 573, 573, - /* 3510 */ 573, 573, 573, 573, 573, 573, 573, 573, 407, 573, - /* 3520 */ 500, 573, 453, 503, 455, 573, 437, 507, 508, 509, - /* 3530 */ 510, 511, 512, 573, 573, 515, 516, 517, 573, 573, - /* 3540 */ 573, 573, 453, 573, 455, 573, 500, 573, 437, 503, - /* 3550 */ 573, 573, 573, 507, 508, 509, 510, 511, 512, 573, - /* 3560 */ 573, 515, 516, 517, 453, 573, 455, 573, 392, 500, - /* 3570 */ 573, 573, 503, 573, 573, 573, 507, 508, 509, 510, - /* 3580 */ 511, 512, 573, 407, 515, 516, 517, 392, 573, 500, - /* 3590 */ 573, 573, 503, 573, 573, 573, 507, 508, 509, 510, - /* 3600 */ 511, 512, 407, 573, 515, 516, 517, 573, 573, 392, - /* 3610 */ 573, 500, 573, 437, 503, 573, 573, 573, 507, 508, - /* 3620 */ 509, 510, 511, 512, 407, 573, 515, 516, 517, 453, - /* 3630 */ 573, 455, 437, 573, 573, 573, 573, 573, 573, 573, - /* 3640 */ 573, 573, 573, 573, 573, 573, 573, 573, 453, 573, - /* 3650 */ 455, 573, 573, 573, 437, 573, 573, 573, 573, 573, - /* 3660 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3670 */ 453, 573, 455, 573, 392, 573, 500, 573, 573, 503, - /* 3680 */ 573, 573, 573, 507, 508, 509, 510, 511, 512, 407, - /* 3690 */ 573, 515, 516, 517, 573, 500, 573, 573, 503, 573, - /* 3700 */ 573, 573, 507, 508, 509, 510, 511, 512, 573, 573, - /* 3710 */ 515, 516, 517, 573, 573, 573, 573, 500, 573, 437, - /* 3720 */ 503, 573, 573, 392, 507, 508, 509, 510, 511, 512, - /* 3730 */ 573, 573, 515, 516, 517, 453, 573, 455, 407, 573, - /* 3740 */ 573, 573, 573, 392, 573, 573, 573, 573, 573, 573, - /* 3750 */ 573, 573, 573, 573, 573, 573, 573, 573, 407, 573, - /* 3760 */ 573, 573, 573, 392, 573, 573, 573, 573, 437, 573, - /* 3770 */ 573, 573, 573, 573, 573, 573, 573, 573, 407, 573, - /* 3780 */ 573, 573, 500, 573, 453, 503, 455, 573, 437, 507, - /* 3790 */ 508, 509, 510, 511, 512, 573, 573, 
515, 516, 517, - /* 3800 */ 573, 573, 573, 573, 453, 573, 455, 573, 437, 573, - /* 3810 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3820 */ 573, 573, 573, 573, 453, 573, 455, 573, 573, 573, - /* 3830 */ 573, 500, 573, 573, 503, 573, 573, 573, 507, 508, - /* 3840 */ 509, 510, 511, 512, 573, 573, 515, 516, 517, 573, - /* 3850 */ 392, 500, 573, 573, 503, 573, 573, 573, 507, 508, - /* 3860 */ 509, 510, 511, 512, 573, 407, 515, 516, 517, 392, - /* 3870 */ 573, 500, 573, 573, 503, 573, 573, 573, 507, 508, - /* 3880 */ 509, 510, 511, 512, 407, 573, 515, 516, 517, 392, - /* 3890 */ 573, 573, 573, 573, 573, 437, 573, 573, 573, 573, - /* 3900 */ 573, 573, 573, 573, 407, 573, 573, 573, 573, 573, - /* 3910 */ 573, 453, 573, 455, 437, 573, 573, 573, 573, 573, - /* 3920 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3930 */ 453, 573, 455, 573, 437, 573, 573, 573, 573, 573, - /* 3940 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 3950 */ 453, 573, 455, 573, 573, 573, 392, 573, 500, 573, - /* 3960 */ 573, 503, 573, 573, 573, 507, 508, 509, 510, 511, - /* 3970 */ 512, 407, 573, 515, 516, 517, 392, 500, 573, 573, - /* 3980 */ 503, 573, 573, 573, 507, 508, 509, 510, 511, 512, - /* 3990 */ 573, 407, 515, 516, 517, 392, 573, 500, 573, 573, - /* 4000 */ 503, 437, 573, 573, 507, 508, 509, 510, 511, 512, - /* 4010 */ 407, 573, 515, 516, 517, 573, 573, 453, 573, 455, - /* 4020 */ 573, 437, 573, 573, 573, 573, 573, 573, 573, 573, - /* 4030 */ 573, 573, 573, 573, 573, 573, 573, 453, 573, 455, - /* 4040 */ 437, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 4050 */ 573, 573, 573, 573, 573, 573, 453, 573, 455, 573, - /* 4060 */ 573, 573, 573, 392, 500, 573, 573, 503, 573, 573, - /* 4070 */ 573, 507, 508, 509, 510, 511, 512, 573, 407, 515, - /* 4080 */ 516, 517, 392, 573, 500, 573, 573, 503, 573, 573, - /* 4090 */ 573, 507, 508, 509, 510, 511, 512, 407, 573, 515, - /* 4100 */ 516, 517, 573, 500, 573, 573, 503, 573, 437, 573, - /* 4110 */ 507, 508, 509, 
510, 511, 512, 573, 573, 515, 516, - /* 4120 */ 517, 573, 573, 573, 453, 573, 455, 437, 573, 573, - /* 4130 */ 573, 392, 573, 573, 573, 573, 573, 573, 573, 573, - /* 4140 */ 573, 573, 573, 453, 573, 455, 407, 573, 573, 573, - /* 4150 */ 573, 392, 573, 573, 573, 573, 573, 573, 573, 573, - /* 4160 */ 573, 573, 573, 573, 573, 573, 407, 573, 573, 573, - /* 4170 */ 573, 500, 573, 573, 503, 573, 437, 573, 507, 508, - /* 4180 */ 509, 510, 511, 512, 573, 573, 515, 516, 517, 573, - /* 4190 */ 500, 573, 453, 503, 455, 573, 437, 507, 508, 509, - /* 4200 */ 510, 511, 512, 573, 573, 515, 516, 517, 573, 573, - /* 4210 */ 573, 573, 453, 573, 455, 573, 573, 573, 573, 392, - /* 4220 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 4230 */ 573, 573, 573, 573, 407, 573, 573, 573, 573, 500, - /* 4240 */ 573, 573, 503, 392, 573, 573, 507, 508, 509, 510, - /* 4250 */ 511, 512, 573, 573, 515, 516, 517, 573, 407, 500, - /* 4260 */ 573, 573, 503, 573, 437, 573, 507, 508, 509, 510, - /* 4270 */ 511, 512, 573, 573, 515, 516, 517, 573, 573, 573, - /* 4280 */ 453, 573, 455, 573, 573, 573, 573, 573, 437, 573, - /* 4290 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 4300 */ 573, 573, 573, 573, 453, 573, 455, 573, 573, 573, - /* 4310 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 4320 */ 573, 573, 573, 573, 573, 392, 573, 500, 573, 573, - /* 4330 */ 503, 573, 573, 573, 507, 508, 509, 510, 511, 512, - /* 4340 */ 407, 573, 515, 516, 517, 573, 573, 573, 573, 392, - /* 4350 */ 573, 500, 573, 573, 503, 573, 573, 573, 507, 508, - /* 4360 */ 509, 510, 511, 512, 407, 573, 515, 516, 517, 573, - /* 4370 */ 437, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 4380 */ 573, 573, 573, 573, 573, 573, 453, 573, 455, 573, - /* 4390 */ 573, 573, 573, 573, 437, 573, 573, 573, 573, 573, - /* 4400 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 4410 */ 453, 573, 455, 573, 573, 573, 573, 573, 573, 573, - /* 4420 */ 573, 573, 573, 573, 573, 573, 573, 573, 573, 573, - /* 4430 
*/ 573, 573, 573, 500, 573, 573, 503, 573, 573, 573, - /* 4440 */ 507, 508, 509, 510, 511, 512, 573, 573, 515, 516, - /* 4450 */ 517, 573, 573, 573, 573, 573, 573, 500, 573, 573, - /* 4460 */ 503, 573, 573, 573, 507, 508, 509, 510, 511, 512, - /* 4470 */ 573, 573, 515, 516, 517, 389, 389, 389, 389, 389, - /* 4480 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4490 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4500 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4510 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4520 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4530 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4540 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4550 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4560 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4570 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4580 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4590 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4600 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4610 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4620 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4630 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4640 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4650 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4660 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4670 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4680 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4690 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4700 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4710 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4720 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4730 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4740 */ 389, 389, 389, 389, 389, 389, 389, 389, 
389, 389, - /* 4750 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4760 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4770 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4780 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4790 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4800 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4810 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4820 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4830 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4840 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4850 */ 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, - /* 4860 */ 389, 389, 389, 389, -}; -#define YY_SHIFT_COUNT (1024) -#define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2989) -static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 948, 277, 554, 277, 832, 832, 832, 832, 832, 832, - /* 10 */ 832, 832, 832, 832, 832, 832, 1109, 1940, 1940, 2217, - /* 20 */ 0, 1386, 1940, 1940, 1940, 1940, 1940, 1940, 1940, 1940, - /* 30 */ 1940, 1940, 1663, 1940, 1940, 1940, 1940, 1940, 1940, 1940, - /* 40 */ 1940, 1940, 1940, 1940, 1940, 1940, 1940, 1940, 1940, 1940, - /* 50 */ 1940, 1940, 1940, 1940, 1940, 1940, 1940, 1940, 1940, 1940, - /* 60 */ 1940, 1940, 1940, 34, 66, 180, 253, 46, 122, 46, - /* 70 */ 46, 253, 253, 46, 960, 46, 276, 960, 520, 46, - /* 80 */ 149, 2315, 156, 156, 212, 212, 2315, 2315, 598, 598, - /* 90 */ 156, 20, 20, 346, 89, 89, 69, 204, 212, 212, - /* 100 */ 212, 212, 212, 212, 212, 212, 212, 212, 212, 333, - /* 110 */ 444, 573, 212, 212, 585, 149, 212, 333, 212, 149, - /* 120 */ 212, 212, 212, 212, 149, 212, 212, 212, 149, 212, - /* 130 */ 149, 149, 149, 648, 222, 222, 473, 473, 745, 652, - /* 140 */ 181, 51, 548, 548, 548, 548, 548, 548, 548, 548, - /* 150 */ 548, 548, 548, 548, 548, 548, 548, 548, 548, 548, - /* 160 */ 548, 1858, 200, 20, 346, 849, 849, 710, 379, 379, - /* 170 */ 379, 927, 927, 699, 1091, 710, 
585, 149, 614, 149, - /* 180 */ 149, 466, 149, 149, 683, 149, 683, 683, 728, 25, - /* 190 */ 2488, 473, 473, 473, 473, 473, 473, 1190, 21, 53, - /* 200 */ 411, 411, 314, 148, 23, 220, 152, 596, 213, 571, - /* 210 */ 560, 560, 892, 662, 1043, 1043, 1043, 382, 1043, 774, - /* 220 */ 757, 92, 896, 1442, 1208, 682, 1247, 1247, 1254, 1320, - /* 230 */ 1320, 1082, 1000, 241, 1247, 1091, 1510, 1772, 1812, 1814, - /* 240 */ 1601, 585, 1814, 585, 1639, 1812, 1842, 1819, 1842, 1819, - /* 250 */ 1686, 1812, 1842, 1812, 1819, 1686, 1686, 1686, 1782, 1788, - /* 260 */ 1812, 1812, 1792, 1812, 1812, 1812, 1886, 1860, 1886, 1860, - /* 270 */ 1814, 585, 585, 1896, 585, 1935, 1937, 585, 1935, 585, - /* 280 */ 1943, 585, 1948, 585, 585, 1921, 1921, 1812, 585, 1886, - /* 290 */ 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, - /* 300 */ 149, 1812, 25, 25, 1886, 683, 683, 683, 1759, 1881, - /* 310 */ 1814, 648, 1992, 1799, 1806, 1896, 648, 1510, 1812, 683, - /* 320 */ 1714, 1717, 1714, 1717, 1712, 1836, 1714, 1724, 1726, 1745, - /* 330 */ 1510, 1749, 1763, 1738, 1739, 1740, 1842, 2061, 1954, 1770, - /* 340 */ 1935, 648, 648, 1717, 683, 683, 683, 683, 1717, 683, - /* 350 */ 1924, 648, 683, 1948, 648, 2015, 683, 1944, 1948, 648, - /* 360 */ 728, 648, 1842, 683, 683, 683, 683, 683, 683, 683, - /* 370 */ 683, 683, 683, 683, 683, 683, 683, 683, 683, 683, - /* 380 */ 683, 683, 683, 683, 683, 2034, 683, 1812, 648, 2155, - /* 390 */ 2148, 2152, 2151, 1886, 4475, 4475, 4475, 4475, 4475, 4475, - /* 400 */ 4475, 4475, 4475, 4475, 4475, 4475, 39, 2989, 70, 1146, - /* 410 */ 1193, 1243, 1425, 1711, 715, 1434, 1560, 1570, 1124, 497, - /* 420 */ 770, 1584, 1542, 1861, 1878, 1699, 1808, 1004, 1169, 1026, - /* 430 */ 1026, 1026, 1026, 1026, 1026, 1026, 1026, 1026, 237, 150, - /* 440 */ 992, 831, 3, 3, 681, 106, 321, 182, 686, 711, - /* 450 */ 334, 752, 837, 767, 928, 928, 687, 736, 1138, 687, - /* 460 */ 687, 687, 1407, 1275, 1206, 1382, 1446, 1365, 1473, 1509, - /* 470 */ 10, 1495, 1373, 399, 1374, 
1403, 399, 1408, 1478, 1499, - /* 480 */ 1526, 1302, 1527, 1538, 1545, 1396, 1469, 1489, 179, 1500, - /* 490 */ 1513, 1519, 1561, 1351, 1301, 1130, 1507, 1562, 1568, 1569, - /* 500 */ 1563, 1577, 1488, 1587, 1610, 1632, 1573, 1625, 1633, 1645, - /* 510 */ 1653, 1657, 1558, 1591, 1624, 1692, 1704, 1718, 1720, 1723, - /* 520 */ 1733, 1737, 1741, 1667, 399, 1708, 1730, 1735, 1736, 1742, - /* 530 */ 1575, 1682, 1541, 1589, 1743, 1744, 1719, 1702, 2256, 2257, - /* 540 */ 2258, 2212, 2260, 2225, 2017, 2227, 2228, 2231, 2020, 2268, - /* 550 */ 2233, 2234, 2026, 2236, 2273, 2274, 2030, 2276, 2241, 2278, - /* 560 */ 2243, 2280, 2259, 2282, 2247, 2044, 2285, 2059, 2287, 2062, - /* 570 */ 2064, 2069, 2074, 2294, 2295, 2297, 2083, 2085, 2300, 2301, - /* 580 */ 2142, 2253, 2254, 2306, 2271, 2308, 2309, 2275, 2261, 2310, - /* 590 */ 2262, 2319, 2269, 2317, 2320, 2321, 2264, 2322, 2323, 2324, - /* 600 */ 2325, 2326, 2329, 2157, 2298, 2330, 2159, 2331, 2339, 2340, - /* 610 */ 2341, 2342, 2343, 2344, 2345, 2346, 2347, 2349, 2350, 2352, - /* 620 */ 2354, 2355, 2356, 2357, 2358, 2360, 2361, 2312, 2364, 2318, - /* 630 */ 2377, 2382, 2384, 2385, 2389, 2390, 2391, 2392, 2393, 2395, - /* 640 */ 2375, 2398, 2218, 2380, 2239, 2400, 2242, 2402, 2403, 2383, - /* 650 */ 2362, 2386, 2363, 2404, 2335, 2407, 2338, 2374, 2413, 2348, - /* 660 */ 2416, 2353, 2417, 2418, 2387, 2368, 2376, 2426, 2394, 2370, - /* 670 */ 2388, 2428, 2397, 2372, 2396, 2431, 2399, 2434, 2381, 2439, - /* 680 */ 2406, 2445, 2408, 2410, 2414, 2409, 2411, 2432, 2415, 2459, - /* 690 */ 2419, 2423, 2461, 2468, 2474, 2490, 2452, 2288, 2497, 2409, - /* 700 */ 2455, 2508, 2409, 2458, 2510, 2511, 2433, 2513, 2514, 2480, - /* 710 */ 2460, 2475, 2518, 2484, 2464, 2478, 2523, 2489, 2467, 2483, - /* 720 */ 2528, 2493, 2476, 2487, 2532, 2535, 2537, 2538, 2539, 2504, - /* 730 */ 2543, 2544, 2420, 2421, 2515, 2524, 2549, 2530, 2517, 2522, - /* 740 */ 2525, 2526, 2527, 2529, 2534, 2536, 2541, 2531, 2533, 2545, - /* 750 */ 2546, 2552, 
2547, 2550, 2551, 2571, 2553, 2579, 2558, 2548, - /* 760 */ 2584, 2563, 2555, 2586, 2587, 2588, 2556, 2593, 2559, 2594, - /* 770 */ 2560, 2603, 2582, 2589, 2570, 2572, 2575, 2494, 2496, 2612, - /* 780 */ 2422, 2405, 2424, 2498, 2401, 2409, 2564, 2617, 2429, 2583, - /* 790 */ 2598, 2622, 2425, 2602, 2446, 2427, 2628, 2643, 2449, 2440, - /* 800 */ 2453, 2442, 2647, 2618, 2334, 2540, 2542, 2554, 2557, 2631, - /* 810 */ 2561, 2562, 2620, 2621, 2565, 2608, 2569, 2610, 2566, 2576, - /* 820 */ 2630, 2632, 2578, 2580, 2581, 2585, 2590, 2634, 2613, 2619, - /* 830 */ 2592, 2635, 2336, 2605, 2595, 2636, 2599, 2637, 2597, 2600, - /* 840 */ 2668, 2641, 2359, 2639, 2642, 2651, 2658, 2664, 2665, 2604, - /* 850 */ 2606, 2653, 2441, 2669, 2657, 2711, 2712, 2609, 2674, 2607, - /* 860 */ 2611, 2615, 2616, 2567, 2624, 2713, 2677, 2519, 2724, 2629, - /* 870 */ 2638, 2573, 2684, 2577, 2698, 2654, 2457, 2661, 2747, 2728, - /* 880 */ 2499, 2640, 2655, 2656, 2662, 2666, 2659, 2663, 2667, 2670, - /* 890 */ 2671, 2672, 2673, 2675, 2700, 2678, 2679, 2703, 2676, 2735, - /* 900 */ 2591, 2680, 2681, 2776, 2682, 2685, 2601, 2743, 2688, 2687, - /* 910 */ 2779, 2770, 2690, 2691, 2409, 2742, 2694, 2695, 2683, 2696, - /* 920 */ 2697, 2689, 2786, 2614, 2701, 2781, 2785, 2704, 2705, 2789, - /* 930 */ 2708, 2710, 2791, 2667, 2714, 2794, 2670, 2722, 2806, 2671, - /* 940 */ 2731, 2811, 2672, 2715, 2717, 2718, 2719, 2733, 2790, 2734, - /* 950 */ 2817, 2736, 2790, 2790, 2838, 2782, 2784, 2841, 2828, 2830, - /* 960 */ 2832, 2834, 2835, 2836, 2837, 2839, 2840, 2842, 2844, 2792, - /* 970 */ 2762, 2796, 2771, 2852, 2857, 2861, 2862, 2877, 2864, 2865, - /* 980 */ 2866, 2818, 2531, 2868, 2533, 2869, 2870, 2872, 2873, 2888, - /* 990 */ 2875, 2916, 2881, 2856, 2874, 2925, 2890, 2876, 2887, 2934, - /* 1000 */ 2899, 2879, 2894, 2939, 2904, 2884, 2898, 2943, 2908, 2945, - /* 1010 */ 2924, 2947, 2926, 2914, 2949, 2929, 2919, 2918, 2920, 2935, - /* 1020 */ 2937, 2940, 2941, 2938, 2944, -}; -#define YY_REDUCE_COUNT (405) 
-#define YY_REDUCE_MIN (-536) -#define YY_REDUCE_MAX (3957) -static const short yy_reduce_ofst[] = { - /* 0 */ 418, -349, -99, 142, 552, 829, 1008, 1285, 1582, 1664, - /* 10 */ 1106, 1859, 1941, 176, 1336, 1613, -135, 289, 2123, 419, - /* 20 */ 974, 2229, 2252, 2328, 2379, 2412, 2477, 2500, 2568, 2633, - /* 30 */ 2660, 2746, 2766, 2787, 2893, 2913, 2933, 3020, 3046, 3069, - /* 40 */ 3089, 3111, 3176, 3195, 3217, 3282, 3331, 3351, 3371, 3458, - /* 50 */ 3477, 3497, 3564, 3584, 3603, 3671, 3690, 3739, 3759, 3827, - /* 60 */ 3851, 3933, 3957, -351, 448, 605, -290, -366, 481, 1203, - /* 70 */ 1464, 329, 442, 1485, -412, -536, -153, -393, -534, 14, - /* 80 */ 197, 347, -106, -49, 254, 271, 480, 511, -400, -398, - /* 90 */ -368, -374, -100, -245, 57, 192, 164, -382, -90, 507, - /* 100 */ 539, 551, 43, 229, 718, 806, 834, 843, 305, 151, - /* 110 */ -114, 385, 847, 879, 639, 472, 889, 218, 900, 706, - /* 120 */ 983, 995, 997, 1006, -219, 1029, 1046, 1052, 729, 1061, - /* 130 */ 228, 792, 1017, 616, 61, 61, 184, -415, -35, -259, - /* 140 */ -503, -193, -25, 443, 502, 603, 617, 658, 734, 735, - /* 150 */ 738, 778, 827, 895, 901, 935, 963, 967, 1022, 1027, - /* 160 */ 1039, -14, 121, 33, 316, 256, 588, 651, 121, 258, - /* 170 */ 363, 787, 788, 508, -28, 692, 493, 844, 768, -428, - /* 180 */ 143, 905, 189, 786, 897, 860, 899, 1016, 754, 739, - /* 190 */ 471, 568, 574, 737, 755, 766, 784, 724, 950, 1011, - /* 200 */ 823, 823, 721, 987, 1034, 1072, 1217, 823, 1192, 1192, - /* 210 */ 1236, 1261, 1292, 1244, 1162, 1200, 1202, 1296, 1218, 1192, - /* 220 */ 1317, 1377, 1282, 1383, 1332, 1312, 1334, 1338, 1192, 1262, - /* 230 */ 1263, 1248, 1278, 1265, 1350, 1400, 1348, 1329, 1427, 1344, - /* 240 */ 1342, 1426, 1349, 1435, 1360, 1453, 1456, 1409, 1465, 1416, - /* 250 */ 1421, 1475, 1476, 1479, 1433, 1429, 1437, 1438, 1482, 1486, - /* 260 */ 1497, 1501, 1491, 1503, 1504, 1505, 1514, 1511, 1516, 1517, - /* 270 */ 1444, 1521, 1529, 1487, 1539, 1548, 1477, 1544, 1553, 1550, - /* 280 */ 
1494, 1552, 1506, 1554, 1555, 1546, 1551, 1571, 1566, 1576, - /* 290 */ 1549, 1556, 1557, 1559, 1564, 1565, 1572, 1578, 1579, 1580, - /* 300 */ 1581, 1586, 1592, 1593, 1596, 1537, 1583, 1585, 1508, 1520, - /* 310 */ 1531, 1594, 1533, 1540, 1547, 1588, 1620, 1574, 1634, 1598, - /* 320 */ 1490, 1590, 1492, 1597, 1502, 1515, 1512, 1498, 1522, 1518, - /* 330 */ 1595, 1523, 1535, 1528, 1536, 1543, 1672, 1599, 1567, 1602, - /* 340 */ 1678, 1674, 1675, 1630, 1654, 1656, 1659, 1662, 1658, 1665, - /* 350 */ 1666, 1725, 1679, 1661, 1727, 1612, 1690, 1681, 1683, 1747, - /* 360 */ 1721, 1750, 1748, 1697, 1698, 1701, 1703, 1705, 1709, 1715, - /* 370 */ 1716, 1729, 1731, 1732, 1734, 1760, 1766, 1767, 1768, 1778, - /* 380 */ 1779, 1780, 1781, 1783, 1785, 1728, 1787, 1751, 1752, 1784, - /* 390 */ 1793, 1837, 1797, 1813, 1758, 1803, 1746, 1754, 1769, 1773, - /* 400 */ 1815, 1820, 1809, 1821, 1838, 1864, -}; -static const YYACTIONTYPE yy_default[] = { - /* 0 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 10 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 20 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 30 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 40 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 50 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 60 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 70 */ 2300, 2300, 2300, 2683, 2300, 2300, 2639, 2300, 2300, 2300, - /* 80 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 90 */ 2300, 2646, 2646, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 100 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 110 */ 2300, 2300, 2300, 2300, 2410, 2300, 2300, 2300, 2300, 2300, - /* 120 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 130 */ 2300, 2300, 2300, 2408, 2956, 2300, 3084, 2724, 2300, 2300, - /* 140 */ 2985, 2300, 2300, 2300, 2300, 2300, 2300, 
2300, 2300, 2300, - /* 150 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 160 */ 2300, 2300, 2968, 2300, 2300, 2381, 2381, 2300, 2968, 2968, - /* 170 */ 2968, 2928, 2928, 2408, 2300, 2300, 2410, 2300, 2726, 2300, - /* 180 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2553, 2330, - /* 190 */ 2709, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 3014, - /* 200 */ 2960, 2961, 3078, 2300, 3017, 2979, 2300, 2974, 2300, 2300, - /* 210 */ 2300, 2300, 2300, 3004, 2300, 2300, 2300, 2300, 2300, 2300, - /* 220 */ 2651, 2300, 2752, 2300, 2496, 2703, 2300, 2300, 2300, 2300, - /* 230 */ 2300, 3062, 2958, 2998, 2300, 2300, 3008, 2300, 2300, 2300, - /* 240 */ 2740, 2410, 2300, 2410, 2696, 2634, 2300, 2644, 2300, 2644, - /* 250 */ 2641, 2300, 2300, 2300, 2644, 2641, 2641, 2641, 2484, 2480, - /* 260 */ 2300, 2300, 2478, 2300, 2300, 2300, 2300, 2360, 2300, 2360, - /* 270 */ 2300, 2410, 2410, 2300, 2410, 2300, 2300, 2410, 2300, 2410, - /* 280 */ 2300, 2410, 2300, 2410, 2410, 2513, 2513, 2300, 2410, 2300, - /* 290 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 300 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2738, 2719, - /* 310 */ 2300, 2408, 2300, 2707, 2705, 2300, 2408, 3008, 2300, 2300, - /* 320 */ 3032, 3027, 3032, 3027, 3046, 3042, 3032, 3051, 3048, 3010, - /* 330 */ 3008, 2991, 2987, 3081, 3068, 3064, 2300, 2300, 2996, 2994, - /* 340 */ 2300, 2408, 2408, 3027, 2300, 2300, 2300, 2300, 3027, 2300, - /* 350 */ 2300, 2408, 2300, 2300, 2408, 2300, 2300, 2300, 2300, 2408, - /* 360 */ 2300, 2408, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 370 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 380 */ 2300, 2300, 2300, 2300, 2300, 2515, 2300, 2300, 2408, 2300, - /* 390 */ 2332, 2334, 2344, 2300, 2698, 3084, 2724, 2729, 2679, 2679, - /* 400 */ 2556, 2556, 3084, 2556, 2411, 2305, 2300, 2300, 2300, 2300, - /* 410 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 420 */ 2300, 2300, 2857, 2300, 
2300, 2300, 2300, 2300, 2300, 3045, - /* 430 */ 3044, 2858, 2300, 2932, 2931, 2930, 2921, 2857, 2509, 2300, - /* 440 */ 2300, 2300, 2856, 2855, 2300, 2300, 2300, 2300, 2300, 2300, - /* 450 */ 2300, 2300, 2300, 2300, 2670, 2669, 2849, 2300, 2300, 2850, - /* 460 */ 2848, 2847, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 470 */ 2300, 2300, 2300, 2500, 2300, 2300, 2497, 2300, 2300, 2300, - /* 480 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 490 */ 2300, 2300, 2300, 2300, 3065, 3069, 2300, 2300, 2300, 2300, - /* 500 */ 2957, 2300, 2300, 2300, 2300, 2300, 2828, 2300, 2300, 2300, - /* 510 */ 2300, 2300, 2796, 2791, 2782, 2773, 2788, 2779, 2767, 2785, - /* 520 */ 2776, 2764, 2761, 2300, 2524, 2300, 2300, 2300, 2300, 2300, - /* 530 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 540 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 550 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 560 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 570 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 580 */ 2640, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 590 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 600 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 610 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 620 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 630 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 640 */ 2300, 2300, 2300, 2300, 2300, 2300, 2655, 2300, 2300, 2300, - /* 650 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 660 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 670 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 680 */ 2300, 2300, 2300, 2300, 2349, 2835, 2300, 2300, 2300, 2300, - /* 690 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2838, - /* 700 */ 2300, 
2300, 2839, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 710 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 720 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 730 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 740 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2455, 2454, 2300, - /* 750 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 760 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 770 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2840, 2300, 2300, - /* 780 */ 2300, 2300, 2723, 2300, 2300, 2830, 2300, 2300, 2300, 2300, - /* 790 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 800 */ 2300, 2300, 3061, 3011, 2300, 2300, 2300, 2300, 2300, 2300, - /* 810 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 820 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2828, - /* 830 */ 2300, 3043, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 3059, - /* 840 */ 2300, 3063, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2967, - /* 850 */ 2963, 2300, 2300, 2959, 2300, 2300, 2300, 2300, 2300, 2300, - /* 860 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 870 */ 2300, 2300, 2300, 2300, 2918, 2300, 2300, 2300, 2952, 2300, - /* 880 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2552, 2551, - /* 890 */ 2550, 2549, 2300, 2300, 2300, 2300, 2300, 2300, 2840, 2300, - /* 900 */ 2843, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 910 */ 2300, 2300, 2300, 2300, 2827, 2300, 2895, 2894, 2300, 2300, - /* 920 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2546, 2300, 2300, - /* 930 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 940 */ 2300, 2300, 2300, 2530, 2528, 2527, 2526, 2300, 2563, 2300, - /* 950 */ 2300, 2300, 2559, 2558, 2300, 2300, 2300, 2300, 2300, 2300, - /* 960 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 970 */ 2300, 2300, 2300, 2429, 2300, 2300, 2300, 2300, 2300, 2300, - 
/* 980 */ 2300, 2300, 2421, 2300, 2420, 2300, 2300, 2300, 2300, 2300, - /* 990 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 1000 */ 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, 2300, - /* 1010 */ 2300, 2300, 2300, 2300, 2300, 2300, 2329, 2300, 2300, 2300, - /* 1020 */ 2300, 2300, 2300, 2300, 2300, -}; -/********** End of lemon-generated parsing tables *****************************/ - -/* The next table maps tokens (terminal symbols) into fallback tokens. -** If a construct like the following: -** -** %fallback ID X Y Z. -** -** appears in the grammar, then ID becomes a fallback token for X, Y, -** and Z. Whenever one of the tokens X, Y, or Z is input to the parser -** but it does not parse, the type of the token is changed to ID and -** the parse is retried before an error is thrown. -** -** This feature can be used, for example, to cause some keywords in a language -** to revert to identifiers if they keyword does not apply in the context where -** it appears. 
-*/ -#ifdef YYFALLBACK -static const YYCODETYPE yyFallback[] = { - 0, /* $ => nothing */ - 0, /* OR => nothing */ - 0, /* AND => nothing */ - 0, /* UNION => nothing */ - 0, /* ALL => nothing */ - 0, /* MINUS => nothing */ - 0, /* EXCEPT => nothing */ - 0, /* INTERSECT => nothing */ - 0, /* NK_BITAND => nothing */ - 0, /* NK_BITOR => nothing */ - 0, /* NK_LSHIFT => nothing */ - 0, /* NK_RSHIFT => nothing */ - 0, /* NK_PLUS => nothing */ - 0, /* NK_MINUS => nothing */ - 0, /* NK_STAR => nothing */ - 0, /* NK_SLASH => nothing */ - 0, /* NK_REM => nothing */ - 0, /* NK_CONCAT => nothing */ - 0, /* CREATE => nothing */ - 0, /* ACCOUNT => nothing */ - 0, /* NK_ID => nothing */ - 0, /* PASS => nothing */ - 0, /* NK_STRING => nothing */ - 0, /* ALTER => nothing */ - 0, /* PPS => nothing */ - 0, /* TSERIES => nothing */ - 0, /* STORAGE => nothing */ - 0, /* STREAMS => nothing */ - 0, /* QTIME => nothing */ - 0, /* DBS => nothing */ - 0, /* USERS => nothing */ - 0, /* CONNS => nothing */ - 0, /* STATE => nothing */ - 0, /* NK_COMMA => nothing */ - 0, /* HOST => nothing */ - 0, /* IS_IMPORT => nothing */ - 0, /* NK_INTEGER => nothing */ - 0, /* CREATEDB => nothing */ - 0, /* USER => nothing */ - 0, /* ENABLE => nothing */ - 0, /* SYSINFO => nothing */ - 0, /* ADD => nothing */ - 0, /* DROP => nothing */ - 0, /* GRANT => nothing */ - 0, /* ON => nothing */ - 0, /* TO => nothing */ - 0, /* REVOKE => nothing */ - 0, /* FROM => nothing */ - 0, /* SUBSCRIBE => nothing */ - 0, /* READ => nothing */ - 0, /* WRITE => nothing */ - 0, /* NK_DOT => nothing */ - 0, /* WITH => nothing */ - 0, /* ENCRYPT_KEY => nothing */ - 0, /* ANODE => nothing */ - 0, /* UPDATE => nothing */ - 0, /* ANODES => nothing */ - 0, /* DNODE => nothing */ - 0, /* PORT => nothing */ - 0, /* DNODES => nothing */ - 0, /* RESTORE => nothing */ - 0, /* NK_IPTOKEN => nothing */ - 0, /* FORCE => nothing */ - 0, /* UNSAFE => nothing */ - 0, /* CLUSTER => nothing */ - 0, /* LOCAL => nothing */ - 0, /* QNODE => nothing 
*/ - 0, /* BNODE => nothing */ - 0, /* SNODE => nothing */ - 0, /* MNODE => nothing */ - 0, /* VNODE => nothing */ - 0, /* DATABASE => nothing */ - 0, /* USE => nothing */ - 0, /* FLUSH => nothing */ - 0, /* TRIM => nothing */ - 0, /* S3MIGRATE => nothing */ - 0, /* COMPACT => nothing */ - 0, /* IF => nothing */ - 0, /* NOT => nothing */ - 0, /* EXISTS => nothing */ - 0, /* BUFFER => nothing */ - 0, /* CACHEMODEL => nothing */ - 0, /* CACHESIZE => nothing */ - 0, /* COMP => nothing */ - 0, /* DURATION => nothing */ - 0, /* NK_VARIABLE => nothing */ - 0, /* MAXROWS => nothing */ - 0, /* MINROWS => nothing */ - 0, /* KEEP => nothing */ - 0, /* PAGES => nothing */ - 0, /* PAGESIZE => nothing */ - 0, /* TSDB_PAGESIZE => nothing */ - 0, /* PRECISION => nothing */ - 0, /* REPLICA => nothing */ - 0, /* VGROUPS => nothing */ - 0, /* SINGLE_STABLE => nothing */ - 0, /* RETENTIONS => nothing */ - 0, /* SCHEMALESS => nothing */ - 0, /* WAL_LEVEL => nothing */ - 0, /* WAL_FSYNC_PERIOD => nothing */ - 0, /* WAL_RETENTION_PERIOD => nothing */ - 0, /* WAL_RETENTION_SIZE => nothing */ - 0, /* WAL_ROLL_PERIOD => nothing */ - 0, /* WAL_SEGMENT_SIZE => nothing */ - 0, /* STT_TRIGGER => nothing */ - 0, /* TABLE_PREFIX => nothing */ - 0, /* TABLE_SUFFIX => nothing */ - 0, /* S3_CHUNKSIZE => nothing */ - 0, /* S3_KEEPLOCAL => nothing */ - 0, /* S3_COMPACT => nothing */ - 0, /* KEEP_TIME_OFFSET => nothing */ - 0, /* ENCRYPT_ALGORITHM => nothing */ - 0, /* NK_COLON => nothing */ - 0, /* BWLIMIT => nothing */ - 0, /* START => nothing */ - 0, /* TIMESTAMP => nothing */ - 343, /* END => ABORT */ - 0, /* TABLE => nothing */ - 0, /* NK_LP => nothing */ - 0, /* NK_RP => nothing */ - 0, /* USING => nothing */ - 343, /* FILE => ABORT */ - 0, /* STABLE => nothing */ - 0, /* COLUMN => nothing */ - 0, /* MODIFY => nothing */ - 0, /* RENAME => nothing */ - 0, /* TAG => nothing */ - 0, /* SET => nothing */ - 0, /* NK_EQ => nothing */ - 0, /* TAGS => nothing */ - 0, /* BOOL => nothing */ - 0, /* 
TINYINT => nothing */ - 0, /* SMALLINT => nothing */ - 0, /* INT => nothing */ - 0, /* INTEGER => nothing */ - 0, /* BIGINT => nothing */ - 0, /* FLOAT => nothing */ - 0, /* DOUBLE => nothing */ - 0, /* BINARY => nothing */ - 0, /* NCHAR => nothing */ - 0, /* UNSIGNED => nothing */ - 0, /* JSON => nothing */ - 0, /* VARCHAR => nothing */ - 0, /* MEDIUMBLOB => nothing */ - 0, /* BLOB => nothing */ - 0, /* VARBINARY => nothing */ - 0, /* GEOMETRY => nothing */ - 0, /* DECIMAL => nothing */ - 0, /* COMMENT => nothing */ - 0, /* MAX_DELAY => nothing */ - 0, /* WATERMARK => nothing */ - 0, /* ROLLUP => nothing */ - 0, /* TTL => nothing */ - 0, /* SMA => nothing */ - 0, /* DELETE_MARK => nothing */ - 0, /* FIRST => nothing */ - 0, /* LAST => nothing */ - 0, /* SHOW => nothing */ - 0, /* FULL => nothing */ - 0, /* PRIVILEGES => nothing */ - 0, /* DATABASES => nothing */ - 0, /* TABLES => nothing */ - 0, /* STABLES => nothing */ - 0, /* MNODES => nothing */ - 0, /* QNODES => nothing */ - 0, /* ARBGROUPS => nothing */ - 0, /* FUNCTIONS => nothing */ - 0, /* INDEXES => nothing */ - 0, /* ACCOUNTS => nothing */ - 0, /* APPS => nothing */ - 0, /* CONNECTIONS => nothing */ - 0, /* LICENCES => nothing */ - 0, /* GRANTS => nothing */ - 0, /* LOGS => nothing */ - 0, /* MACHINES => nothing */ - 0, /* ENCRYPTIONS => nothing */ - 0, /* QUERIES => nothing */ - 0, /* SCORES => nothing */ - 0, /* TOPICS => nothing */ - 0, /* VARIABLES => nothing */ - 0, /* BNODES => nothing */ - 0, /* SNODES => nothing */ - 0, /* TRANSACTIONS => nothing */ - 0, /* DISTRIBUTED => nothing */ - 0, /* CONSUMERS => nothing */ - 0, /* SUBSCRIPTIONS => nothing */ - 0, /* VNODES => nothing */ - 0, /* ALIVE => nothing */ - 0, /* VIEWS => nothing */ - 343, /* VIEW => ABORT */ - 0, /* COMPACTS => nothing */ - 0, /* NORMAL => nothing */ - 0, /* CHILD => nothing */ - 0, /* LIKE => nothing */ - 0, /* TBNAME => nothing */ - 0, /* QTAGS => nothing */ - 0, /* AS => nothing */ - 0, /* SYSTEM => nothing */ - 0, /* TSMA => 
nothing */ - 0, /* INTERVAL => nothing */ - 0, /* RECURSIVE => nothing */ - 0, /* TSMAS => nothing */ - 0, /* FUNCTION => nothing */ - 0, /* INDEX => nothing */ - 0, /* COUNT => nothing */ - 0, /* LAST_ROW => nothing */ - 0, /* META => nothing */ - 0, /* ONLY => nothing */ - 0, /* TOPIC => nothing */ - 0, /* CONSUMER => nothing */ - 0, /* GROUP => nothing */ - 0, /* DESC => nothing */ - 0, /* DESCRIBE => nothing */ - 0, /* RESET => nothing */ - 0, /* QUERY => nothing */ - 0, /* CACHE => nothing */ - 0, /* EXPLAIN => nothing */ - 0, /* ANALYZE => nothing */ - 0, /* VERBOSE => nothing */ - 0, /* NK_BOOL => nothing */ - 0, /* RATIO => nothing */ - 0, /* NK_FLOAT => nothing */ - 0, /* OUTPUTTYPE => nothing */ - 0, /* AGGREGATE => nothing */ - 0, /* BUFSIZE => nothing */ - 0, /* LANGUAGE => nothing */ - 0, /* REPLACE => nothing */ - 0, /* STREAM => nothing */ - 0, /* INTO => nothing */ - 0, /* PAUSE => nothing */ - 0, /* RESUME => nothing */ - 0, /* PRIMARY => nothing */ - 343, /* KEY => ABORT */ - 0, /* TRIGGER => nothing */ - 0, /* AT_ONCE => nothing */ - 0, /* WINDOW_CLOSE => nothing */ - 0, /* IGNORE => nothing */ - 0, /* EXPIRED => nothing */ - 0, /* FILL_HISTORY => nothing */ - 0, /* SUBTABLE => nothing */ - 0, /* UNTREATED => nothing */ - 0, /* KILL => nothing */ - 0, /* CONNECTION => nothing */ - 0, /* TRANSACTION => nothing */ - 0, /* BALANCE => nothing */ - 0, /* VGROUP => nothing */ - 0, /* LEADER => nothing */ - 0, /* MERGE => nothing */ - 0, /* REDISTRIBUTE => nothing */ - 0, /* SPLIT => nothing */ - 0, /* DELETE => nothing */ - 0, /* INSERT => nothing */ - 0, /* NK_BIN => nothing */ - 0, /* NK_HEX => nothing */ - 0, /* NULL => nothing */ - 0, /* NK_QUESTION => nothing */ - 0, /* NK_ALIAS => nothing */ - 0, /* NK_ARROW => nothing */ - 0, /* ROWTS => nothing */ - 0, /* QSTART => nothing */ - 0, /* QEND => nothing */ - 0, /* QDURATION => nothing */ - 0, /* WSTART => nothing */ - 0, /* WEND => nothing */ - 0, /* WDURATION => nothing */ - 0, /* IROWTS => 
nothing */ - 0, /* ISFILLED => nothing */ - 0, /* FLOW => nothing */ - 0, /* FHIGH => nothing */ - 0, /* FROWTS => nothing */ - 0, /* CAST => nothing */ - 0, /* POSITION => nothing */ - 0, /* IN => nothing */ - 343, /* FOR => ABORT */ - 0, /* NOW => nothing */ - 0, /* TODAY => nothing */ - 0, /* RAND => nothing */ - 0, /* SUBSTR => nothing */ - 0, /* SUBSTRING => nothing */ - 0, /* BOTH => nothing */ - 0, /* TRAILING => nothing */ - 0, /* LEADING => nothing */ - 0, /* TIMEZONE => nothing */ - 0, /* CLIENT_VERSION => nothing */ - 0, /* SERVER_VERSION => nothing */ - 0, /* SERVER_STATUS => nothing */ - 0, /* CURRENT_USER => nothing */ - 0, /* PI => nothing */ - 0, /* CASE => nothing */ - 0, /* WHEN => nothing */ - 0, /* THEN => nothing */ - 0, /* ELSE => nothing */ - 0, /* BETWEEN => nothing */ - 0, /* IS => nothing */ - 0, /* NK_LT => nothing */ - 0, /* NK_GT => nothing */ - 0, /* NK_LE => nothing */ - 0, /* NK_GE => nothing */ - 0, /* NK_NE => nothing */ - 0, /* MATCH => nothing */ - 0, /* NMATCH => nothing */ - 0, /* CONTAINS => nothing */ - 0, /* JOIN => nothing */ - 0, /* INNER => nothing */ - 0, /* LEFT => nothing */ - 0, /* RIGHT => nothing */ - 0, /* OUTER => nothing */ - 343, /* SEMI => ABORT */ - 0, /* ANTI => nothing */ - 0, /* ASOF => nothing */ - 0, /* WINDOW => nothing */ - 0, /* WINDOW_OFFSET => nothing */ - 0, /* JLIMIT => nothing */ - 0, /* SELECT => nothing */ - 0, /* NK_HINT => nothing */ - 0, /* DISTINCT => nothing */ - 0, /* WHERE => nothing */ - 0, /* PARTITION => nothing */ - 0, /* BY => nothing */ - 0, /* SESSION => nothing */ - 0, /* STATE_WINDOW => nothing */ - 0, /* EVENT_WINDOW => nothing */ - 0, /* COUNT_WINDOW => nothing */ - 0, /* ANOMALY_WINDOW => nothing */ - 0, /* SLIDING => nothing */ - 0, /* FILL => nothing */ - 0, /* VALUE => nothing */ - 0, /* VALUE_F => nothing */ - 0, /* NONE => nothing */ - 0, /* PREV => nothing */ - 0, /* NULL_F => nothing */ - 0, /* LINEAR => nothing */ - 0, /* NEXT => nothing */ - 0, /* HAVING => nothing */ 
- 0, /* RANGE => nothing */ - 0, /* EVERY => nothing */ - 0, /* ORDER => nothing */ - 0, /* SLIMIT => nothing */ - 0, /* SOFFSET => nothing */ - 0, /* LIMIT => nothing */ - 0, /* OFFSET => nothing */ - 0, /* ASC => nothing */ - 0, /* NULLS => nothing */ - 0, /* ABORT => nothing */ - 343, /* AFTER => ABORT */ - 343, /* ATTACH => ABORT */ - 343, /* BEFORE => ABORT */ - 343, /* BEGIN => ABORT */ - 343, /* BITAND => ABORT */ - 343, /* BITNOT => ABORT */ - 343, /* BITOR => ABORT */ - 343, /* BLOCKS => ABORT */ - 343, /* CHANGE => ABORT */ - 343, /* COMMA => ABORT */ - 343, /* CONCAT => ABORT */ - 343, /* CONFLICT => ABORT */ - 343, /* COPY => ABORT */ - 343, /* DEFERRED => ABORT */ - 343, /* DELIMITERS => ABORT */ - 343, /* DETACH => ABORT */ - 343, /* DIVIDE => ABORT */ - 343, /* DOT => ABORT */ - 343, /* EACH => ABORT */ - 343, /* FAIL => ABORT */ - 343, /* GLOB => ABORT */ - 343, /* ID => ABORT */ - 343, /* IMMEDIATE => ABORT */ - 343, /* IMPORT => ABORT */ - 343, /* INITIALLY => ABORT */ - 343, /* INSTEAD => ABORT */ - 343, /* ISNULL => ABORT */ - 343, /* MODULES => ABORT */ - 343, /* NK_BITNOT => ABORT */ - 343, /* NK_SEMI => ABORT */ - 343, /* NOTNULL => ABORT */ - 343, /* OF => ABORT */ - 343, /* PLUS => ABORT */ - 343, /* PRIVILEGE => ABORT */ - 343, /* RAISE => ABORT */ - 343, /* RESTRICT => ABORT */ - 343, /* ROW => ABORT */ - 343, /* STAR => ABORT */ - 343, /* STATEMENT => ABORT */ - 343, /* STRICT => ABORT */ - 343, /* STRING => ABORT */ - 343, /* TIMES => ABORT */ - 343, /* VALUES => ABORT */ - 343, /* VARIABLE => ABORT */ - 343, /* WAL => ABORT */ -}; -#endif /* YYFALLBACK */ - -/* The following structure represents a single element of the -** parser's stack. Information stored includes: -** -** + The state number for the parser at this level of the stack. -** -** + The value of the token stored at this level of the stack. -** (In other words, the "major" token.) -** -** + The semantic value stored at this level of the stack. 
This is -** the information used by the action routines in the grammar. -** It is sometimes called the "minor" token. -** -** After the "shift" half of a SHIFTREDUCE action, the stateno field -** actually contains the reduce action for the second half of the -** SHIFTREDUCE. -*/ -struct yyStackEntry { - YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */ - YYCODETYPE major; /* The major token value. This is the code - ** number for the token at this stack level */ - YYMINORTYPE minor; /* The user-supplied minor token value. This - ** is the value of the token */ -}; -typedef struct yyStackEntry yyStackEntry; - -/* The state of the parser is completely contained in an instance of -** the following structure */ -struct yyParser { - yyStackEntry *yytos; /* Pointer to top element of the stack */ -#ifdef YYTRACKMAXSTACKDEPTH - int yyhwm; /* High-water mark of the stack */ -#endif -#ifndef YYNOERRORRECOVERY - int yyerrcnt; /* Shifts left before out of the error */ -#endif - ParseARG_SDECL /* A place to hold %extra_argument */ - ParseCTX_SDECL /* A place to hold %extra_context */ -#if YYSTACKDEPTH<=0 - int yystksz; /* Current side of the stack */ - yyStackEntry *yystack; /* The parser's stack */ - yyStackEntry yystk0; /* First stack entry */ -#else - yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */ - yyStackEntry *yystackEnd; /* Last entry in the stack */ -#endif -}; -typedef struct yyParser yyParser; - -#ifndef NDEBUG -#include -static FILE *yyTraceFILE = 0; -static char *yyTracePrompt = 0; -#endif /* NDEBUG */ - -#ifndef NDEBUG -/* -** Turn parser tracing on by giving a stream to which to write the trace -** and a prompt to preface each trace message. Tracing is turned off -** by making either argument NULL -** -** Inputs: -**
    -**
  • A FILE* to which trace output should be written. -** If NULL, then tracing is turned off. -**
  • A prefix string written at the beginning of every -** line of trace output. If NULL, then tracing is -** turned off. -**
-** -** Outputs: -** None. -*/ -void ParseTrace(FILE *TraceFILE, char *zTracePrompt){ - yyTraceFILE = TraceFILE; - yyTracePrompt = zTracePrompt; - if( yyTraceFILE==0 ) yyTracePrompt = 0; - else if( yyTracePrompt==0 ) yyTraceFILE = 0; -} -#endif /* NDEBUG */ - -#if defined(YYCOVERAGE) || !defined(NDEBUG) -/* For tracing shifts, the names of all terminals and nonterminals -** are required. The following table supplies these names */ -static const char *const yyTokenName[] = { - /* 0 */ "$", - /* 1 */ "OR", - /* 2 */ "AND", - /* 3 */ "UNION", - /* 4 */ "ALL", - /* 5 */ "MINUS", - /* 6 */ "EXCEPT", - /* 7 */ "INTERSECT", - /* 8 */ "NK_BITAND", - /* 9 */ "NK_BITOR", - /* 10 */ "NK_LSHIFT", - /* 11 */ "NK_RSHIFT", - /* 12 */ "NK_PLUS", - /* 13 */ "NK_MINUS", - /* 14 */ "NK_STAR", - /* 15 */ "NK_SLASH", - /* 16 */ "NK_REM", - /* 17 */ "NK_CONCAT", - /* 18 */ "CREATE", - /* 19 */ "ACCOUNT", - /* 20 */ "NK_ID", - /* 21 */ "PASS", - /* 22 */ "NK_STRING", - /* 23 */ "ALTER", - /* 24 */ "PPS", - /* 25 */ "TSERIES", - /* 26 */ "STORAGE", - /* 27 */ "STREAMS", - /* 28 */ "QTIME", - /* 29 */ "DBS", - /* 30 */ "USERS", - /* 31 */ "CONNS", - /* 32 */ "STATE", - /* 33 */ "NK_COMMA", - /* 34 */ "HOST", - /* 35 */ "IS_IMPORT", - /* 36 */ "NK_INTEGER", - /* 37 */ "CREATEDB", - /* 38 */ "USER", - /* 39 */ "ENABLE", - /* 40 */ "SYSINFO", - /* 41 */ "ADD", - /* 42 */ "DROP", - /* 43 */ "GRANT", - /* 44 */ "ON", - /* 45 */ "TO", - /* 46 */ "REVOKE", - /* 47 */ "FROM", - /* 48 */ "SUBSCRIBE", - /* 49 */ "READ", - /* 50 */ "WRITE", - /* 51 */ "NK_DOT", - /* 52 */ "WITH", - /* 53 */ "ENCRYPT_KEY", - /* 54 */ "ANODE", - /* 55 */ "UPDATE", - /* 56 */ "ANODES", - /* 57 */ "DNODE", - /* 58 */ "PORT", - /* 59 */ "DNODES", - /* 60 */ "RESTORE", - /* 61 */ "NK_IPTOKEN", - /* 62 */ "FORCE", - /* 63 */ "UNSAFE", - /* 64 */ "CLUSTER", - /* 65 */ "LOCAL", - /* 66 */ "QNODE", - /* 67 */ "BNODE", - /* 68 */ "SNODE", - /* 69 */ "MNODE", - /* 70 */ "VNODE", - /* 71 */ "DATABASE", - /* 72 */ "USE", - /* 73 
*/ "FLUSH", - /* 74 */ "TRIM", - /* 75 */ "S3MIGRATE", - /* 76 */ "COMPACT", - /* 77 */ "IF", - /* 78 */ "NOT", - /* 79 */ "EXISTS", - /* 80 */ "BUFFER", - /* 81 */ "CACHEMODEL", - /* 82 */ "CACHESIZE", - /* 83 */ "COMP", - /* 84 */ "DURATION", - /* 85 */ "NK_VARIABLE", - /* 86 */ "MAXROWS", - /* 87 */ "MINROWS", - /* 88 */ "KEEP", - /* 89 */ "PAGES", - /* 90 */ "PAGESIZE", - /* 91 */ "TSDB_PAGESIZE", - /* 92 */ "PRECISION", - /* 93 */ "REPLICA", - /* 94 */ "VGROUPS", - /* 95 */ "SINGLE_STABLE", - /* 96 */ "RETENTIONS", - /* 97 */ "SCHEMALESS", - /* 98 */ "WAL_LEVEL", - /* 99 */ "WAL_FSYNC_PERIOD", - /* 100 */ "WAL_RETENTION_PERIOD", - /* 101 */ "WAL_RETENTION_SIZE", - /* 102 */ "WAL_ROLL_PERIOD", - /* 103 */ "WAL_SEGMENT_SIZE", - /* 104 */ "STT_TRIGGER", - /* 105 */ "TABLE_PREFIX", - /* 106 */ "TABLE_SUFFIX", - /* 107 */ "S3_CHUNKSIZE", - /* 108 */ "S3_KEEPLOCAL", - /* 109 */ "S3_COMPACT", - /* 110 */ "KEEP_TIME_OFFSET", - /* 111 */ "ENCRYPT_ALGORITHM", - /* 112 */ "NK_COLON", - /* 113 */ "BWLIMIT", - /* 114 */ "START", - /* 115 */ "TIMESTAMP", - /* 116 */ "END", - /* 117 */ "TABLE", - /* 118 */ "NK_LP", - /* 119 */ "NK_RP", - /* 120 */ "USING", - /* 121 */ "FILE", - /* 122 */ "STABLE", - /* 123 */ "COLUMN", - /* 124 */ "MODIFY", - /* 125 */ "RENAME", - /* 126 */ "TAG", - /* 127 */ "SET", - /* 128 */ "NK_EQ", - /* 129 */ "TAGS", - /* 130 */ "BOOL", - /* 131 */ "TINYINT", - /* 132 */ "SMALLINT", - /* 133 */ "INT", - /* 134 */ "INTEGER", - /* 135 */ "BIGINT", - /* 136 */ "FLOAT", - /* 137 */ "DOUBLE", - /* 138 */ "BINARY", - /* 139 */ "NCHAR", - /* 140 */ "UNSIGNED", - /* 141 */ "JSON", - /* 142 */ "VARCHAR", - /* 143 */ "MEDIUMBLOB", - /* 144 */ "BLOB", - /* 145 */ "VARBINARY", - /* 146 */ "GEOMETRY", - /* 147 */ "DECIMAL", - /* 148 */ "COMMENT", - /* 149 */ "MAX_DELAY", - /* 150 */ "WATERMARK", - /* 151 */ "ROLLUP", - /* 152 */ "TTL", - /* 153 */ "SMA", - /* 154 */ "DELETE_MARK", - /* 155 */ "FIRST", - /* 156 */ "LAST", - /* 157 */ "SHOW", - /* 158 */ "FULL", - /* 
159 */ "PRIVILEGES", - /* 160 */ "DATABASES", - /* 161 */ "TABLES", - /* 162 */ "STABLES", - /* 163 */ "MNODES", - /* 164 */ "QNODES", - /* 165 */ "ARBGROUPS", - /* 166 */ "FUNCTIONS", - /* 167 */ "INDEXES", - /* 168 */ "ACCOUNTS", - /* 169 */ "APPS", - /* 170 */ "CONNECTIONS", - /* 171 */ "LICENCES", - /* 172 */ "GRANTS", - /* 173 */ "LOGS", - /* 174 */ "MACHINES", - /* 175 */ "ENCRYPTIONS", - /* 176 */ "QUERIES", - /* 177 */ "SCORES", - /* 178 */ "TOPICS", - /* 179 */ "VARIABLES", - /* 180 */ "BNODES", - /* 181 */ "SNODES", - /* 182 */ "TRANSACTIONS", - /* 183 */ "DISTRIBUTED", - /* 184 */ "CONSUMERS", - /* 185 */ "SUBSCRIPTIONS", - /* 186 */ "VNODES", - /* 187 */ "ALIVE", - /* 188 */ "VIEWS", - /* 189 */ "VIEW", - /* 190 */ "COMPACTS", - /* 191 */ "NORMAL", - /* 192 */ "CHILD", - /* 193 */ "LIKE", - /* 194 */ "TBNAME", - /* 195 */ "QTAGS", - /* 196 */ "AS", - /* 197 */ "SYSTEM", - /* 198 */ "TSMA", - /* 199 */ "INTERVAL", - /* 200 */ "RECURSIVE", - /* 201 */ "TSMAS", - /* 202 */ "FUNCTION", - /* 203 */ "INDEX", - /* 204 */ "COUNT", - /* 205 */ "LAST_ROW", - /* 206 */ "META", - /* 207 */ "ONLY", - /* 208 */ "TOPIC", - /* 209 */ "CONSUMER", - /* 210 */ "GROUP", - /* 211 */ "DESC", - /* 212 */ "DESCRIBE", - /* 213 */ "RESET", - /* 214 */ "QUERY", - /* 215 */ "CACHE", - /* 216 */ "EXPLAIN", - /* 217 */ "ANALYZE", - /* 218 */ "VERBOSE", - /* 219 */ "NK_BOOL", - /* 220 */ "RATIO", - /* 221 */ "NK_FLOAT", - /* 222 */ "OUTPUTTYPE", - /* 223 */ "AGGREGATE", - /* 224 */ "BUFSIZE", - /* 225 */ "LANGUAGE", - /* 226 */ "REPLACE", - /* 227 */ "STREAM", - /* 228 */ "INTO", - /* 229 */ "PAUSE", - /* 230 */ "RESUME", - /* 231 */ "PRIMARY", - /* 232 */ "KEY", - /* 233 */ "TRIGGER", - /* 234 */ "AT_ONCE", - /* 235 */ "WINDOW_CLOSE", - /* 236 */ "IGNORE", - /* 237 */ "EXPIRED", - /* 238 */ "FILL_HISTORY", - /* 239 */ "SUBTABLE", - /* 240 */ "UNTREATED", - /* 241 */ "KILL", - /* 242 */ "CONNECTION", - /* 243 */ "TRANSACTION", - /* 244 */ "BALANCE", - /* 245 */ "VGROUP", - /* 246 */ 
"LEADER", - /* 247 */ "MERGE", - /* 248 */ "REDISTRIBUTE", - /* 249 */ "SPLIT", - /* 250 */ "DELETE", - /* 251 */ "INSERT", - /* 252 */ "NK_BIN", - /* 253 */ "NK_HEX", - /* 254 */ "NULL", - /* 255 */ "NK_QUESTION", - /* 256 */ "NK_ALIAS", - /* 257 */ "NK_ARROW", - /* 258 */ "ROWTS", - /* 259 */ "QSTART", - /* 260 */ "QEND", - /* 261 */ "QDURATION", - /* 262 */ "WSTART", - /* 263 */ "WEND", - /* 264 */ "WDURATION", - /* 265 */ "IROWTS", - /* 266 */ "ISFILLED", - /* 267 */ "FLOW", - /* 268 */ "FHIGH", - /* 269 */ "FROWTS", - /* 270 */ "CAST", - /* 271 */ "POSITION", - /* 272 */ "IN", - /* 273 */ "FOR", - /* 274 */ "NOW", - /* 275 */ "TODAY", - /* 276 */ "RAND", - /* 277 */ "SUBSTR", - /* 278 */ "SUBSTRING", - /* 279 */ "BOTH", - /* 280 */ "TRAILING", - /* 281 */ "LEADING", - /* 282 */ "TIMEZONE", - /* 283 */ "CLIENT_VERSION", - /* 284 */ "SERVER_VERSION", - /* 285 */ "SERVER_STATUS", - /* 286 */ "CURRENT_USER", - /* 287 */ "PI", - /* 288 */ "CASE", - /* 289 */ "WHEN", - /* 290 */ "THEN", - /* 291 */ "ELSE", - /* 292 */ "BETWEEN", - /* 293 */ "IS", - /* 294 */ "NK_LT", - /* 295 */ "NK_GT", - /* 296 */ "NK_LE", - /* 297 */ "NK_GE", - /* 298 */ "NK_NE", - /* 299 */ "MATCH", - /* 300 */ "NMATCH", - /* 301 */ "CONTAINS", - /* 302 */ "JOIN", - /* 303 */ "INNER", - /* 304 */ "LEFT", - /* 305 */ "RIGHT", - /* 306 */ "OUTER", - /* 307 */ "SEMI", - /* 308 */ "ANTI", - /* 309 */ "ASOF", - /* 310 */ "WINDOW", - /* 311 */ "WINDOW_OFFSET", - /* 312 */ "JLIMIT", - /* 313 */ "SELECT", - /* 314 */ "NK_HINT", - /* 315 */ "DISTINCT", - /* 316 */ "WHERE", - /* 317 */ "PARTITION", - /* 318 */ "BY", - /* 319 */ "SESSION", - /* 320 */ "STATE_WINDOW", - /* 321 */ "EVENT_WINDOW", - /* 322 */ "COUNT_WINDOW", - /* 323 */ "ANOMALY_WINDOW", - /* 324 */ "SLIDING", - /* 325 */ "FILL", - /* 326 */ "VALUE", - /* 327 */ "VALUE_F", - /* 328 */ "NONE", - /* 329 */ "PREV", - /* 330 */ "NULL_F", - /* 331 */ "LINEAR", - /* 332 */ "NEXT", - /* 333 */ "HAVING", - /* 334 */ "RANGE", - /* 335 */ "EVERY", - /* 
336 */ "ORDER", - /* 337 */ "SLIMIT", - /* 338 */ "SOFFSET", - /* 339 */ "LIMIT", - /* 340 */ "OFFSET", - /* 341 */ "ASC", - /* 342 */ "NULLS", - /* 343 */ "ABORT", - /* 344 */ "AFTER", - /* 345 */ "ATTACH", - /* 346 */ "BEFORE", - /* 347 */ "BEGIN", - /* 348 */ "BITAND", - /* 349 */ "BITNOT", - /* 350 */ "BITOR", - /* 351 */ "BLOCKS", - /* 352 */ "CHANGE", - /* 353 */ "COMMA", - /* 354 */ "CONCAT", - /* 355 */ "CONFLICT", - /* 356 */ "COPY", - /* 357 */ "DEFERRED", - /* 358 */ "DELIMITERS", - /* 359 */ "DETACH", - /* 360 */ "DIVIDE", - /* 361 */ "DOT", - /* 362 */ "EACH", - /* 363 */ "FAIL", - /* 364 */ "GLOB", - /* 365 */ "ID", - /* 366 */ "IMMEDIATE", - /* 367 */ "IMPORT", - /* 368 */ "INITIALLY", - /* 369 */ "INSTEAD", - /* 370 */ "ISNULL", - /* 371 */ "MODULES", - /* 372 */ "NK_BITNOT", - /* 373 */ "NK_SEMI", - /* 374 */ "NOTNULL", - /* 375 */ "OF", - /* 376 */ "PLUS", - /* 377 */ "PRIVILEGE", - /* 378 */ "RAISE", - /* 379 */ "RESTRICT", - /* 380 */ "ROW", - /* 381 */ "STAR", - /* 382 */ "STATEMENT", - /* 383 */ "STRICT", - /* 384 */ "STRING", - /* 385 */ "TIMES", - /* 386 */ "VALUES", - /* 387 */ "VARIABLE", - /* 388 */ "WAL", - /* 389 */ "cmd", - /* 390 */ "account_options", - /* 391 */ "alter_account_options", - /* 392 */ "literal", - /* 393 */ "alter_account_option", - /* 394 */ "ip_range_list", - /* 395 */ "white_list", - /* 396 */ "white_list_opt", - /* 397 */ "is_import_opt", - /* 398 */ "is_createdb_opt", - /* 399 */ "user_name", - /* 400 */ "sysinfo_opt", - /* 401 */ "privileges", - /* 402 */ "priv_level", - /* 403 */ "with_clause_opt", - /* 404 */ "priv_type_list", - /* 405 */ "priv_type", - /* 406 */ "db_name", - /* 407 */ "table_name", - /* 408 */ "topic_name", - /* 409 */ "search_condition", - /* 410 */ "dnode_endpoint", - /* 411 */ "force_opt", - /* 412 */ "unsafe_opt", - /* 413 */ "not_exists_opt", - /* 414 */ "db_options", - /* 415 */ "exists_opt", - /* 416 */ "alter_db_options", - /* 417 */ "speed_opt", - /* 418 */ "start_opt", - /* 419 */ 
"end_opt", - /* 420 */ "integer_list", - /* 421 */ "variable_list", - /* 422 */ "retention_list", - /* 423 */ "signed", - /* 424 */ "alter_db_option", - /* 425 */ "retention", - /* 426 */ "full_table_name", - /* 427 */ "column_def_list", - /* 428 */ "tags_def_opt", - /* 429 */ "table_options", - /* 430 */ "multi_create_clause", - /* 431 */ "tag_list_opt", - /* 432 */ "tags_def", - /* 433 */ "with_opt", - /* 434 */ "multi_drop_clause", - /* 435 */ "alter_table_clause", - /* 436 */ "alter_table_options", - /* 437 */ "column_name", - /* 438 */ "type_name", - /* 439 */ "column_options", - /* 440 */ "tags_literal", - /* 441 */ "create_subtable_clause", - /* 442 */ "specific_cols_opt", - /* 443 */ "tags_literal_list", - /* 444 */ "drop_table_clause", - /* 445 */ "col_name_list", - /* 446 */ "tag_def_list", - /* 447 */ "tag_def", - /* 448 */ "column_def", - /* 449 */ "type_name_default_len", - /* 450 */ "duration_list", - /* 451 */ "rollup_func_list", - /* 452 */ "alter_table_option", - /* 453 */ "duration_literal", - /* 454 */ "rollup_func_name", - /* 455 */ "function_name", - /* 456 */ "col_name", - /* 457 */ "db_kind_opt", - /* 458 */ "table_kind_db_name_cond_opt", - /* 459 */ "like_pattern_opt", - /* 460 */ "db_name_cond_opt", - /* 461 */ "table_name_cond", - /* 462 */ "from_db_opt", - /* 463 */ "table_kind", - /* 464 */ "tag_item", - /* 465 */ "column_alias", - /* 466 */ "tsma_name", - /* 467 */ "tsma_func_list", - /* 468 */ "full_tsma_name", - /* 469 */ "func_list", - /* 470 */ "index_options", - /* 471 */ "full_index_name", - /* 472 */ "index_name", - /* 473 */ "sliding_opt", - /* 474 */ "sma_stream_opt", - /* 475 */ "func", - /* 476 */ "sma_func_name", - /* 477 */ "expression_list", - /* 478 */ "with_meta", - /* 479 */ "query_or_subquery", - /* 480 */ "where_clause_opt", - /* 481 */ "cgroup_name", - /* 482 */ "analyze_opt", - /* 483 */ "explain_options", - /* 484 */ "insert_query", - /* 485 */ "or_replace_opt", - /* 486 */ "agg_func_opt", - /* 487 */ 
"bufsize_opt", - /* 488 */ "language_opt", - /* 489 */ "full_view_name", - /* 490 */ "view_name", - /* 491 */ "stream_name", - /* 492 */ "stream_options", - /* 493 */ "col_list_opt", - /* 494 */ "tag_def_or_ref_opt", - /* 495 */ "subtable_opt", - /* 496 */ "ignore_opt", - /* 497 */ "column_stream_def_list", - /* 498 */ "column_stream_def", - /* 499 */ "stream_col_options", - /* 500 */ "expression", - /* 501 */ "on_vgroup_id", - /* 502 */ "dnode_list", - /* 503 */ "literal_func", - /* 504 */ "signed_literal", - /* 505 */ "literal_list", - /* 506 */ "table_alias", - /* 507 */ "expr_or_subquery", - /* 508 */ "pseudo_column", - /* 509 */ "column_reference", - /* 510 */ "function_expression", - /* 511 */ "case_when_expression", - /* 512 */ "star_func", - /* 513 */ "star_func_para_list", - /* 514 */ "trim_specification_type", - /* 515 */ "substr_func", - /* 516 */ "rand_func", - /* 517 */ "noarg_func", - /* 518 */ "other_para_list", - /* 519 */ "star_func_para", - /* 520 */ "when_then_list", - /* 521 */ "case_when_else_opt", - /* 522 */ "common_expression", - /* 523 */ "when_then_expr", - /* 524 */ "predicate", - /* 525 */ "compare_op", - /* 526 */ "in_op", - /* 527 */ "in_predicate_value", - /* 528 */ "boolean_value_expression", - /* 529 */ "boolean_primary", - /* 530 */ "from_clause_opt", - /* 531 */ "table_reference_list", - /* 532 */ "table_reference", - /* 533 */ "table_primary", - /* 534 */ "joined_table", - /* 535 */ "alias_opt", - /* 536 */ "subquery", - /* 537 */ "parenthesized_joined_table", - /* 538 */ "join_type", - /* 539 */ "join_subtype", - /* 540 */ "join_on_clause_opt", - /* 541 */ "window_offset_clause_opt", - /* 542 */ "jlimit_clause_opt", - /* 543 */ "window_offset_literal", - /* 544 */ "query_specification", - /* 545 */ "hint_list", - /* 546 */ "set_quantifier_opt", - /* 547 */ "tag_mode_opt", - /* 548 */ "select_list", - /* 549 */ "partition_by_clause_opt", - /* 550 */ "range_opt", - /* 551 */ "every_opt", - /* 552 */ "fill_opt", - /* 553 */ 
"twindow_clause_opt", - /* 554 */ "group_by_clause_opt", - /* 555 */ "having_clause_opt", - /* 556 */ "select_item", - /* 557 */ "partition_list", - /* 558 */ "partition_item", - /* 559 */ "interval_sliding_duration_literal", - /* 560 */ "fill_mode", - /* 561 */ "group_by_list", - /* 562 */ "query_expression", - /* 563 */ "query_simple", - /* 564 */ "order_by_clause_opt", - /* 565 */ "slimit_clause_opt", - /* 566 */ "limit_clause_opt", - /* 567 */ "union_query_expression", - /* 568 */ "query_simple_or_subquery", - /* 569 */ "sort_specification_list", - /* 570 */ "sort_specification", - /* 571 */ "ordering_specification_opt", - /* 572 */ "null_ordering_opt", -}; -#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ - -#ifndef NDEBUG -/* For tracing reduce actions, the names of all rules are required. -*/ -static const char *const yyRuleName[] = { - /* 0 */ "cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options", - /* 1 */ "cmd ::= ALTER ACCOUNT NK_ID alter_account_options", - /* 2 */ "account_options ::=", - /* 3 */ "account_options ::= account_options PPS literal", - /* 4 */ "account_options ::= account_options TSERIES literal", - /* 5 */ "account_options ::= account_options STORAGE literal", - /* 6 */ "account_options ::= account_options STREAMS literal", - /* 7 */ "account_options ::= account_options QTIME literal", - /* 8 */ "account_options ::= account_options DBS literal", - /* 9 */ "account_options ::= account_options USERS literal", - /* 10 */ "account_options ::= account_options CONNS literal", - /* 11 */ "account_options ::= account_options STATE literal", - /* 12 */ "alter_account_options ::= alter_account_option", - /* 13 */ "alter_account_options ::= alter_account_options alter_account_option", - /* 14 */ "alter_account_option ::= PASS literal", - /* 15 */ "alter_account_option ::= PPS literal", - /* 16 */ "alter_account_option ::= TSERIES literal", - /* 17 */ "alter_account_option ::= STORAGE literal", - /* 18 */ "alter_account_option ::= STREAMS 
literal", - /* 19 */ "alter_account_option ::= QTIME literal", - /* 20 */ "alter_account_option ::= DBS literal", - /* 21 */ "alter_account_option ::= USERS literal", - /* 22 */ "alter_account_option ::= CONNS literal", - /* 23 */ "alter_account_option ::= STATE literal", - /* 24 */ "ip_range_list ::= NK_STRING", - /* 25 */ "ip_range_list ::= ip_range_list NK_COMMA NK_STRING", - /* 26 */ "white_list ::= HOST ip_range_list", - /* 27 */ "white_list_opt ::=", - /* 28 */ "white_list_opt ::= white_list", - /* 29 */ "is_import_opt ::=", - /* 30 */ "is_import_opt ::= IS_IMPORT NK_INTEGER", - /* 31 */ "is_createdb_opt ::=", - /* 32 */ "is_createdb_opt ::= CREATEDB NK_INTEGER", - /* 33 */ "cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt is_createdb_opt is_import_opt white_list_opt", - /* 34 */ "cmd ::= ALTER USER user_name PASS NK_STRING", - /* 35 */ "cmd ::= ALTER USER user_name ENABLE NK_INTEGER", - /* 36 */ "cmd ::= ALTER USER user_name SYSINFO NK_INTEGER", - /* 37 */ "cmd ::= ALTER USER user_name CREATEDB NK_INTEGER", - /* 38 */ "cmd ::= ALTER USER user_name ADD white_list", - /* 39 */ "cmd ::= ALTER USER user_name DROP white_list", - /* 40 */ "cmd ::= DROP USER user_name", - /* 41 */ "sysinfo_opt ::=", - /* 42 */ "sysinfo_opt ::= SYSINFO NK_INTEGER", - /* 43 */ "cmd ::= GRANT privileges ON priv_level with_clause_opt TO user_name", - /* 44 */ "cmd ::= REVOKE privileges ON priv_level with_clause_opt FROM user_name", - /* 45 */ "privileges ::= ALL", - /* 46 */ "privileges ::= priv_type_list", - /* 47 */ "privileges ::= SUBSCRIBE", - /* 48 */ "priv_type_list ::= priv_type", - /* 49 */ "priv_type_list ::= priv_type_list NK_COMMA priv_type", - /* 50 */ "priv_type ::= READ", - /* 51 */ "priv_type ::= WRITE", - /* 52 */ "priv_type ::= ALTER", - /* 53 */ "priv_level ::= NK_STAR NK_DOT NK_STAR", - /* 54 */ "priv_level ::= db_name NK_DOT NK_STAR", - /* 55 */ "priv_level ::= db_name NK_DOT table_name", - /* 56 */ "priv_level ::= topic_name", - /* 57 */ "with_clause_opt 
::=", - /* 58 */ "with_clause_opt ::= WITH search_condition", - /* 59 */ "cmd ::= CREATE ENCRYPT_KEY NK_STRING", - /* 60 */ "cmd ::= CREATE ANODE NK_STRING", - /* 61 */ "cmd ::= UPDATE ANODE NK_INTEGER", - /* 62 */ "cmd ::= UPDATE ALL ANODES", - /* 63 */ "cmd ::= DROP ANODE NK_INTEGER", - /* 64 */ "cmd ::= CREATE DNODE dnode_endpoint", - /* 65 */ "cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER", - /* 66 */ "cmd ::= DROP DNODE NK_INTEGER force_opt", - /* 67 */ "cmd ::= DROP DNODE dnode_endpoint force_opt", - /* 68 */ "cmd ::= DROP DNODE NK_INTEGER unsafe_opt", - /* 69 */ "cmd ::= DROP DNODE dnode_endpoint unsafe_opt", - /* 70 */ "cmd ::= ALTER DNODE NK_INTEGER NK_STRING", - /* 71 */ "cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING", - /* 72 */ "cmd ::= ALTER ALL DNODES NK_STRING", - /* 73 */ "cmd ::= ALTER ALL DNODES NK_STRING NK_STRING", - /* 74 */ "cmd ::= RESTORE DNODE NK_INTEGER", - /* 75 */ "dnode_endpoint ::= NK_STRING", - /* 76 */ "dnode_endpoint ::= NK_ID", - /* 77 */ "dnode_endpoint ::= NK_IPTOKEN", - /* 78 */ "force_opt ::=", - /* 79 */ "force_opt ::= FORCE", - /* 80 */ "unsafe_opt ::= UNSAFE", - /* 81 */ "cmd ::= ALTER CLUSTER NK_STRING", - /* 82 */ "cmd ::= ALTER CLUSTER NK_STRING NK_STRING", - /* 83 */ "cmd ::= ALTER LOCAL NK_STRING", - /* 84 */ "cmd ::= ALTER LOCAL NK_STRING NK_STRING", - /* 85 */ "cmd ::= CREATE QNODE ON DNODE NK_INTEGER", - /* 86 */ "cmd ::= DROP QNODE ON DNODE NK_INTEGER", - /* 87 */ "cmd ::= RESTORE QNODE ON DNODE NK_INTEGER", - /* 88 */ "cmd ::= CREATE BNODE ON DNODE NK_INTEGER", - /* 89 */ "cmd ::= DROP BNODE ON DNODE NK_INTEGER", - /* 90 */ "cmd ::= CREATE SNODE ON DNODE NK_INTEGER", - /* 91 */ "cmd ::= DROP SNODE ON DNODE NK_INTEGER", - /* 92 */ "cmd ::= CREATE MNODE ON DNODE NK_INTEGER", - /* 93 */ "cmd ::= DROP MNODE ON DNODE NK_INTEGER", - /* 94 */ "cmd ::= RESTORE MNODE ON DNODE NK_INTEGER", - /* 95 */ "cmd ::= RESTORE VNODE ON DNODE NK_INTEGER", - /* 96 */ "cmd ::= CREATE DATABASE not_exists_opt db_name 
db_options", - /* 97 */ "cmd ::= DROP DATABASE exists_opt db_name", - /* 98 */ "cmd ::= USE db_name", - /* 99 */ "cmd ::= ALTER DATABASE db_name alter_db_options", - /* 100 */ "cmd ::= FLUSH DATABASE db_name", - /* 101 */ "cmd ::= TRIM DATABASE db_name speed_opt", - /* 102 */ "cmd ::= S3MIGRATE DATABASE db_name", - /* 103 */ "cmd ::= COMPACT DATABASE db_name start_opt end_opt", - /* 104 */ "not_exists_opt ::= IF NOT EXISTS", - /* 105 */ "not_exists_opt ::=", - /* 106 */ "exists_opt ::= IF EXISTS", - /* 107 */ "exists_opt ::=", - /* 108 */ "db_options ::=", - /* 109 */ "db_options ::= db_options BUFFER NK_INTEGER", - /* 110 */ "db_options ::= db_options CACHEMODEL NK_STRING", - /* 111 */ "db_options ::= db_options CACHESIZE NK_INTEGER", - /* 112 */ "db_options ::= db_options COMP NK_INTEGER", - /* 113 */ "db_options ::= db_options DURATION NK_INTEGER", - /* 114 */ "db_options ::= db_options DURATION NK_VARIABLE", - /* 115 */ "db_options ::= db_options MAXROWS NK_INTEGER", - /* 116 */ "db_options ::= db_options MINROWS NK_INTEGER", - /* 117 */ "db_options ::= db_options KEEP integer_list", - /* 118 */ "db_options ::= db_options KEEP variable_list", - /* 119 */ "db_options ::= db_options PAGES NK_INTEGER", - /* 120 */ "db_options ::= db_options PAGESIZE NK_INTEGER", - /* 121 */ "db_options ::= db_options TSDB_PAGESIZE NK_INTEGER", - /* 122 */ "db_options ::= db_options PRECISION NK_STRING", - /* 123 */ "db_options ::= db_options REPLICA NK_INTEGER", - /* 124 */ "db_options ::= db_options VGROUPS NK_INTEGER", - /* 125 */ "db_options ::= db_options SINGLE_STABLE NK_INTEGER", - /* 126 */ "db_options ::= db_options RETENTIONS retention_list", - /* 127 */ "db_options ::= db_options SCHEMALESS NK_INTEGER", - /* 128 */ "db_options ::= db_options WAL_LEVEL NK_INTEGER", - /* 129 */ "db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER", - /* 130 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER", - /* 131 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS 
NK_INTEGER", - /* 132 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER", - /* 133 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER", - /* 134 */ "db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER", - /* 135 */ "db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER", - /* 136 */ "db_options ::= db_options STT_TRIGGER NK_INTEGER", - /* 137 */ "db_options ::= db_options TABLE_PREFIX signed", - /* 138 */ "db_options ::= db_options TABLE_SUFFIX signed", - /* 139 */ "db_options ::= db_options S3_CHUNKSIZE NK_INTEGER", - /* 140 */ "db_options ::= db_options S3_KEEPLOCAL NK_INTEGER", - /* 141 */ "db_options ::= db_options S3_KEEPLOCAL NK_VARIABLE", - /* 142 */ "db_options ::= db_options S3_COMPACT NK_INTEGER", - /* 143 */ "db_options ::= db_options KEEP_TIME_OFFSET NK_INTEGER", - /* 144 */ "db_options ::= db_options ENCRYPT_ALGORITHM NK_STRING", - /* 145 */ "alter_db_options ::= alter_db_option", - /* 146 */ "alter_db_options ::= alter_db_options alter_db_option", - /* 147 */ "alter_db_option ::= BUFFER NK_INTEGER", - /* 148 */ "alter_db_option ::= CACHEMODEL NK_STRING", - /* 149 */ "alter_db_option ::= CACHESIZE NK_INTEGER", - /* 150 */ "alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER", - /* 151 */ "alter_db_option ::= KEEP integer_list", - /* 152 */ "alter_db_option ::= KEEP variable_list", - /* 153 */ "alter_db_option ::= PAGES NK_INTEGER", - /* 154 */ "alter_db_option ::= REPLICA NK_INTEGER", - /* 155 */ "alter_db_option ::= WAL_LEVEL NK_INTEGER", - /* 156 */ "alter_db_option ::= STT_TRIGGER NK_INTEGER", - /* 157 */ "alter_db_option ::= MINROWS NK_INTEGER", - /* 158 */ "alter_db_option ::= WAL_RETENTION_PERIOD NK_INTEGER", - /* 159 */ "alter_db_option ::= WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER", - /* 160 */ "alter_db_option ::= WAL_RETENTION_SIZE NK_INTEGER", - /* 161 */ "alter_db_option ::= WAL_RETENTION_SIZE NK_MINUS NK_INTEGER", - /* 162 */ "alter_db_option ::= S3_KEEPLOCAL NK_INTEGER", - /* 163 */ "alter_db_option ::= 
S3_KEEPLOCAL NK_VARIABLE", - /* 164 */ "alter_db_option ::= S3_COMPACT NK_INTEGER", - /* 165 */ "alter_db_option ::= KEEP_TIME_OFFSET NK_INTEGER", - /* 166 */ "alter_db_option ::= ENCRYPT_ALGORITHM NK_STRING", - /* 167 */ "integer_list ::= NK_INTEGER", - /* 168 */ "integer_list ::= integer_list NK_COMMA NK_INTEGER", - /* 169 */ "variable_list ::= NK_VARIABLE", - /* 170 */ "variable_list ::= variable_list NK_COMMA NK_VARIABLE", - /* 171 */ "retention_list ::= retention", - /* 172 */ "retention_list ::= retention_list NK_COMMA retention", - /* 173 */ "retention ::= NK_VARIABLE NK_COLON NK_VARIABLE", - /* 174 */ "retention ::= NK_MINUS NK_COLON NK_VARIABLE", - /* 175 */ "speed_opt ::=", - /* 176 */ "speed_opt ::= BWLIMIT NK_INTEGER", - /* 177 */ "start_opt ::=", - /* 178 */ "start_opt ::= START WITH NK_INTEGER", - /* 179 */ "start_opt ::= START WITH NK_STRING", - /* 180 */ "start_opt ::= START WITH TIMESTAMP NK_STRING", - /* 181 */ "end_opt ::=", - /* 182 */ "end_opt ::= END WITH NK_INTEGER", - /* 183 */ "end_opt ::= END WITH NK_STRING", - /* 184 */ "end_opt ::= END WITH TIMESTAMP NK_STRING", - /* 185 */ "cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options", - /* 186 */ "cmd ::= CREATE TABLE multi_create_clause", - /* 187 */ "cmd ::= CREATE TABLE not_exists_opt USING full_table_name NK_LP tag_list_opt NK_RP FILE NK_STRING", - /* 188 */ "cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options", - /* 189 */ "cmd ::= DROP TABLE with_opt multi_drop_clause", - /* 190 */ "cmd ::= DROP STABLE with_opt exists_opt full_table_name", - /* 191 */ "cmd ::= ALTER TABLE alter_table_clause", - /* 192 */ "cmd ::= ALTER STABLE alter_table_clause", - /* 193 */ "alter_table_clause ::= full_table_name alter_table_options", - /* 194 */ "alter_table_clause ::= full_table_name ADD COLUMN column_name type_name column_options", - /* 195 */ "alter_table_clause ::= full_table_name DROP COLUMN 
column_name", - /* 196 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name", - /* 197 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name column_options", - /* 198 */ "alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name", - /* 199 */ "alter_table_clause ::= full_table_name ADD TAG column_name type_name", - /* 200 */ "alter_table_clause ::= full_table_name DROP TAG column_name", - /* 201 */ "alter_table_clause ::= full_table_name MODIFY TAG column_name type_name", - /* 202 */ "alter_table_clause ::= full_table_name RENAME TAG column_name column_name", - /* 203 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ tags_literal", - /* 204 */ "multi_create_clause ::= create_subtable_clause", - /* 205 */ "multi_create_clause ::= multi_create_clause create_subtable_clause", - /* 206 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP tags_literal_list NK_RP table_options", - /* 207 */ "multi_drop_clause ::= drop_table_clause", - /* 208 */ "multi_drop_clause ::= multi_drop_clause NK_COMMA drop_table_clause", - /* 209 */ "drop_table_clause ::= exists_opt full_table_name", - /* 210 */ "with_opt ::=", - /* 211 */ "with_opt ::= WITH", - /* 212 */ "specific_cols_opt ::=", - /* 213 */ "specific_cols_opt ::= NK_LP col_name_list NK_RP", - /* 214 */ "full_table_name ::= table_name", - /* 215 */ "full_table_name ::= db_name NK_DOT table_name", - /* 216 */ "tag_def_list ::= tag_def", - /* 217 */ "tag_def_list ::= tag_def_list NK_COMMA tag_def", - /* 218 */ "tag_def ::= column_name type_name", - /* 219 */ "column_def_list ::= column_def", - /* 220 */ "column_def_list ::= column_def_list NK_COMMA column_def", - /* 221 */ "column_def ::= column_name type_name column_options", - /* 222 */ "type_name ::= BOOL", - /* 223 */ "type_name ::= TINYINT", - /* 224 */ "type_name ::= SMALLINT", - /* 225 */ "type_name ::= INT", - /* 226 */ "type_name ::= 
INTEGER", - /* 227 */ "type_name ::= BIGINT", - /* 228 */ "type_name ::= FLOAT", - /* 229 */ "type_name ::= DOUBLE", - /* 230 */ "type_name ::= BINARY NK_LP NK_INTEGER NK_RP", - /* 231 */ "type_name ::= TIMESTAMP", - /* 232 */ "type_name ::= NCHAR NK_LP NK_INTEGER NK_RP", - /* 233 */ "type_name ::= TINYINT UNSIGNED", - /* 234 */ "type_name ::= SMALLINT UNSIGNED", - /* 235 */ "type_name ::= INT UNSIGNED", - /* 236 */ "type_name ::= BIGINT UNSIGNED", - /* 237 */ "type_name ::= JSON", - /* 238 */ "type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP", - /* 239 */ "type_name ::= MEDIUMBLOB", - /* 240 */ "type_name ::= BLOB", - /* 241 */ "type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP", - /* 242 */ "type_name ::= GEOMETRY NK_LP NK_INTEGER NK_RP", - /* 243 */ "type_name ::= DECIMAL", - /* 244 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP", - /* 245 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP", - /* 246 */ "type_name_default_len ::= BINARY", - /* 247 */ "type_name_default_len ::= NCHAR", - /* 248 */ "type_name_default_len ::= VARCHAR", - /* 249 */ "type_name_default_len ::= VARBINARY", - /* 250 */ "tags_def_opt ::=", - /* 251 */ "tags_def_opt ::= tags_def", - /* 252 */ "tags_def ::= TAGS NK_LP tag_def_list NK_RP", - /* 253 */ "table_options ::=", - /* 254 */ "table_options ::= table_options COMMENT NK_STRING", - /* 255 */ "table_options ::= table_options MAX_DELAY duration_list", - /* 256 */ "table_options ::= table_options WATERMARK duration_list", - /* 257 */ "table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP", - /* 258 */ "table_options ::= table_options TTL NK_INTEGER", - /* 259 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP", - /* 260 */ "table_options ::= table_options DELETE_MARK duration_list", - /* 261 */ "alter_table_options ::= alter_table_option", - /* 262 */ "alter_table_options ::= alter_table_options alter_table_option", - /* 263 */ "alter_table_option ::= COMMENT NK_STRING", - /* 264 */ 
"alter_table_option ::= TTL NK_INTEGER", - /* 265 */ "duration_list ::= duration_literal", - /* 266 */ "duration_list ::= duration_list NK_COMMA duration_literal", - /* 267 */ "rollup_func_list ::= rollup_func_name", - /* 268 */ "rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name", - /* 269 */ "rollup_func_name ::= function_name", - /* 270 */ "rollup_func_name ::= FIRST", - /* 271 */ "rollup_func_name ::= LAST", - /* 272 */ "col_name_list ::= col_name", - /* 273 */ "col_name_list ::= col_name_list NK_COMMA col_name", - /* 274 */ "col_name ::= column_name", - /* 275 */ "cmd ::= SHOW DNODES", - /* 276 */ "cmd ::= SHOW USERS", - /* 277 */ "cmd ::= SHOW USERS FULL", - /* 278 */ "cmd ::= SHOW USER PRIVILEGES", - /* 279 */ "cmd ::= SHOW db_kind_opt DATABASES", - /* 280 */ "cmd ::= SHOW table_kind_db_name_cond_opt TABLES like_pattern_opt", - /* 281 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt", - /* 282 */ "cmd ::= SHOW db_name_cond_opt VGROUPS", - /* 283 */ "cmd ::= SHOW MNODES", - /* 284 */ "cmd ::= SHOW QNODES", - /* 285 */ "cmd ::= SHOW ANODES", - /* 286 */ "cmd ::= SHOW ANODES FULL", - /* 287 */ "cmd ::= SHOW ARBGROUPS", - /* 288 */ "cmd ::= SHOW FUNCTIONS", - /* 289 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt", - /* 290 */ "cmd ::= SHOW INDEXES FROM db_name NK_DOT table_name", - /* 291 */ "cmd ::= SHOW STREAMS", - /* 292 */ "cmd ::= SHOW ACCOUNTS", - /* 293 */ "cmd ::= SHOW APPS", - /* 294 */ "cmd ::= SHOW CONNECTIONS", - /* 295 */ "cmd ::= SHOW LICENCES", - /* 296 */ "cmd ::= SHOW GRANTS", - /* 297 */ "cmd ::= SHOW GRANTS FULL", - /* 298 */ "cmd ::= SHOW GRANTS LOGS", - /* 299 */ "cmd ::= SHOW CLUSTER MACHINES", - /* 300 */ "cmd ::= SHOW CREATE DATABASE db_name", - /* 301 */ "cmd ::= SHOW CREATE TABLE full_table_name", - /* 302 */ "cmd ::= SHOW CREATE STABLE full_table_name", - /* 303 */ "cmd ::= SHOW ENCRYPTIONS", - /* 304 */ "cmd ::= SHOW QUERIES", - /* 305 */ "cmd ::= SHOW SCORES", - /* 306 */ "cmd ::= SHOW TOPICS", - /* 
307 */ "cmd ::= SHOW VARIABLES", - /* 308 */ "cmd ::= SHOW CLUSTER VARIABLES", - /* 309 */ "cmd ::= SHOW LOCAL VARIABLES", - /* 310 */ "cmd ::= SHOW DNODE NK_INTEGER VARIABLES like_pattern_opt", - /* 311 */ "cmd ::= SHOW BNODES", - /* 312 */ "cmd ::= SHOW SNODES", - /* 313 */ "cmd ::= SHOW CLUSTER", - /* 314 */ "cmd ::= SHOW TRANSACTIONS", - /* 315 */ "cmd ::= SHOW TABLE DISTRIBUTED full_table_name", - /* 316 */ "cmd ::= SHOW CONSUMERS", - /* 317 */ "cmd ::= SHOW SUBSCRIPTIONS", - /* 318 */ "cmd ::= SHOW TAGS FROM table_name_cond from_db_opt", - /* 319 */ "cmd ::= SHOW TAGS FROM db_name NK_DOT table_name", - /* 320 */ "cmd ::= SHOW TABLE TAGS tag_list_opt FROM table_name_cond from_db_opt", - /* 321 */ "cmd ::= SHOW TABLE TAGS tag_list_opt FROM db_name NK_DOT table_name", - /* 322 */ "cmd ::= SHOW VNODES ON DNODE NK_INTEGER", - /* 323 */ "cmd ::= SHOW VNODES", - /* 324 */ "cmd ::= SHOW db_name_cond_opt ALIVE", - /* 325 */ "cmd ::= SHOW CLUSTER ALIVE", - /* 326 */ "cmd ::= SHOW db_name_cond_opt VIEWS like_pattern_opt", - /* 327 */ "cmd ::= SHOW CREATE VIEW full_table_name", - /* 328 */ "cmd ::= SHOW COMPACTS", - /* 329 */ "cmd ::= SHOW COMPACT NK_INTEGER", - /* 330 */ "table_kind_db_name_cond_opt ::=", - /* 331 */ "table_kind_db_name_cond_opt ::= table_kind", - /* 332 */ "table_kind_db_name_cond_opt ::= db_name NK_DOT", - /* 333 */ "table_kind_db_name_cond_opt ::= table_kind db_name NK_DOT", - /* 334 */ "table_kind ::= NORMAL", - /* 335 */ "table_kind ::= CHILD", - /* 336 */ "db_name_cond_opt ::=", - /* 337 */ "db_name_cond_opt ::= db_name NK_DOT", - /* 338 */ "like_pattern_opt ::=", - /* 339 */ "like_pattern_opt ::= LIKE NK_STRING", - /* 340 */ "table_name_cond ::= table_name", - /* 341 */ "from_db_opt ::=", - /* 342 */ "from_db_opt ::= FROM db_name", - /* 343 */ "tag_list_opt ::=", - /* 344 */ "tag_list_opt ::= tag_item", - /* 345 */ "tag_list_opt ::= tag_list_opt NK_COMMA tag_item", - /* 346 */ "tag_item ::= TBNAME", - /* 347 */ "tag_item ::= QTAGS", - /* 348 */ 
"tag_item ::= column_name", - /* 349 */ "tag_item ::= column_name column_alias", - /* 350 */ "tag_item ::= column_name AS column_alias", - /* 351 */ "db_kind_opt ::=", - /* 352 */ "db_kind_opt ::= USER", - /* 353 */ "db_kind_opt ::= SYSTEM", - /* 354 */ "cmd ::= CREATE TSMA not_exists_opt tsma_name ON full_table_name tsma_func_list INTERVAL NK_LP duration_literal NK_RP", - /* 355 */ "cmd ::= CREATE RECURSIVE TSMA not_exists_opt tsma_name ON full_table_name INTERVAL NK_LP duration_literal NK_RP", - /* 356 */ "cmd ::= DROP TSMA exists_opt full_tsma_name", - /* 357 */ "cmd ::= SHOW db_name_cond_opt TSMAS", - /* 358 */ "full_tsma_name ::= tsma_name", - /* 359 */ "full_tsma_name ::= db_name NK_DOT tsma_name", - /* 360 */ "tsma_func_list ::= FUNCTION NK_LP func_list NK_RP", - /* 361 */ "cmd ::= CREATE SMA INDEX not_exists_opt col_name ON full_table_name index_options", - /* 362 */ "cmd ::= CREATE INDEX not_exists_opt col_name ON full_table_name NK_LP col_name_list NK_RP", - /* 363 */ "cmd ::= DROP INDEX exists_opt full_index_name", - /* 364 */ "full_index_name ::= index_name", - /* 365 */ "full_index_name ::= db_name NK_DOT index_name", - /* 366 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt", - /* 367 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt", - /* 368 */ "func_list ::= func", - /* 369 */ "func_list ::= func_list NK_COMMA func", - /* 370 */ "func ::= sma_func_name NK_LP expression_list NK_RP", - /* 371 */ "sma_func_name ::= function_name", - /* 372 */ "sma_func_name ::= COUNT", - /* 373 */ "sma_func_name ::= FIRST", - /* 374 */ "sma_func_name ::= LAST", - /* 375 */ "sma_func_name ::= LAST_ROW", - /* 376 */ "sma_stream_opt ::=", - /* 377 */ "sma_stream_opt ::= sma_stream_opt WATERMARK duration_literal", - /* 378 */ "sma_stream_opt ::= sma_stream_opt MAX_DELAY duration_literal", - /* 379 */ 
"sma_stream_opt ::= sma_stream_opt DELETE_MARK duration_literal", - /* 380 */ "with_meta ::= AS", - /* 381 */ "with_meta ::= WITH META AS", - /* 382 */ "with_meta ::= ONLY META AS", - /* 383 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery", - /* 384 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name with_meta DATABASE db_name", - /* 385 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name with_meta STABLE full_table_name where_clause_opt", - /* 386 */ "cmd ::= DROP TOPIC exists_opt topic_name", - /* 387 */ "cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name", - /* 388 */ "cmd ::= DESC full_table_name", - /* 389 */ "cmd ::= DESCRIBE full_table_name", - /* 390 */ "cmd ::= RESET QUERY CACHE", - /* 391 */ "cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery", - /* 392 */ "cmd ::= EXPLAIN analyze_opt explain_options insert_query", - /* 393 */ "analyze_opt ::=", - /* 394 */ "analyze_opt ::= ANALYZE", - /* 395 */ "explain_options ::=", - /* 396 */ "explain_options ::= explain_options VERBOSE NK_BOOL", - /* 397 */ "explain_options ::= explain_options RATIO NK_FLOAT", - /* 398 */ "cmd ::= CREATE or_replace_opt agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt language_opt", - /* 399 */ "cmd ::= DROP FUNCTION exists_opt function_name", - /* 400 */ "agg_func_opt ::=", - /* 401 */ "agg_func_opt ::= AGGREGATE", - /* 402 */ "bufsize_opt ::=", - /* 403 */ "bufsize_opt ::= BUFSIZE NK_INTEGER", - /* 404 */ "language_opt ::=", - /* 405 */ "language_opt ::= LANGUAGE NK_STRING", - /* 406 */ "or_replace_opt ::=", - /* 407 */ "or_replace_opt ::= OR REPLACE", - /* 408 */ "cmd ::= CREATE or_replace_opt VIEW full_view_name AS query_or_subquery", - /* 409 */ "cmd ::= DROP VIEW exists_opt full_view_name", - /* 410 */ "full_view_name ::= view_name", - /* 411 */ "full_view_name ::= db_name NK_DOT view_name", - /* 412 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO 
full_table_name col_list_opt tag_def_or_ref_opt subtable_opt AS query_or_subquery", - /* 413 */ "cmd ::= DROP STREAM exists_opt stream_name", - /* 414 */ "cmd ::= PAUSE STREAM exists_opt stream_name", - /* 415 */ "cmd ::= RESUME STREAM exists_opt ignore_opt stream_name", - /* 416 */ "col_list_opt ::=", - /* 417 */ "col_list_opt ::= NK_LP column_stream_def_list NK_RP", - /* 418 */ "column_stream_def_list ::= column_stream_def", - /* 419 */ "column_stream_def_list ::= column_stream_def_list NK_COMMA column_stream_def", - /* 420 */ "column_stream_def ::= column_name stream_col_options", - /* 421 */ "stream_col_options ::=", - /* 422 */ "stream_col_options ::= stream_col_options PRIMARY KEY", - /* 423 */ "tag_def_or_ref_opt ::=", - /* 424 */ "tag_def_or_ref_opt ::= tags_def", - /* 425 */ "tag_def_or_ref_opt ::= TAGS NK_LP column_stream_def_list NK_RP", - /* 426 */ "stream_options ::=", - /* 427 */ "stream_options ::= stream_options TRIGGER AT_ONCE", - /* 428 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE", - /* 429 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal", - /* 430 */ "stream_options ::= stream_options WATERMARK duration_literal", - /* 431 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER", - /* 432 */ "stream_options ::= stream_options FILL_HISTORY NK_INTEGER", - /* 433 */ "stream_options ::= stream_options DELETE_MARK duration_literal", - /* 434 */ "stream_options ::= stream_options IGNORE UPDATE NK_INTEGER", - /* 435 */ "subtable_opt ::=", - /* 436 */ "subtable_opt ::= SUBTABLE NK_LP expression NK_RP", - /* 437 */ "ignore_opt ::=", - /* 438 */ "ignore_opt ::= IGNORE UNTREATED", - /* 439 */ "cmd ::= KILL CONNECTION NK_INTEGER", - /* 440 */ "cmd ::= KILL QUERY NK_STRING", - /* 441 */ "cmd ::= KILL TRANSACTION NK_INTEGER", - /* 442 */ "cmd ::= KILL COMPACT NK_INTEGER", - /* 443 */ "cmd ::= BALANCE VGROUP", - /* 444 */ "cmd ::= BALANCE VGROUP LEADER on_vgroup_id", - /* 445 */ "cmd ::= BALANCE VGROUP LEADER 
DATABASE db_name", - /* 446 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", - /* 447 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", - /* 448 */ "cmd ::= SPLIT VGROUP NK_INTEGER", - /* 449 */ "on_vgroup_id ::=", - /* 450 */ "on_vgroup_id ::= ON NK_INTEGER", - /* 451 */ "dnode_list ::= DNODE NK_INTEGER", - /* 452 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", - /* 453 */ "cmd ::= DELETE FROM full_table_name where_clause_opt", - /* 454 */ "cmd ::= query_or_subquery", - /* 455 */ "cmd ::= insert_query", - /* 456 */ "insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery", - /* 457 */ "insert_query ::= INSERT INTO full_table_name query_or_subquery", - /* 458 */ "tags_literal ::= NK_INTEGER", - /* 459 */ "tags_literal ::= NK_INTEGER NK_PLUS duration_literal", - /* 460 */ "tags_literal ::= NK_INTEGER NK_MINUS duration_literal", - /* 461 */ "tags_literal ::= NK_PLUS NK_INTEGER", - /* 462 */ "tags_literal ::= NK_PLUS NK_INTEGER NK_PLUS duration_literal", - /* 463 */ "tags_literal ::= NK_PLUS NK_INTEGER NK_MINUS duration_literal", - /* 464 */ "tags_literal ::= NK_MINUS NK_INTEGER", - /* 465 */ "tags_literal ::= NK_MINUS NK_INTEGER NK_PLUS duration_literal", - /* 466 */ "tags_literal ::= NK_MINUS NK_INTEGER NK_MINUS duration_literal", - /* 467 */ "tags_literal ::= NK_FLOAT", - /* 468 */ "tags_literal ::= NK_PLUS NK_FLOAT", - /* 469 */ "tags_literal ::= NK_MINUS NK_FLOAT", - /* 470 */ "tags_literal ::= NK_BIN", - /* 471 */ "tags_literal ::= NK_BIN NK_PLUS duration_literal", - /* 472 */ "tags_literal ::= NK_BIN NK_MINUS duration_literal", - /* 473 */ "tags_literal ::= NK_PLUS NK_BIN", - /* 474 */ "tags_literal ::= NK_PLUS NK_BIN NK_PLUS duration_literal", - /* 475 */ "tags_literal ::= NK_PLUS NK_BIN NK_MINUS duration_literal", - /* 476 */ "tags_literal ::= NK_MINUS NK_BIN", - /* 477 */ "tags_literal ::= NK_MINUS NK_BIN NK_PLUS duration_literal", - /* 478 */ "tags_literal ::= NK_MINUS NK_BIN NK_MINUS duration_literal", - /* 479 */ 
"tags_literal ::= NK_HEX", - /* 480 */ "tags_literal ::= NK_HEX NK_PLUS duration_literal", - /* 481 */ "tags_literal ::= NK_HEX NK_MINUS duration_literal", - /* 482 */ "tags_literal ::= NK_PLUS NK_HEX", - /* 483 */ "tags_literal ::= NK_PLUS NK_HEX NK_PLUS duration_literal", - /* 484 */ "tags_literal ::= NK_PLUS NK_HEX NK_MINUS duration_literal", - /* 485 */ "tags_literal ::= NK_MINUS NK_HEX", - /* 486 */ "tags_literal ::= NK_MINUS NK_HEX NK_PLUS duration_literal", - /* 487 */ "tags_literal ::= NK_MINUS NK_HEX NK_MINUS duration_literal", - /* 488 */ "tags_literal ::= NK_STRING", - /* 489 */ "tags_literal ::= NK_STRING NK_PLUS duration_literal", - /* 490 */ "tags_literal ::= NK_STRING NK_MINUS duration_literal", - /* 491 */ "tags_literal ::= NK_BOOL", - /* 492 */ "tags_literal ::= NULL", - /* 493 */ "tags_literal ::= literal_func", - /* 494 */ "tags_literal ::= literal_func NK_PLUS duration_literal", - /* 495 */ "tags_literal ::= literal_func NK_MINUS duration_literal", - /* 496 */ "tags_literal_list ::= tags_literal", - /* 497 */ "tags_literal_list ::= tags_literal_list NK_COMMA tags_literal", - /* 498 */ "literal ::= NK_INTEGER", - /* 499 */ "literal ::= NK_FLOAT", - /* 500 */ "literal ::= NK_STRING", - /* 501 */ "literal ::= NK_BOOL", - /* 502 */ "literal ::= TIMESTAMP NK_STRING", - /* 503 */ "literal ::= duration_literal", - /* 504 */ "literal ::= NULL", - /* 505 */ "literal ::= NK_QUESTION", - /* 506 */ "duration_literal ::= NK_VARIABLE", - /* 507 */ "signed ::= NK_INTEGER", - /* 508 */ "signed ::= NK_PLUS NK_INTEGER", - /* 509 */ "signed ::= NK_MINUS NK_INTEGER", - /* 510 */ "signed ::= NK_FLOAT", - /* 511 */ "signed ::= NK_PLUS NK_FLOAT", - /* 512 */ "signed ::= NK_MINUS NK_FLOAT", - /* 513 */ "signed_literal ::= signed", - /* 514 */ "signed_literal ::= NK_STRING", - /* 515 */ "signed_literal ::= NK_BOOL", - /* 516 */ "signed_literal ::= TIMESTAMP NK_STRING", - /* 517 */ "signed_literal ::= duration_literal", - /* 518 */ "signed_literal ::= NULL", - /* 519 */ 
"signed_literal ::= literal_func", - /* 520 */ "signed_literal ::= NK_QUESTION", - /* 521 */ "literal_list ::= signed_literal", - /* 522 */ "literal_list ::= literal_list NK_COMMA signed_literal", - /* 523 */ "db_name ::= NK_ID", - /* 524 */ "table_name ::= NK_ID", - /* 525 */ "column_name ::= NK_ID", - /* 526 */ "function_name ::= NK_ID", - /* 527 */ "view_name ::= NK_ID", - /* 528 */ "table_alias ::= NK_ID", - /* 529 */ "column_alias ::= NK_ID", - /* 530 */ "column_alias ::= NK_ALIAS", - /* 531 */ "user_name ::= NK_ID", - /* 532 */ "topic_name ::= NK_ID", - /* 533 */ "stream_name ::= NK_ID", - /* 534 */ "cgroup_name ::= NK_ID", - /* 535 */ "index_name ::= NK_ID", - /* 536 */ "tsma_name ::= NK_ID", - /* 537 */ "expr_or_subquery ::= expression", - /* 538 */ "expression ::= literal", - /* 539 */ "expression ::= pseudo_column", - /* 540 */ "expression ::= column_reference", - /* 541 */ "expression ::= function_expression", - /* 542 */ "expression ::= case_when_expression", - /* 543 */ "expression ::= NK_LP expression NK_RP", - /* 544 */ "expression ::= NK_PLUS expr_or_subquery", - /* 545 */ "expression ::= NK_MINUS expr_or_subquery", - /* 546 */ "expression ::= expr_or_subquery NK_PLUS expr_or_subquery", - /* 547 */ "expression ::= expr_or_subquery NK_MINUS expr_or_subquery", - /* 548 */ "expression ::= expr_or_subquery NK_STAR expr_or_subquery", - /* 549 */ "expression ::= expr_or_subquery NK_SLASH expr_or_subquery", - /* 550 */ "expression ::= expr_or_subquery NK_REM expr_or_subquery", - /* 551 */ "expression ::= column_reference NK_ARROW NK_STRING", - /* 552 */ "expression ::= expr_or_subquery NK_BITAND expr_or_subquery", - /* 553 */ "expression ::= expr_or_subquery NK_BITOR expr_or_subquery", - /* 554 */ "expression_list ::= expr_or_subquery", - /* 555 */ "expression_list ::= expression_list NK_COMMA expr_or_subquery", - /* 556 */ "column_reference ::= column_name", - /* 557 */ "column_reference ::= table_name NK_DOT column_name", - /* 558 */ "column_reference 
::= NK_ALIAS", - /* 559 */ "column_reference ::= table_name NK_DOT NK_ALIAS", - /* 560 */ "pseudo_column ::= ROWTS", - /* 561 */ "pseudo_column ::= TBNAME", - /* 562 */ "pseudo_column ::= table_name NK_DOT TBNAME", - /* 563 */ "pseudo_column ::= QSTART", - /* 564 */ "pseudo_column ::= QEND", - /* 565 */ "pseudo_column ::= QDURATION", - /* 566 */ "pseudo_column ::= WSTART", - /* 567 */ "pseudo_column ::= WEND", - /* 568 */ "pseudo_column ::= WDURATION", - /* 569 */ "pseudo_column ::= IROWTS", - /* 570 */ "pseudo_column ::= ISFILLED", - /* 571 */ "pseudo_column ::= QTAGS", - /* 572 */ "pseudo_column ::= FLOW", - /* 573 */ "pseudo_column ::= FHIGH", - /* 574 */ "pseudo_column ::= FROWTS", - /* 575 */ "function_expression ::= function_name NK_LP expression_list NK_RP", - /* 576 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", - /* 577 */ "function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP", - /* 578 */ "function_expression ::= CAST NK_LP expr_or_subquery AS type_name_default_len NK_RP", - /* 579 */ "function_expression ::= POSITION NK_LP expr_or_subquery IN expr_or_subquery NK_RP", - /* 580 */ "function_expression ::= TRIM NK_LP expr_or_subquery NK_RP", - /* 581 */ "function_expression ::= TRIM NK_LP trim_specification_type FROM expr_or_subquery NK_RP", - /* 582 */ "function_expression ::= TRIM NK_LP expr_or_subquery FROM expr_or_subquery NK_RP", - /* 583 */ "function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP", - /* 584 */ "function_expression ::= substr_func NK_LP expression_list NK_RP", - /* 585 */ "function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP", - /* 586 */ "function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP", - /* 587 */ "function_expression ::= REPLACE NK_LP expression_list NK_RP", - /* 588 */ "function_expression ::= literal_func", - /* 589 */ "function_expression ::= 
rand_func", - /* 590 */ "literal_func ::= noarg_func NK_LP NK_RP", - /* 591 */ "literal_func ::= NOW", - /* 592 */ "literal_func ::= TODAY", - /* 593 */ "rand_func ::= RAND NK_LP NK_RP", - /* 594 */ "rand_func ::= RAND NK_LP expression_list NK_RP", - /* 595 */ "substr_func ::= SUBSTR", - /* 596 */ "substr_func ::= SUBSTRING", - /* 597 */ "trim_specification_type ::= BOTH", - /* 598 */ "trim_specification_type ::= TRAILING", - /* 599 */ "trim_specification_type ::= LEADING", - /* 600 */ "noarg_func ::= NOW", - /* 601 */ "noarg_func ::= TODAY", - /* 602 */ "noarg_func ::= TIMEZONE", - /* 603 */ "noarg_func ::= DATABASE", - /* 604 */ "noarg_func ::= CLIENT_VERSION", - /* 605 */ "noarg_func ::= SERVER_VERSION", - /* 606 */ "noarg_func ::= SERVER_STATUS", - /* 607 */ "noarg_func ::= CURRENT_USER", - /* 608 */ "noarg_func ::= USER", - /* 609 */ "noarg_func ::= PI", - /* 610 */ "star_func ::= COUNT", - /* 611 */ "star_func ::= FIRST", - /* 612 */ "star_func ::= LAST", - /* 613 */ "star_func ::= LAST_ROW", - /* 614 */ "star_func_para_list ::= NK_STAR", - /* 615 */ "star_func_para_list ::= other_para_list", - /* 616 */ "other_para_list ::= star_func_para", - /* 617 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", - /* 618 */ "star_func_para ::= expr_or_subquery", - /* 619 */ "star_func_para ::= table_name NK_DOT NK_STAR", - /* 620 */ "case_when_expression ::= CASE when_then_list case_when_else_opt END", - /* 621 */ "case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END", - /* 622 */ "when_then_list ::= when_then_expr", - /* 623 */ "when_then_list ::= when_then_list when_then_expr", - /* 624 */ "when_then_expr ::= WHEN common_expression THEN common_expression", - /* 625 */ "case_when_else_opt ::=", - /* 626 */ "case_when_else_opt ::= ELSE common_expression", - /* 627 */ "predicate ::= expr_or_subquery compare_op expr_or_subquery", - /* 628 */ "predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery", - /* 
629 */ "predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery", - /* 630 */ "predicate ::= expr_or_subquery IS NULL", - /* 631 */ "predicate ::= expr_or_subquery IS NOT NULL", - /* 632 */ "predicate ::= expr_or_subquery in_op in_predicate_value", - /* 633 */ "compare_op ::= NK_LT", - /* 634 */ "compare_op ::= NK_GT", - /* 635 */ "compare_op ::= NK_LE", - /* 636 */ "compare_op ::= NK_GE", - /* 637 */ "compare_op ::= NK_NE", - /* 638 */ "compare_op ::= NK_EQ", - /* 639 */ "compare_op ::= LIKE", - /* 640 */ "compare_op ::= NOT LIKE", - /* 641 */ "compare_op ::= MATCH", - /* 642 */ "compare_op ::= NMATCH", - /* 643 */ "compare_op ::= CONTAINS", - /* 644 */ "in_op ::= IN", - /* 645 */ "in_op ::= NOT IN", - /* 646 */ "in_predicate_value ::= NK_LP literal_list NK_RP", - /* 647 */ "boolean_value_expression ::= boolean_primary", - /* 648 */ "boolean_value_expression ::= NOT boolean_primary", - /* 649 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", - /* 650 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", - /* 651 */ "boolean_primary ::= predicate", - /* 652 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", - /* 653 */ "common_expression ::= expr_or_subquery", - /* 654 */ "common_expression ::= boolean_value_expression", - /* 655 */ "from_clause_opt ::=", - /* 656 */ "from_clause_opt ::= FROM table_reference_list", - /* 657 */ "table_reference_list ::= table_reference", - /* 658 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", - /* 659 */ "table_reference ::= table_primary", - /* 660 */ "table_reference ::= joined_table", - /* 661 */ "table_primary ::= table_name alias_opt", - /* 662 */ "table_primary ::= db_name NK_DOT table_name alias_opt", - /* 663 */ "table_primary ::= subquery alias_opt", - /* 664 */ "table_primary ::= parenthesized_joined_table", - /* 665 */ "alias_opt ::=", - /* 666 */ "alias_opt ::= table_alias", - /* 667 
*/ "alias_opt ::= AS table_alias", - /* 668 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", - /* 669 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", - /* 670 */ "joined_table ::= table_reference join_type join_subtype JOIN table_reference join_on_clause_opt window_offset_clause_opt jlimit_clause_opt", - /* 671 */ "join_type ::=", - /* 672 */ "join_type ::= INNER", - /* 673 */ "join_type ::= LEFT", - /* 674 */ "join_type ::= RIGHT", - /* 675 */ "join_type ::= FULL", - /* 676 */ "join_subtype ::=", - /* 677 */ "join_subtype ::= OUTER", - /* 678 */ "join_subtype ::= SEMI", - /* 679 */ "join_subtype ::= ANTI", - /* 680 */ "join_subtype ::= ASOF", - /* 681 */ "join_subtype ::= WINDOW", - /* 682 */ "join_on_clause_opt ::=", - /* 683 */ "join_on_clause_opt ::= ON search_condition", - /* 684 */ "window_offset_clause_opt ::=", - /* 685 */ "window_offset_clause_opt ::= WINDOW_OFFSET NK_LP window_offset_literal NK_COMMA window_offset_literal NK_RP", - /* 686 */ "window_offset_literal ::= NK_VARIABLE", - /* 687 */ "window_offset_literal ::= NK_MINUS NK_VARIABLE", - /* 688 */ "jlimit_clause_opt ::=", - /* 689 */ "jlimit_clause_opt ::= JLIMIT NK_INTEGER", - /* 690 */ "query_specification ::= SELECT hint_list set_quantifier_opt tag_mode_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt", - /* 691 */ "hint_list ::=", - /* 692 */ "hint_list ::= NK_HINT", - /* 693 */ "tag_mode_opt ::=", - /* 694 */ "tag_mode_opt ::= TAGS", - /* 695 */ "set_quantifier_opt ::=", - /* 696 */ "set_quantifier_opt ::= DISTINCT", - /* 697 */ "set_quantifier_opt ::= ALL", - /* 698 */ "select_list ::= select_item", - /* 699 */ "select_list ::= select_list NK_COMMA select_item", - /* 700 */ "select_item ::= NK_STAR", - /* 701 */ "select_item ::= common_expression", - /* 702 */ "select_item ::= common_expression column_alias", - /* 703 */ "select_item ::= 
common_expression AS column_alias", - /* 704 */ "select_item ::= table_name NK_DOT NK_STAR", - /* 705 */ "where_clause_opt ::=", - /* 706 */ "where_clause_opt ::= WHERE search_condition", - /* 707 */ "partition_by_clause_opt ::=", - /* 708 */ "partition_by_clause_opt ::= PARTITION BY partition_list", - /* 709 */ "partition_list ::= partition_item", - /* 710 */ "partition_list ::= partition_list NK_COMMA partition_item", - /* 711 */ "partition_item ::= expr_or_subquery", - /* 712 */ "partition_item ::= expr_or_subquery column_alias", - /* 713 */ "partition_item ::= expr_or_subquery AS column_alias", - /* 714 */ "twindow_clause_opt ::=", - /* 715 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA interval_sliding_duration_literal NK_RP", - /* 716 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP", - /* 717 */ "twindow_clause_opt ::= INTERVAL NK_LP interval_sliding_duration_literal NK_RP sliding_opt fill_opt", - /* 718 */ "twindow_clause_opt ::= INTERVAL NK_LP interval_sliding_duration_literal NK_COMMA interval_sliding_duration_literal NK_RP sliding_opt fill_opt", - /* 719 */ "twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition", - /* 720 */ "twindow_clause_opt ::= COUNT_WINDOW NK_LP NK_INTEGER NK_RP", - /* 721 */ "twindow_clause_opt ::= COUNT_WINDOW NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP", - /* 722 */ "twindow_clause_opt ::= ANOMALY_WINDOW NK_LP expr_or_subquery NK_RP", - /* 723 */ "twindow_clause_opt ::= ANOMALY_WINDOW NK_LP expr_or_subquery NK_COMMA NK_STRING NK_RP", - /* 724 */ "sliding_opt ::=", - /* 725 */ "sliding_opt ::= SLIDING NK_LP interval_sliding_duration_literal NK_RP", - /* 726 */ "interval_sliding_duration_literal ::= NK_VARIABLE", - /* 727 */ "interval_sliding_duration_literal ::= NK_STRING", - /* 728 */ "interval_sliding_duration_literal ::= NK_INTEGER", - /* 729 */ "fill_opt ::=", - /* 730 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", - /* 731 */ "fill_opt ::= FILL NK_LP 
VALUE NK_COMMA expression_list NK_RP", - /* 732 */ "fill_opt ::= FILL NK_LP VALUE_F NK_COMMA expression_list NK_RP", - /* 733 */ "fill_mode ::= NONE", - /* 734 */ "fill_mode ::= PREV", - /* 735 */ "fill_mode ::= NULL", - /* 736 */ "fill_mode ::= NULL_F", - /* 737 */ "fill_mode ::= LINEAR", - /* 738 */ "fill_mode ::= NEXT", - /* 739 */ "group_by_clause_opt ::=", - /* 740 */ "group_by_clause_opt ::= GROUP BY group_by_list", - /* 741 */ "group_by_list ::= expr_or_subquery", - /* 742 */ "group_by_list ::= group_by_list NK_COMMA expr_or_subquery", - /* 743 */ "having_clause_opt ::=", - /* 744 */ "having_clause_opt ::= HAVING search_condition", - /* 745 */ "range_opt ::=", - /* 746 */ "range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP", - /* 747 */ "range_opt ::= RANGE NK_LP expr_or_subquery NK_RP", - /* 748 */ "every_opt ::=", - /* 749 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP", - /* 750 */ "query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt", - /* 751 */ "query_simple ::= query_specification", - /* 752 */ "query_simple ::= union_query_expression", - /* 753 */ "union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery", - /* 754 */ "union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery", - /* 755 */ "query_simple_or_subquery ::= query_simple", - /* 756 */ "query_simple_or_subquery ::= subquery", - /* 757 */ "query_or_subquery ::= query_expression", - /* 758 */ "query_or_subquery ::= subquery", - /* 759 */ "order_by_clause_opt ::=", - /* 760 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", - /* 761 */ "slimit_clause_opt ::=", - /* 762 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", - /* 763 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", - /* 764 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 765 */ "limit_clause_opt ::=", - /* 766 */ "limit_clause_opt ::= LIMIT NK_INTEGER", - /* 767 */ 
"limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", - /* 768 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 769 */ "subquery ::= NK_LP query_expression NK_RP", - /* 770 */ "subquery ::= NK_LP subquery NK_RP", - /* 771 */ "search_condition ::= common_expression", - /* 772 */ "sort_specification_list ::= sort_specification", - /* 773 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", - /* 774 */ "sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt", - /* 775 */ "ordering_specification_opt ::=", - /* 776 */ "ordering_specification_opt ::= ASC", - /* 777 */ "ordering_specification_opt ::= DESC", - /* 778 */ "null_ordering_opt ::=", - /* 779 */ "null_ordering_opt ::= NULLS FIRST", - /* 780 */ "null_ordering_opt ::= NULLS LAST", - /* 781 */ "column_options ::=", - /* 782 */ "column_options ::= column_options PRIMARY KEY", - /* 783 */ "column_options ::= column_options NK_ID NK_STRING", -}; -#endif /* NDEBUG */ - - -#if YYSTACKDEPTH<=0 -/* -** Try to increase the size of the parser stack. Return the number -** of errors. Return 0 on success. -*/ -static int yyGrowStack(yyParser *p){ - int newSize; - int idx; - yyStackEntry *pNew; - - newSize = p->yystksz*2 + 100; - idx = p->yytos ? (int)(p->yytos - p->yystack) : 0; - if( p->yystack==&p->yystk0 ){ - pNew = malloc(newSize*sizeof(pNew[0])); - if( pNew ) pNew[0] = p->yystk0; - }else{ - pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); - } - if( pNew ){ - p->yystack = pNew; - p->yytos = &p->yystack[idx]; -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", - yyTracePrompt, p->yystksz, newSize); - } -#endif - p->yystksz = newSize; - } - return pNew==0; -} -#endif - -/* Datatype of the argument to the memory allocated passed as the -** second argument to ParseAlloc() below. This can be changed by -** putting an appropriate #define in the %include section of the input -** grammar. 
-*/ -#ifndef YYMALLOCARGTYPE -# define YYMALLOCARGTYPE size_t -#endif - -/* Initialize a new parser that has already been allocated. -*/ -void ParseInit(void *yypRawParser ParseCTX_PDECL){ - yyParser *yypParser = (yyParser*)yypRawParser; - ParseCTX_STORE -#ifdef YYTRACKMAXSTACKDEPTH - yypParser->yyhwm = 0; -#endif -#if YYSTACKDEPTH<=0 - yypParser->yytos = NULL; - yypParser->yystack = NULL; - yypParser->yystksz = 0; - if( yyGrowStack(yypParser) ){ - yypParser->yystack = &yypParser->yystk0; - yypParser->yystksz = 1; - } -#endif -#ifndef YYNOERRORRECOVERY - yypParser->yyerrcnt = -1; -#endif - yypParser->yytos = yypParser->yystack; - yypParser->yystack[0].stateno = 0; - yypParser->yystack[0].major = 0; -#if YYSTACKDEPTH>0 - yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; -#endif -} - -#ifndef Parse_ENGINEALWAYSONSTACK -/* -** This function allocates a new parser. -** The only argument is a pointer to a function which works like -** malloc. -** -** Inputs: -** A pointer to the function used to allocate memory. -** -** Outputs: -** A pointer to a parser. This pointer is used in subsequent calls -** to Parse and ParseFree. -*/ -void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) ParseCTX_PDECL){ - yyParser *yypParser; - yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); - if( yypParser ){ - ParseCTX_STORE - ParseInit(yypParser ParseCTX_PARAM); - } - return (void*)yypParser; -} -#endif /* Parse_ENGINEALWAYSONSTACK */ - - -/* The following function deletes the "minor type" or semantic value -** associated with a symbol. The symbol can be either a terminal -** or nonterminal. "yymajor" is the symbol code, and "yypminor" is -** a pointer to the value to be deleted. The code used to do the -** deletions is derived from the %destructor and/or %token_destructor -** directives of the input grammar. 
-*/ -static void yy_destructor( - yyParser *yypParser, /* The parser */ - YYCODETYPE yymajor, /* Type code for object to destroy */ - YYMINORTYPE *yypminor /* The object to be destroyed */ -){ - ParseARG_FETCH - ParseCTX_FETCH - switch( yymajor ){ - /* Here is inserted the actions which take place when a - ** terminal or non-terminal is destroyed. This can happen - ** when the symbol is popped from the stack during a - ** reduce or during error processing or when a parser is - ** being destroyed before it is finished parsing. - ** - ** Note: during a reduce, the only symbols destroyed are those - ** which appear on the RHS of the rule, but which are *not* used - ** inside the C code. - */ -/********* Begin destructor definitions ***************************************/ - /* Default NON-TERMINAL Destructor */ - case 389: /* cmd */ - case 392: /* literal */ - case 403: /* with_clause_opt */ - case 409: /* search_condition */ - case 414: /* db_options */ - case 416: /* alter_db_options */ - case 418: /* start_opt */ - case 419: /* end_opt */ - case 423: /* signed */ - case 425: /* retention */ - case 426: /* full_table_name */ - case 429: /* table_options */ - case 435: /* alter_table_clause */ - case 436: /* alter_table_options */ - case 439: /* column_options */ - case 440: /* tags_literal */ - case 441: /* create_subtable_clause */ - case 444: /* drop_table_clause */ - case 447: /* tag_def */ - case 448: /* column_def */ - case 453: /* duration_literal */ - case 454: /* rollup_func_name */ - case 456: /* col_name */ - case 459: /* like_pattern_opt */ - case 460: /* db_name_cond_opt */ - case 461: /* table_name_cond */ - case 462: /* from_db_opt */ - case 464: /* tag_item */ - case 468: /* full_tsma_name */ - case 470: /* index_options */ - case 471: /* full_index_name */ - case 473: /* sliding_opt */ - case 474: /* sma_stream_opt */ - case 475: /* func */ - case 479: /* query_or_subquery */ - case 480: /* where_clause_opt */ - case 483: /* explain_options */ - case 
484: /* insert_query */ - case 489: /* full_view_name */ - case 492: /* stream_options */ - case 495: /* subtable_opt */ - case 498: /* column_stream_def */ - case 499: /* stream_col_options */ - case 500: /* expression */ - case 503: /* literal_func */ - case 504: /* signed_literal */ - case 507: /* expr_or_subquery */ - case 508: /* pseudo_column */ - case 509: /* column_reference */ - case 510: /* function_expression */ - case 511: /* case_when_expression */ - case 516: /* rand_func */ - case 519: /* star_func_para */ - case 521: /* case_when_else_opt */ - case 522: /* common_expression */ - case 523: /* when_then_expr */ - case 524: /* predicate */ - case 527: /* in_predicate_value */ - case 528: /* boolean_value_expression */ - case 529: /* boolean_primary */ - case 530: /* from_clause_opt */ - case 531: /* table_reference_list */ - case 532: /* table_reference */ - case 533: /* table_primary */ - case 534: /* joined_table */ - case 536: /* subquery */ - case 537: /* parenthesized_joined_table */ - case 540: /* join_on_clause_opt */ - case 541: /* window_offset_clause_opt */ - case 542: /* jlimit_clause_opt */ - case 543: /* window_offset_literal */ - case 544: /* query_specification */ - case 550: /* range_opt */ - case 551: /* every_opt */ - case 552: /* fill_opt */ - case 553: /* twindow_clause_opt */ - case 555: /* having_clause_opt */ - case 556: /* select_item */ - case 558: /* partition_item */ - case 559: /* interval_sliding_duration_literal */ - case 562: /* query_expression */ - case 563: /* query_simple */ - case 565: /* slimit_clause_opt */ - case 566: /* limit_clause_opt */ - case 567: /* union_query_expression */ - case 568: /* query_simple_or_subquery */ - case 570: /* sort_specification */ -{ - nodesDestroyNode((yypminor->yy974)); -} - break; - case 390: /* account_options */ - case 391: /* alter_account_options */ - case 393: /* alter_account_option */ - case 417: /* speed_opt */ - case 478: /* with_meta */ - case 487: /* bufsize_opt */ -{ - 
-} - break; - case 394: /* ip_range_list */ - case 395: /* white_list */ - case 396: /* white_list_opt */ - case 420: /* integer_list */ - case 421: /* variable_list */ - case 422: /* retention_list */ - case 427: /* column_def_list */ - case 428: /* tags_def_opt */ - case 430: /* multi_create_clause */ - case 431: /* tag_list_opt */ - case 432: /* tags_def */ - case 434: /* multi_drop_clause */ - case 442: /* specific_cols_opt */ - case 443: /* tags_literal_list */ - case 445: /* col_name_list */ - case 446: /* tag_def_list */ - case 450: /* duration_list */ - case 451: /* rollup_func_list */ - case 469: /* func_list */ - case 477: /* expression_list */ - case 493: /* col_list_opt */ - case 494: /* tag_def_or_ref_opt */ - case 497: /* column_stream_def_list */ - case 502: /* dnode_list */ - case 505: /* literal_list */ - case 513: /* star_func_para_list */ - case 518: /* other_para_list */ - case 520: /* when_then_list */ - case 545: /* hint_list */ - case 548: /* select_list */ - case 549: /* partition_by_clause_opt */ - case 554: /* group_by_clause_opt */ - case 557: /* partition_list */ - case 561: /* group_by_list */ - case 564: /* order_by_clause_opt */ - case 569: /* sort_specification_list */ -{ - nodesDestroyList((yypminor->yy946)); -} - break; - case 397: /* is_import_opt */ - case 398: /* is_createdb_opt */ - case 400: /* sysinfo_opt */ -{ - -} - break; - case 399: /* user_name */ - case 406: /* db_name */ - case 407: /* table_name */ - case 408: /* topic_name */ - case 410: /* dnode_endpoint */ - case 437: /* column_name */ - case 455: /* function_name */ - case 465: /* column_alias */ - case 466: /* tsma_name */ - case 472: /* index_name */ - case 476: /* sma_func_name */ - case 481: /* cgroup_name */ - case 488: /* language_opt */ - case 490: /* view_name */ - case 491: /* stream_name */ - case 501: /* on_vgroup_id */ - case 506: /* table_alias */ - case 512: /* star_func */ - case 515: /* substr_func */ - case 517: /* noarg_func */ - case 535: /* 
alias_opt */ -{ - -} - break; - case 401: /* privileges */ - case 404: /* priv_type_list */ - case 405: /* priv_type */ -{ - -} - break; - case 402: /* priv_level */ -{ - -} - break; - case 411: /* force_opt */ - case 412: /* unsafe_opt */ - case 413: /* not_exists_opt */ - case 415: /* exists_opt */ - case 433: /* with_opt */ - case 482: /* analyze_opt */ - case 485: /* or_replace_opt */ - case 486: /* agg_func_opt */ - case 496: /* ignore_opt */ - case 546: /* set_quantifier_opt */ - case 547: /* tag_mode_opt */ -{ - -} - break; - case 424: /* alter_db_option */ - case 452: /* alter_table_option */ -{ - -} - break; - case 438: /* type_name */ - case 449: /* type_name_default_len */ -{ - -} - break; - case 457: /* db_kind_opt */ - case 463: /* table_kind */ -{ - -} - break; - case 458: /* table_kind_db_name_cond_opt */ -{ - -} - break; - case 467: /* tsma_func_list */ -{ - nodesDestroyNode((yypminor->yy974)); -} - break; - case 514: /* trim_specification_type */ -{ - -} - break; - case 525: /* compare_op */ - case 526: /* in_op */ -{ - -} - break; - case 538: /* join_type */ -{ - -} - break; - case 539: /* join_subtype */ -{ - -} - break; - case 560: /* fill_mode */ -{ - -} - break; - case 571: /* ordering_specification_opt */ -{ - -} - break; - case 572: /* null_ordering_opt */ -{ - -} - break; -/********* End destructor definitions *****************************************/ - default: break; /* If no destructor action specified: do nothing */ - } -} - -/* -** Pop the parser's stack once. -** -** If there is a destructor routine associated with the token which -** is popped from the stack, then call it. 
-*/ -static void yy_pop_parser_stack(yyParser *pParser){ - yyStackEntry *yytos; - assert( pParser->yytos!=0 ); - assert( pParser->yytos > pParser->yystack ); - yytos = pParser->yytos--; -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sPopping %s\n", - yyTracePrompt, - yyTokenName[yytos->major]); - } -#endif - yy_destructor(pParser, yytos->major, &yytos->minor); -} - -/* -** Clear all secondary memory allocations from the parser -*/ -void ParseFinalize(void *p){ - yyParser *pParser = (yyParser*)p; - while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser); -#if YYSTACKDEPTH<=0 - if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack); -#endif -} - -#ifndef Parse_ENGINEALWAYSONSTACK -/* -** Deallocate and destroy a parser. Destructors are called for -** all stack elements before shutting the parser down. -** -** If the YYPARSEFREENEVERNULL macro exists (for example because it -** is defined in a %include section of the input grammar) then it is -** assumed that the input pointer is never NULL. -*/ -void ParseFree( - void *p, /* The parser to be deleted */ - void (*freeProc)(void*) /* Function used to reclaim memory */ -){ -#ifndef YYPARSEFREENEVERNULL - if( p==0 ) return; -#endif - ParseFinalize(p); - (*freeProc)(p); -} -#endif /* Parse_ENGINEALWAYSONSTACK */ - -/* -** Return the peak depth of the stack for a parser. -*/ -#ifdef YYTRACKMAXSTACKDEPTH -int ParseStackPeak(void *p){ - yyParser *pParser = (yyParser*)p; - return pParser->yyhwm; -} -#endif - -/* This array of booleans keeps track of the parser statement -** coverage. The element yycoverage[X][Y] is set when the parser -** is in state X and has a lookahead token Y. In a well-tested -** systems, every element of this matrix should end up being set. 
-*/ -#if defined(YYCOVERAGE) -static unsigned char yycoverage[YYNSTATE][YYNTOKEN]; -#endif - -/* -** Write into out a description of every state/lookahead combination that -** -** (1) has not been used by the parser, and -** (2) is not a syntax error. -** -** Return the number of missed state/lookahead combinations. -*/ -#if defined(YYCOVERAGE) -int ParseCoverage(FILE *out){ - int stateno, iLookAhead, i; - int nMissed = 0; - for(stateno=0; statenoYY_MAX_SHIFT ) return stateno; - assert( stateno <= YY_SHIFT_COUNT ); -#if defined(YYCOVERAGE) - yycoverage[stateno][iLookAhead] = 1; -#endif - do{ - i = yy_shift_ofst[stateno]; - assert( i>=0 ); - assert( i<=YY_ACTTAB_COUNT ); - assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD ); - assert( iLookAhead!=YYNOCODE ); - assert( iLookAhead < YYNTOKEN ); - i += iLookAhead; - assert( i<(int)YY_NLOOKAHEAD ); - if( yy_lookahead[i]!=iLookAhead ){ -#ifdef YYFALLBACK - YYCODETYPE iFallback; /* Fallback token */ - assert( iLookAhead %s\n", - yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]); - } -#endif - assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */ - iLookAhead = iFallback; - continue; - } -#endif -#ifdef YYWILDCARD - { - int j = i - iLookAhead + YYWILDCARD; - assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) ); - if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){ -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n", - yyTracePrompt, yyTokenName[iLookAhead], - yyTokenName[YYWILDCARD]); - } -#endif /* NDEBUG */ - return yy_action[j]; - } - } -#endif /* YYWILDCARD */ - return yy_default[stateno]; - }else{ - assert( i>=0 && iYY_REDUCE_COUNT ){ - return yy_default[stateno]; - } -#else - assert( stateno<=YY_REDUCE_COUNT ); -#endif - i = yy_reduce_ofst[stateno]; - assert( iLookAhead!=YYNOCODE ); - i += iLookAhead; -#ifdef YYERRORSYMBOL - if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){ - return yy_default[stateno]; - } -#else - assert( i>=0 && 
iyytos>yypParser->yystack ) yy_pop_parser_stack(yypParser); - /* Here code is inserted which will execute if the parser - ** stack every overflows */ -/******** Begin %stack_overflow code ******************************************/ -/******** End %stack_overflow code ********************************************/ - ParseARG_STORE /* Suppress warning about unused %extra_argument var */ - ParseCTX_STORE -} - -/* -** Print tracing information for a SHIFT action -*/ -#ifndef NDEBUG -static void yyTraceShift(yyParser *yypParser, int yyNewState, const char *zTag){ - if( yyTraceFILE ){ - if( yyNewStateyytos->major], - yyNewState); - }else{ - fprintf(yyTraceFILE,"%s%s '%s', pending reduce %d\n", - yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major], - yyNewState - YY_MIN_REDUCE); - } - } -} -#else -# define yyTraceShift(X,Y,Z) -#endif - -/* -** Perform a shift action. -*/ -static void yy_shift( - yyParser *yypParser, /* The parser to be shifted */ - YYACTIONTYPE yyNewState, /* The new state to shift in */ - YYCODETYPE yyMajor, /* The major token to shift in */ - ParseTOKENTYPE yyMinor /* The minor token to shift in */ -){ - yyStackEntry *yytos; - yypParser->yytos++; -#ifdef YYTRACKMAXSTACKDEPTH - if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ - yypParser->yyhwm++; - assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) ); - } -#endif -#if YYSTACKDEPTH>0 - if( yypParser->yytos>yypParser->yystackEnd ){ - yypParser->yytos--; - yyStackOverflow(yypParser); - return; - } -#else - if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){ - if( yyGrowStack(yypParser) ){ - yypParser->yytos--; - yyStackOverflow(yypParser); - return; - } - } -#endif - if( yyNewState > YY_MAX_SHIFT ){ - yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; - } - yytos = yypParser->yytos; - yytos->stateno = yyNewState; - yytos->major = yyMajor; - yytos->minor.yy0 = yyMinor; - yyTraceShift(yypParser, yyNewState, "Shift"); -} - -/* For rule J, 
yyRuleInfoLhs[J] contains the symbol on the left-hand side -** of that rule */ -static const YYCODETYPE yyRuleInfoLhs[] = { - 389, /* (0) cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */ - 389, /* (1) cmd ::= ALTER ACCOUNT NK_ID alter_account_options */ - 390, /* (2) account_options ::= */ - 390, /* (3) account_options ::= account_options PPS literal */ - 390, /* (4) account_options ::= account_options TSERIES literal */ - 390, /* (5) account_options ::= account_options STORAGE literal */ - 390, /* (6) account_options ::= account_options STREAMS literal */ - 390, /* (7) account_options ::= account_options QTIME literal */ - 390, /* (8) account_options ::= account_options DBS literal */ - 390, /* (9) account_options ::= account_options USERS literal */ - 390, /* (10) account_options ::= account_options CONNS literal */ - 390, /* (11) account_options ::= account_options STATE literal */ - 391, /* (12) alter_account_options ::= alter_account_option */ - 391, /* (13) alter_account_options ::= alter_account_options alter_account_option */ - 393, /* (14) alter_account_option ::= PASS literal */ - 393, /* (15) alter_account_option ::= PPS literal */ - 393, /* (16) alter_account_option ::= TSERIES literal */ - 393, /* (17) alter_account_option ::= STORAGE literal */ - 393, /* (18) alter_account_option ::= STREAMS literal */ - 393, /* (19) alter_account_option ::= QTIME literal */ - 393, /* (20) alter_account_option ::= DBS literal */ - 393, /* (21) alter_account_option ::= USERS literal */ - 393, /* (22) alter_account_option ::= CONNS literal */ - 393, /* (23) alter_account_option ::= STATE literal */ - 394, /* (24) ip_range_list ::= NK_STRING */ - 394, /* (25) ip_range_list ::= ip_range_list NK_COMMA NK_STRING */ - 395, /* (26) white_list ::= HOST ip_range_list */ - 396, /* (27) white_list_opt ::= */ - 396, /* (28) white_list_opt ::= white_list */ - 397, /* (29) is_import_opt ::= */ - 397, /* (30) is_import_opt ::= IS_IMPORT NK_INTEGER */ - 398, /* (31) 
is_createdb_opt ::= */ - 398, /* (32) is_createdb_opt ::= CREATEDB NK_INTEGER */ - 389, /* (33) cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt is_createdb_opt is_import_opt white_list_opt */ - 389, /* (34) cmd ::= ALTER USER user_name PASS NK_STRING */ - 389, /* (35) cmd ::= ALTER USER user_name ENABLE NK_INTEGER */ - 389, /* (36) cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */ - 389, /* (37) cmd ::= ALTER USER user_name CREATEDB NK_INTEGER */ - 389, /* (38) cmd ::= ALTER USER user_name ADD white_list */ - 389, /* (39) cmd ::= ALTER USER user_name DROP white_list */ - 389, /* (40) cmd ::= DROP USER user_name */ - 400, /* (41) sysinfo_opt ::= */ - 400, /* (42) sysinfo_opt ::= SYSINFO NK_INTEGER */ - 389, /* (43) cmd ::= GRANT privileges ON priv_level with_clause_opt TO user_name */ - 389, /* (44) cmd ::= REVOKE privileges ON priv_level with_clause_opt FROM user_name */ - 401, /* (45) privileges ::= ALL */ - 401, /* (46) privileges ::= priv_type_list */ - 401, /* (47) privileges ::= SUBSCRIBE */ - 404, /* (48) priv_type_list ::= priv_type */ - 404, /* (49) priv_type_list ::= priv_type_list NK_COMMA priv_type */ - 405, /* (50) priv_type ::= READ */ - 405, /* (51) priv_type ::= WRITE */ - 405, /* (52) priv_type ::= ALTER */ - 402, /* (53) priv_level ::= NK_STAR NK_DOT NK_STAR */ - 402, /* (54) priv_level ::= db_name NK_DOT NK_STAR */ - 402, /* (55) priv_level ::= db_name NK_DOT table_name */ - 402, /* (56) priv_level ::= topic_name */ - 403, /* (57) with_clause_opt ::= */ - 403, /* (58) with_clause_opt ::= WITH search_condition */ - 389, /* (59) cmd ::= CREATE ENCRYPT_KEY NK_STRING */ - 389, /* (60) cmd ::= CREATE ANODE NK_STRING */ - 389, /* (61) cmd ::= UPDATE ANODE NK_INTEGER */ - 389, /* (62) cmd ::= UPDATE ALL ANODES */ - 389, /* (63) cmd ::= DROP ANODE NK_INTEGER */ - 389, /* (64) cmd ::= CREATE DNODE dnode_endpoint */ - 389, /* (65) cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */ - 389, /* (66) cmd ::= DROP DNODE NK_INTEGER force_opt */ - 
389, /* (67) cmd ::= DROP DNODE dnode_endpoint force_opt */ - 389, /* (68) cmd ::= DROP DNODE NK_INTEGER unsafe_opt */ - 389, /* (69) cmd ::= DROP DNODE dnode_endpoint unsafe_opt */ - 389, /* (70) cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ - 389, /* (71) cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */ - 389, /* (72) cmd ::= ALTER ALL DNODES NK_STRING */ - 389, /* (73) cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */ - 389, /* (74) cmd ::= RESTORE DNODE NK_INTEGER */ - 410, /* (75) dnode_endpoint ::= NK_STRING */ - 410, /* (76) dnode_endpoint ::= NK_ID */ - 410, /* (77) dnode_endpoint ::= NK_IPTOKEN */ - 411, /* (78) force_opt ::= */ - 411, /* (79) force_opt ::= FORCE */ - 412, /* (80) unsafe_opt ::= UNSAFE */ - 389, /* (81) cmd ::= ALTER CLUSTER NK_STRING */ - 389, /* (82) cmd ::= ALTER CLUSTER NK_STRING NK_STRING */ - 389, /* (83) cmd ::= ALTER LOCAL NK_STRING */ - 389, /* (84) cmd ::= ALTER LOCAL NK_STRING NK_STRING */ - 389, /* (85) cmd ::= CREATE QNODE ON DNODE NK_INTEGER */ - 389, /* (86) cmd ::= DROP QNODE ON DNODE NK_INTEGER */ - 389, /* (87) cmd ::= RESTORE QNODE ON DNODE NK_INTEGER */ - 389, /* (88) cmd ::= CREATE BNODE ON DNODE NK_INTEGER */ - 389, /* (89) cmd ::= DROP BNODE ON DNODE NK_INTEGER */ - 389, /* (90) cmd ::= CREATE SNODE ON DNODE NK_INTEGER */ - 389, /* (91) cmd ::= DROP SNODE ON DNODE NK_INTEGER */ - 389, /* (92) cmd ::= CREATE MNODE ON DNODE NK_INTEGER */ - 389, /* (93) cmd ::= DROP MNODE ON DNODE NK_INTEGER */ - 389, /* (94) cmd ::= RESTORE MNODE ON DNODE NK_INTEGER */ - 389, /* (95) cmd ::= RESTORE VNODE ON DNODE NK_INTEGER */ - 389, /* (96) cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ - 389, /* (97) cmd ::= DROP DATABASE exists_opt db_name */ - 389, /* (98) cmd ::= USE db_name */ - 389, /* (99) cmd ::= ALTER DATABASE db_name alter_db_options */ - 389, /* (100) cmd ::= FLUSH DATABASE db_name */ - 389, /* (101) cmd ::= TRIM DATABASE db_name speed_opt */ - 389, /* (102) cmd ::= S3MIGRATE DATABASE db_name */ - 389, /* 
(103) cmd ::= COMPACT DATABASE db_name start_opt end_opt */ - 413, /* (104) not_exists_opt ::= IF NOT EXISTS */ - 413, /* (105) not_exists_opt ::= */ - 415, /* (106) exists_opt ::= IF EXISTS */ - 415, /* (107) exists_opt ::= */ - 414, /* (108) db_options ::= */ - 414, /* (109) db_options ::= db_options BUFFER NK_INTEGER */ - 414, /* (110) db_options ::= db_options CACHEMODEL NK_STRING */ - 414, /* (111) db_options ::= db_options CACHESIZE NK_INTEGER */ - 414, /* (112) db_options ::= db_options COMP NK_INTEGER */ - 414, /* (113) db_options ::= db_options DURATION NK_INTEGER */ - 414, /* (114) db_options ::= db_options DURATION NK_VARIABLE */ - 414, /* (115) db_options ::= db_options MAXROWS NK_INTEGER */ - 414, /* (116) db_options ::= db_options MINROWS NK_INTEGER */ - 414, /* (117) db_options ::= db_options KEEP integer_list */ - 414, /* (118) db_options ::= db_options KEEP variable_list */ - 414, /* (119) db_options ::= db_options PAGES NK_INTEGER */ - 414, /* (120) db_options ::= db_options PAGESIZE NK_INTEGER */ - 414, /* (121) db_options ::= db_options TSDB_PAGESIZE NK_INTEGER */ - 414, /* (122) db_options ::= db_options PRECISION NK_STRING */ - 414, /* (123) db_options ::= db_options REPLICA NK_INTEGER */ - 414, /* (124) db_options ::= db_options VGROUPS NK_INTEGER */ - 414, /* (125) db_options ::= db_options SINGLE_STABLE NK_INTEGER */ - 414, /* (126) db_options ::= db_options RETENTIONS retention_list */ - 414, /* (127) db_options ::= db_options SCHEMALESS NK_INTEGER */ - 414, /* (128) db_options ::= db_options WAL_LEVEL NK_INTEGER */ - 414, /* (129) db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */ - 414, /* (130) db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */ - 414, /* (131) db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ - 414, /* (132) db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */ - 414, /* (133) db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ - 414, /* (134) db_options ::= 
db_options WAL_ROLL_PERIOD NK_INTEGER */ - 414, /* (135) db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */ - 414, /* (136) db_options ::= db_options STT_TRIGGER NK_INTEGER */ - 414, /* (137) db_options ::= db_options TABLE_PREFIX signed */ - 414, /* (138) db_options ::= db_options TABLE_SUFFIX signed */ - 414, /* (139) db_options ::= db_options S3_CHUNKSIZE NK_INTEGER */ - 414, /* (140) db_options ::= db_options S3_KEEPLOCAL NK_INTEGER */ - 414, /* (141) db_options ::= db_options S3_KEEPLOCAL NK_VARIABLE */ - 414, /* (142) db_options ::= db_options S3_COMPACT NK_INTEGER */ - 414, /* (143) db_options ::= db_options KEEP_TIME_OFFSET NK_INTEGER */ - 414, /* (144) db_options ::= db_options ENCRYPT_ALGORITHM NK_STRING */ - 416, /* (145) alter_db_options ::= alter_db_option */ - 416, /* (146) alter_db_options ::= alter_db_options alter_db_option */ - 424, /* (147) alter_db_option ::= BUFFER NK_INTEGER */ - 424, /* (148) alter_db_option ::= CACHEMODEL NK_STRING */ - 424, /* (149) alter_db_option ::= CACHESIZE NK_INTEGER */ - 424, /* (150) alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */ - 424, /* (151) alter_db_option ::= KEEP integer_list */ - 424, /* (152) alter_db_option ::= KEEP variable_list */ - 424, /* (153) alter_db_option ::= PAGES NK_INTEGER */ - 424, /* (154) alter_db_option ::= REPLICA NK_INTEGER */ - 424, /* (155) alter_db_option ::= WAL_LEVEL NK_INTEGER */ - 424, /* (156) alter_db_option ::= STT_TRIGGER NK_INTEGER */ - 424, /* (157) alter_db_option ::= MINROWS NK_INTEGER */ - 424, /* (158) alter_db_option ::= WAL_RETENTION_PERIOD NK_INTEGER */ - 424, /* (159) alter_db_option ::= WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ - 424, /* (160) alter_db_option ::= WAL_RETENTION_SIZE NK_INTEGER */ - 424, /* (161) alter_db_option ::= WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ - 424, /* (162) alter_db_option ::= S3_KEEPLOCAL NK_INTEGER */ - 424, /* (163) alter_db_option ::= S3_KEEPLOCAL NK_VARIABLE */ - 424, /* (164) alter_db_option ::= S3_COMPACT NK_INTEGER 
*/ - 424, /* (165) alter_db_option ::= KEEP_TIME_OFFSET NK_INTEGER */ - 424, /* (166) alter_db_option ::= ENCRYPT_ALGORITHM NK_STRING */ - 420, /* (167) integer_list ::= NK_INTEGER */ - 420, /* (168) integer_list ::= integer_list NK_COMMA NK_INTEGER */ - 421, /* (169) variable_list ::= NK_VARIABLE */ - 421, /* (170) variable_list ::= variable_list NK_COMMA NK_VARIABLE */ - 422, /* (171) retention_list ::= retention */ - 422, /* (172) retention_list ::= retention_list NK_COMMA retention */ - 425, /* (173) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ - 425, /* (174) retention ::= NK_MINUS NK_COLON NK_VARIABLE */ - 417, /* (175) speed_opt ::= */ - 417, /* (176) speed_opt ::= BWLIMIT NK_INTEGER */ - 418, /* (177) start_opt ::= */ - 418, /* (178) start_opt ::= START WITH NK_INTEGER */ - 418, /* (179) start_opt ::= START WITH NK_STRING */ - 418, /* (180) start_opt ::= START WITH TIMESTAMP NK_STRING */ - 419, /* (181) end_opt ::= */ - 419, /* (182) end_opt ::= END WITH NK_INTEGER */ - 419, /* (183) end_opt ::= END WITH NK_STRING */ - 419, /* (184) end_opt ::= END WITH TIMESTAMP NK_STRING */ - 389, /* (185) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ - 389, /* (186) cmd ::= CREATE TABLE multi_create_clause */ - 389, /* (187) cmd ::= CREATE TABLE not_exists_opt USING full_table_name NK_LP tag_list_opt NK_RP FILE NK_STRING */ - 389, /* (188) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ - 389, /* (189) cmd ::= DROP TABLE with_opt multi_drop_clause */ - 389, /* (190) cmd ::= DROP STABLE with_opt exists_opt full_table_name */ - 389, /* (191) cmd ::= ALTER TABLE alter_table_clause */ - 389, /* (192) cmd ::= ALTER STABLE alter_table_clause */ - 435, /* (193) alter_table_clause ::= full_table_name alter_table_options */ - 435, /* (194) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name column_options */ - 435, /* (195) 
alter_table_clause ::= full_table_name DROP COLUMN column_name */ - 435, /* (196) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ - 435, /* (197) alter_table_clause ::= full_table_name MODIFY COLUMN column_name column_options */ - 435, /* (198) alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ - 435, /* (199) alter_table_clause ::= full_table_name ADD TAG column_name type_name */ - 435, /* (200) alter_table_clause ::= full_table_name DROP TAG column_name */ - 435, /* (201) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ - 435, /* (202) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ - 435, /* (203) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ tags_literal */ - 430, /* (204) multi_create_clause ::= create_subtable_clause */ - 430, /* (205) multi_create_clause ::= multi_create_clause create_subtable_clause */ - 441, /* (206) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP tags_literal_list NK_RP table_options */ - 434, /* (207) multi_drop_clause ::= drop_table_clause */ - 434, /* (208) multi_drop_clause ::= multi_drop_clause NK_COMMA drop_table_clause */ - 444, /* (209) drop_table_clause ::= exists_opt full_table_name */ - 433, /* (210) with_opt ::= */ - 433, /* (211) with_opt ::= WITH */ - 442, /* (212) specific_cols_opt ::= */ - 442, /* (213) specific_cols_opt ::= NK_LP col_name_list NK_RP */ - 426, /* (214) full_table_name ::= table_name */ - 426, /* (215) full_table_name ::= db_name NK_DOT table_name */ - 446, /* (216) tag_def_list ::= tag_def */ - 446, /* (217) tag_def_list ::= tag_def_list NK_COMMA tag_def */ - 447, /* (218) tag_def ::= column_name type_name */ - 427, /* (219) column_def_list ::= column_def */ - 427, /* (220) column_def_list ::= column_def_list NK_COMMA column_def */ - 448, /* (221) column_def ::= column_name type_name column_options */ - 438, /* 
(222) type_name ::= BOOL */ - 438, /* (223) type_name ::= TINYINT */ - 438, /* (224) type_name ::= SMALLINT */ - 438, /* (225) type_name ::= INT */ - 438, /* (226) type_name ::= INTEGER */ - 438, /* (227) type_name ::= BIGINT */ - 438, /* (228) type_name ::= FLOAT */ - 438, /* (229) type_name ::= DOUBLE */ - 438, /* (230) type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ - 438, /* (231) type_name ::= TIMESTAMP */ - 438, /* (232) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ - 438, /* (233) type_name ::= TINYINT UNSIGNED */ - 438, /* (234) type_name ::= SMALLINT UNSIGNED */ - 438, /* (235) type_name ::= INT UNSIGNED */ - 438, /* (236) type_name ::= BIGINT UNSIGNED */ - 438, /* (237) type_name ::= JSON */ - 438, /* (238) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ - 438, /* (239) type_name ::= MEDIUMBLOB */ - 438, /* (240) type_name ::= BLOB */ - 438, /* (241) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ - 438, /* (242) type_name ::= GEOMETRY NK_LP NK_INTEGER NK_RP */ - 438, /* (243) type_name ::= DECIMAL */ - 438, /* (244) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ - 438, /* (245) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ - 449, /* (246) type_name_default_len ::= BINARY */ - 449, /* (247) type_name_default_len ::= NCHAR */ - 449, /* (248) type_name_default_len ::= VARCHAR */ - 449, /* (249) type_name_default_len ::= VARBINARY */ - 428, /* (250) tags_def_opt ::= */ - 428, /* (251) tags_def_opt ::= tags_def */ - 432, /* (252) tags_def ::= TAGS NK_LP tag_def_list NK_RP */ - 429, /* (253) table_options ::= */ - 429, /* (254) table_options ::= table_options COMMENT NK_STRING */ - 429, /* (255) table_options ::= table_options MAX_DELAY duration_list */ - 429, /* (256) table_options ::= table_options WATERMARK duration_list */ - 429, /* (257) table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */ - 429, /* (258) table_options ::= table_options TTL NK_INTEGER */ - 429, /* (259) table_options ::= table_options SMA NK_LP 
col_name_list NK_RP */ - 429, /* (260) table_options ::= table_options DELETE_MARK duration_list */ - 436, /* (261) alter_table_options ::= alter_table_option */ - 436, /* (262) alter_table_options ::= alter_table_options alter_table_option */ - 452, /* (263) alter_table_option ::= COMMENT NK_STRING */ - 452, /* (264) alter_table_option ::= TTL NK_INTEGER */ - 450, /* (265) duration_list ::= duration_literal */ - 450, /* (266) duration_list ::= duration_list NK_COMMA duration_literal */ - 451, /* (267) rollup_func_list ::= rollup_func_name */ - 451, /* (268) rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ - 454, /* (269) rollup_func_name ::= function_name */ - 454, /* (270) rollup_func_name ::= FIRST */ - 454, /* (271) rollup_func_name ::= LAST */ - 445, /* (272) col_name_list ::= col_name */ - 445, /* (273) col_name_list ::= col_name_list NK_COMMA col_name */ - 456, /* (274) col_name ::= column_name */ - 389, /* (275) cmd ::= SHOW DNODES */ - 389, /* (276) cmd ::= SHOW USERS */ - 389, /* (277) cmd ::= SHOW USERS FULL */ - 389, /* (278) cmd ::= SHOW USER PRIVILEGES */ - 389, /* (279) cmd ::= SHOW db_kind_opt DATABASES */ - 389, /* (280) cmd ::= SHOW table_kind_db_name_cond_opt TABLES like_pattern_opt */ - 389, /* (281) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ - 389, /* (282) cmd ::= SHOW db_name_cond_opt VGROUPS */ - 389, /* (283) cmd ::= SHOW MNODES */ - 389, /* (284) cmd ::= SHOW QNODES */ - 389, /* (285) cmd ::= SHOW ANODES */ - 389, /* (286) cmd ::= SHOW ANODES FULL */ - 389, /* (287) cmd ::= SHOW ARBGROUPS */ - 389, /* (288) cmd ::= SHOW FUNCTIONS */ - 389, /* (289) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ - 389, /* (290) cmd ::= SHOW INDEXES FROM db_name NK_DOT table_name */ - 389, /* (291) cmd ::= SHOW STREAMS */ - 389, /* (292) cmd ::= SHOW ACCOUNTS */ - 389, /* (293) cmd ::= SHOW APPS */ - 389, /* (294) cmd ::= SHOW CONNECTIONS */ - 389, /* (295) cmd ::= SHOW LICENCES */ - 389, /* (296) cmd ::= SHOW 
GRANTS */ - 389, /* (297) cmd ::= SHOW GRANTS FULL */ - 389, /* (298) cmd ::= SHOW GRANTS LOGS */ - 389, /* (299) cmd ::= SHOW CLUSTER MACHINES */ - 389, /* (300) cmd ::= SHOW CREATE DATABASE db_name */ - 389, /* (301) cmd ::= SHOW CREATE TABLE full_table_name */ - 389, /* (302) cmd ::= SHOW CREATE STABLE full_table_name */ - 389, /* (303) cmd ::= SHOW ENCRYPTIONS */ - 389, /* (304) cmd ::= SHOW QUERIES */ - 389, /* (305) cmd ::= SHOW SCORES */ - 389, /* (306) cmd ::= SHOW TOPICS */ - 389, /* (307) cmd ::= SHOW VARIABLES */ - 389, /* (308) cmd ::= SHOW CLUSTER VARIABLES */ - 389, /* (309) cmd ::= SHOW LOCAL VARIABLES */ - 389, /* (310) cmd ::= SHOW DNODE NK_INTEGER VARIABLES like_pattern_opt */ - 389, /* (311) cmd ::= SHOW BNODES */ - 389, /* (312) cmd ::= SHOW SNODES */ - 389, /* (313) cmd ::= SHOW CLUSTER */ - 389, /* (314) cmd ::= SHOW TRANSACTIONS */ - 389, /* (315) cmd ::= SHOW TABLE DISTRIBUTED full_table_name */ - 389, /* (316) cmd ::= SHOW CONSUMERS */ - 389, /* (317) cmd ::= SHOW SUBSCRIPTIONS */ - 389, /* (318) cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */ - 389, /* (319) cmd ::= SHOW TAGS FROM db_name NK_DOT table_name */ - 389, /* (320) cmd ::= SHOW TABLE TAGS tag_list_opt FROM table_name_cond from_db_opt */ - 389, /* (321) cmd ::= SHOW TABLE TAGS tag_list_opt FROM db_name NK_DOT table_name */ - 389, /* (322) cmd ::= SHOW VNODES ON DNODE NK_INTEGER */ - 389, /* (323) cmd ::= SHOW VNODES */ - 389, /* (324) cmd ::= SHOW db_name_cond_opt ALIVE */ - 389, /* (325) cmd ::= SHOW CLUSTER ALIVE */ - 389, /* (326) cmd ::= SHOW db_name_cond_opt VIEWS like_pattern_opt */ - 389, /* (327) cmd ::= SHOW CREATE VIEW full_table_name */ - 389, /* (328) cmd ::= SHOW COMPACTS */ - 389, /* (329) cmd ::= SHOW COMPACT NK_INTEGER */ - 458, /* (330) table_kind_db_name_cond_opt ::= */ - 458, /* (331) table_kind_db_name_cond_opt ::= table_kind */ - 458, /* (332) table_kind_db_name_cond_opt ::= db_name NK_DOT */ - 458, /* (333) table_kind_db_name_cond_opt ::= table_kind 
db_name NK_DOT */ - 463, /* (334) table_kind ::= NORMAL */ - 463, /* (335) table_kind ::= CHILD */ - 460, /* (336) db_name_cond_opt ::= */ - 460, /* (337) db_name_cond_opt ::= db_name NK_DOT */ - 459, /* (338) like_pattern_opt ::= */ - 459, /* (339) like_pattern_opt ::= LIKE NK_STRING */ - 461, /* (340) table_name_cond ::= table_name */ - 462, /* (341) from_db_opt ::= */ - 462, /* (342) from_db_opt ::= FROM db_name */ - 431, /* (343) tag_list_opt ::= */ - 431, /* (344) tag_list_opt ::= tag_item */ - 431, /* (345) tag_list_opt ::= tag_list_opt NK_COMMA tag_item */ - 464, /* (346) tag_item ::= TBNAME */ - 464, /* (347) tag_item ::= QTAGS */ - 464, /* (348) tag_item ::= column_name */ - 464, /* (349) tag_item ::= column_name column_alias */ - 464, /* (350) tag_item ::= column_name AS column_alias */ - 457, /* (351) db_kind_opt ::= */ - 457, /* (352) db_kind_opt ::= USER */ - 457, /* (353) db_kind_opt ::= SYSTEM */ - 389, /* (354) cmd ::= CREATE TSMA not_exists_opt tsma_name ON full_table_name tsma_func_list INTERVAL NK_LP duration_literal NK_RP */ - 389, /* (355) cmd ::= CREATE RECURSIVE TSMA not_exists_opt tsma_name ON full_table_name INTERVAL NK_LP duration_literal NK_RP */ - 389, /* (356) cmd ::= DROP TSMA exists_opt full_tsma_name */ - 389, /* (357) cmd ::= SHOW db_name_cond_opt TSMAS */ - 468, /* (358) full_tsma_name ::= tsma_name */ - 468, /* (359) full_tsma_name ::= db_name NK_DOT tsma_name */ - 467, /* (360) tsma_func_list ::= FUNCTION NK_LP func_list NK_RP */ - 389, /* (361) cmd ::= CREATE SMA INDEX not_exists_opt col_name ON full_table_name index_options */ - 389, /* (362) cmd ::= CREATE INDEX not_exists_opt col_name ON full_table_name NK_LP col_name_list NK_RP */ - 389, /* (363) cmd ::= DROP INDEX exists_opt full_index_name */ - 471, /* (364) full_index_name ::= index_name */ - 471, /* (365) full_index_name ::= db_name NK_DOT index_name */ - 470, /* (366) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt 
sma_stream_opt */ - 470, /* (367) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */ - 469, /* (368) func_list ::= func */ - 469, /* (369) func_list ::= func_list NK_COMMA func */ - 475, /* (370) func ::= sma_func_name NK_LP expression_list NK_RP */ - 476, /* (371) sma_func_name ::= function_name */ - 476, /* (372) sma_func_name ::= COUNT */ - 476, /* (373) sma_func_name ::= FIRST */ - 476, /* (374) sma_func_name ::= LAST */ - 476, /* (375) sma_func_name ::= LAST_ROW */ - 474, /* (376) sma_stream_opt ::= */ - 474, /* (377) sma_stream_opt ::= sma_stream_opt WATERMARK duration_literal */ - 474, /* (378) sma_stream_opt ::= sma_stream_opt MAX_DELAY duration_literal */ - 474, /* (379) sma_stream_opt ::= sma_stream_opt DELETE_MARK duration_literal */ - 478, /* (380) with_meta ::= AS */ - 478, /* (381) with_meta ::= WITH META AS */ - 478, /* (382) with_meta ::= ONLY META AS */ - 389, /* (383) cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery */ - 389, /* (384) cmd ::= CREATE TOPIC not_exists_opt topic_name with_meta DATABASE db_name */ - 389, /* (385) cmd ::= CREATE TOPIC not_exists_opt topic_name with_meta STABLE full_table_name where_clause_opt */ - 389, /* (386) cmd ::= DROP TOPIC exists_opt topic_name */ - 389, /* (387) cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */ - 389, /* (388) cmd ::= DESC full_table_name */ - 389, /* (389) cmd ::= DESCRIBE full_table_name */ - 389, /* (390) cmd ::= RESET QUERY CACHE */ - 389, /* (391) cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery */ - 389, /* (392) cmd ::= EXPLAIN analyze_opt explain_options insert_query */ - 482, /* (393) analyze_opt ::= */ - 482, /* (394) analyze_opt ::= ANALYZE */ - 483, /* (395) explain_options ::= */ - 483, /* (396) explain_options ::= explain_options VERBOSE NK_BOOL */ - 483, /* (397) explain_options ::= explain_options RATIO NK_FLOAT */ - 389, /* (398) cmd ::= 
CREATE or_replace_opt agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt language_opt */ - 389, /* (399) cmd ::= DROP FUNCTION exists_opt function_name */ - 486, /* (400) agg_func_opt ::= */ - 486, /* (401) agg_func_opt ::= AGGREGATE */ - 487, /* (402) bufsize_opt ::= */ - 487, /* (403) bufsize_opt ::= BUFSIZE NK_INTEGER */ - 488, /* (404) language_opt ::= */ - 488, /* (405) language_opt ::= LANGUAGE NK_STRING */ - 485, /* (406) or_replace_opt ::= */ - 485, /* (407) or_replace_opt ::= OR REPLACE */ - 389, /* (408) cmd ::= CREATE or_replace_opt VIEW full_view_name AS query_or_subquery */ - 389, /* (409) cmd ::= DROP VIEW exists_opt full_view_name */ - 489, /* (410) full_view_name ::= view_name */ - 489, /* (411) full_view_name ::= db_name NK_DOT view_name */ - 389, /* (412) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name col_list_opt tag_def_or_ref_opt subtable_opt AS query_or_subquery */ - 389, /* (413) cmd ::= DROP STREAM exists_opt stream_name */ - 389, /* (414) cmd ::= PAUSE STREAM exists_opt stream_name */ - 389, /* (415) cmd ::= RESUME STREAM exists_opt ignore_opt stream_name */ - 493, /* (416) col_list_opt ::= */ - 493, /* (417) col_list_opt ::= NK_LP column_stream_def_list NK_RP */ - 497, /* (418) column_stream_def_list ::= column_stream_def */ - 497, /* (419) column_stream_def_list ::= column_stream_def_list NK_COMMA column_stream_def */ - 498, /* (420) column_stream_def ::= column_name stream_col_options */ - 499, /* (421) stream_col_options ::= */ - 499, /* (422) stream_col_options ::= stream_col_options PRIMARY KEY */ - 494, /* (423) tag_def_or_ref_opt ::= */ - 494, /* (424) tag_def_or_ref_opt ::= tags_def */ - 494, /* (425) tag_def_or_ref_opt ::= TAGS NK_LP column_stream_def_list NK_RP */ - 492, /* (426) stream_options ::= */ - 492, /* (427) stream_options ::= stream_options TRIGGER AT_ONCE */ - 492, /* (428) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ - 
492, /* (429) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ - 492, /* (430) stream_options ::= stream_options WATERMARK duration_literal */ - 492, /* (431) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ - 492, /* (432) stream_options ::= stream_options FILL_HISTORY NK_INTEGER */ - 492, /* (433) stream_options ::= stream_options DELETE_MARK duration_literal */ - 492, /* (434) stream_options ::= stream_options IGNORE UPDATE NK_INTEGER */ - 495, /* (435) subtable_opt ::= */ - 495, /* (436) subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ - 496, /* (437) ignore_opt ::= */ - 496, /* (438) ignore_opt ::= IGNORE UNTREATED */ - 389, /* (439) cmd ::= KILL CONNECTION NK_INTEGER */ - 389, /* (440) cmd ::= KILL QUERY NK_STRING */ - 389, /* (441) cmd ::= KILL TRANSACTION NK_INTEGER */ - 389, /* (442) cmd ::= KILL COMPACT NK_INTEGER */ - 389, /* (443) cmd ::= BALANCE VGROUP */ - 389, /* (444) cmd ::= BALANCE VGROUP LEADER on_vgroup_id */ - 389, /* (445) cmd ::= BALANCE VGROUP LEADER DATABASE db_name */ - 389, /* (446) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ - 389, /* (447) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ - 389, /* (448) cmd ::= SPLIT VGROUP NK_INTEGER */ - 501, /* (449) on_vgroup_id ::= */ - 501, /* (450) on_vgroup_id ::= ON NK_INTEGER */ - 502, /* (451) dnode_list ::= DNODE NK_INTEGER */ - 502, /* (452) dnode_list ::= dnode_list DNODE NK_INTEGER */ - 389, /* (453) cmd ::= DELETE FROM full_table_name where_clause_opt */ - 389, /* (454) cmd ::= query_or_subquery */ - 389, /* (455) cmd ::= insert_query */ - 484, /* (456) insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ - 484, /* (457) insert_query ::= INSERT INTO full_table_name query_or_subquery */ - 440, /* (458) tags_literal ::= NK_INTEGER */ - 440, /* (459) tags_literal ::= NK_INTEGER NK_PLUS duration_literal */ - 440, /* (460) tags_literal ::= NK_INTEGER NK_MINUS duration_literal */ - 440, /* (461) tags_literal 
::= NK_PLUS NK_INTEGER */ - 440, /* (462) tags_literal ::= NK_PLUS NK_INTEGER NK_PLUS duration_literal */ - 440, /* (463) tags_literal ::= NK_PLUS NK_INTEGER NK_MINUS duration_literal */ - 440, /* (464) tags_literal ::= NK_MINUS NK_INTEGER */ - 440, /* (465) tags_literal ::= NK_MINUS NK_INTEGER NK_PLUS duration_literal */ - 440, /* (466) tags_literal ::= NK_MINUS NK_INTEGER NK_MINUS duration_literal */ - 440, /* (467) tags_literal ::= NK_FLOAT */ - 440, /* (468) tags_literal ::= NK_PLUS NK_FLOAT */ - 440, /* (469) tags_literal ::= NK_MINUS NK_FLOAT */ - 440, /* (470) tags_literal ::= NK_BIN */ - 440, /* (471) tags_literal ::= NK_BIN NK_PLUS duration_literal */ - 440, /* (472) tags_literal ::= NK_BIN NK_MINUS duration_literal */ - 440, /* (473) tags_literal ::= NK_PLUS NK_BIN */ - 440, /* (474) tags_literal ::= NK_PLUS NK_BIN NK_PLUS duration_literal */ - 440, /* (475) tags_literal ::= NK_PLUS NK_BIN NK_MINUS duration_literal */ - 440, /* (476) tags_literal ::= NK_MINUS NK_BIN */ - 440, /* (477) tags_literal ::= NK_MINUS NK_BIN NK_PLUS duration_literal */ - 440, /* (478) tags_literal ::= NK_MINUS NK_BIN NK_MINUS duration_literal */ - 440, /* (479) tags_literal ::= NK_HEX */ - 440, /* (480) tags_literal ::= NK_HEX NK_PLUS duration_literal */ - 440, /* (481) tags_literal ::= NK_HEX NK_MINUS duration_literal */ - 440, /* (482) tags_literal ::= NK_PLUS NK_HEX */ - 440, /* (483) tags_literal ::= NK_PLUS NK_HEX NK_PLUS duration_literal */ - 440, /* (484) tags_literal ::= NK_PLUS NK_HEX NK_MINUS duration_literal */ - 440, /* (485) tags_literal ::= NK_MINUS NK_HEX */ - 440, /* (486) tags_literal ::= NK_MINUS NK_HEX NK_PLUS duration_literal */ - 440, /* (487) tags_literal ::= NK_MINUS NK_HEX NK_MINUS duration_literal */ - 440, /* (488) tags_literal ::= NK_STRING */ - 440, /* (489) tags_literal ::= NK_STRING NK_PLUS duration_literal */ - 440, /* (490) tags_literal ::= NK_STRING NK_MINUS duration_literal */ - 440, /* (491) tags_literal ::= NK_BOOL */ - 440, /* (492) 
tags_literal ::= NULL */ - 440, /* (493) tags_literal ::= literal_func */ - 440, /* (494) tags_literal ::= literal_func NK_PLUS duration_literal */ - 440, /* (495) tags_literal ::= literal_func NK_MINUS duration_literal */ - 443, /* (496) tags_literal_list ::= tags_literal */ - 443, /* (497) tags_literal_list ::= tags_literal_list NK_COMMA tags_literal */ - 392, /* (498) literal ::= NK_INTEGER */ - 392, /* (499) literal ::= NK_FLOAT */ - 392, /* (500) literal ::= NK_STRING */ - 392, /* (501) literal ::= NK_BOOL */ - 392, /* (502) literal ::= TIMESTAMP NK_STRING */ - 392, /* (503) literal ::= duration_literal */ - 392, /* (504) literal ::= NULL */ - 392, /* (505) literal ::= NK_QUESTION */ - 453, /* (506) duration_literal ::= NK_VARIABLE */ - 423, /* (507) signed ::= NK_INTEGER */ - 423, /* (508) signed ::= NK_PLUS NK_INTEGER */ - 423, /* (509) signed ::= NK_MINUS NK_INTEGER */ - 423, /* (510) signed ::= NK_FLOAT */ - 423, /* (511) signed ::= NK_PLUS NK_FLOAT */ - 423, /* (512) signed ::= NK_MINUS NK_FLOAT */ - 504, /* (513) signed_literal ::= signed */ - 504, /* (514) signed_literal ::= NK_STRING */ - 504, /* (515) signed_literal ::= NK_BOOL */ - 504, /* (516) signed_literal ::= TIMESTAMP NK_STRING */ - 504, /* (517) signed_literal ::= duration_literal */ - 504, /* (518) signed_literal ::= NULL */ - 504, /* (519) signed_literal ::= literal_func */ - 504, /* (520) signed_literal ::= NK_QUESTION */ - 505, /* (521) literal_list ::= signed_literal */ - 505, /* (522) literal_list ::= literal_list NK_COMMA signed_literal */ - 406, /* (523) db_name ::= NK_ID */ - 407, /* (524) table_name ::= NK_ID */ - 437, /* (525) column_name ::= NK_ID */ - 455, /* (526) function_name ::= NK_ID */ - 490, /* (527) view_name ::= NK_ID */ - 506, /* (528) table_alias ::= NK_ID */ - 465, /* (529) column_alias ::= NK_ID */ - 465, /* (530) column_alias ::= NK_ALIAS */ - 399, /* (531) user_name ::= NK_ID */ - 408, /* (532) topic_name ::= NK_ID */ - 491, /* (533) stream_name ::= NK_ID */ - 481, 
/* (534) cgroup_name ::= NK_ID */ - 472, /* (535) index_name ::= NK_ID */ - 466, /* (536) tsma_name ::= NK_ID */ - 507, /* (537) expr_or_subquery ::= expression */ - 500, /* (538) expression ::= literal */ - 500, /* (539) expression ::= pseudo_column */ - 500, /* (540) expression ::= column_reference */ - 500, /* (541) expression ::= function_expression */ - 500, /* (542) expression ::= case_when_expression */ - 500, /* (543) expression ::= NK_LP expression NK_RP */ - 500, /* (544) expression ::= NK_PLUS expr_or_subquery */ - 500, /* (545) expression ::= NK_MINUS expr_or_subquery */ - 500, /* (546) expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ - 500, /* (547) expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ - 500, /* (548) expression ::= expr_or_subquery NK_STAR expr_or_subquery */ - 500, /* (549) expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ - 500, /* (550) expression ::= expr_or_subquery NK_REM expr_or_subquery */ - 500, /* (551) expression ::= column_reference NK_ARROW NK_STRING */ - 500, /* (552) expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ - 500, /* (553) expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ - 477, /* (554) expression_list ::= expr_or_subquery */ - 477, /* (555) expression_list ::= expression_list NK_COMMA expr_or_subquery */ - 509, /* (556) column_reference ::= column_name */ - 509, /* (557) column_reference ::= table_name NK_DOT column_name */ - 509, /* (558) column_reference ::= NK_ALIAS */ - 509, /* (559) column_reference ::= table_name NK_DOT NK_ALIAS */ - 508, /* (560) pseudo_column ::= ROWTS */ - 508, /* (561) pseudo_column ::= TBNAME */ - 508, /* (562) pseudo_column ::= table_name NK_DOT TBNAME */ - 508, /* (563) pseudo_column ::= QSTART */ - 508, /* (564) pseudo_column ::= QEND */ - 508, /* (565) pseudo_column ::= QDURATION */ - 508, /* (566) pseudo_column ::= WSTART */ - 508, /* (567) pseudo_column ::= WEND */ - 508, /* (568) pseudo_column ::= WDURATION */ - 508, /* 
(569) pseudo_column ::= IROWTS */ - 508, /* (570) pseudo_column ::= ISFILLED */ - 508, /* (571) pseudo_column ::= QTAGS */ - 508, /* (572) pseudo_column ::= FLOW */ - 508, /* (573) pseudo_column ::= FHIGH */ - 508, /* (574) pseudo_column ::= FROWTS */ - 510, /* (575) function_expression ::= function_name NK_LP expression_list NK_RP */ - 510, /* (576) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ - 510, /* (577) function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ - 510, /* (578) function_expression ::= CAST NK_LP expr_or_subquery AS type_name_default_len NK_RP */ - 510, /* (579) function_expression ::= POSITION NK_LP expr_or_subquery IN expr_or_subquery NK_RP */ - 510, /* (580) function_expression ::= TRIM NK_LP expr_or_subquery NK_RP */ - 510, /* (581) function_expression ::= TRIM NK_LP trim_specification_type FROM expr_or_subquery NK_RP */ - 510, /* (582) function_expression ::= TRIM NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ - 510, /* (583) function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP */ - 510, /* (584) function_expression ::= substr_func NK_LP expression_list NK_RP */ - 510, /* (585) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ - 510, /* (586) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP */ - 510, /* (587) function_expression ::= REPLACE NK_LP expression_list NK_RP */ - 510, /* (588) function_expression ::= literal_func */ - 510, /* (589) function_expression ::= rand_func */ - 503, /* (590) literal_func ::= noarg_func NK_LP NK_RP */ - 503, /* (591) literal_func ::= NOW */ - 503, /* (592) literal_func ::= TODAY */ - 516, /* (593) rand_func ::= RAND NK_LP NK_RP */ - 516, /* (594) rand_func ::= RAND NK_LP expression_list NK_RP */ - 515, /* (595) substr_func ::= SUBSTR */ - 515, /* (596) substr_func ::= SUBSTRING */ - 514, /* (597) 
trim_specification_type ::= BOTH */ - 514, /* (598) trim_specification_type ::= TRAILING */ - 514, /* (599) trim_specification_type ::= LEADING */ - 517, /* (600) noarg_func ::= NOW */ - 517, /* (601) noarg_func ::= TODAY */ - 517, /* (602) noarg_func ::= TIMEZONE */ - 517, /* (603) noarg_func ::= DATABASE */ - 517, /* (604) noarg_func ::= CLIENT_VERSION */ - 517, /* (605) noarg_func ::= SERVER_VERSION */ - 517, /* (606) noarg_func ::= SERVER_STATUS */ - 517, /* (607) noarg_func ::= CURRENT_USER */ - 517, /* (608) noarg_func ::= USER */ - 517, /* (609) noarg_func ::= PI */ - 512, /* (610) star_func ::= COUNT */ - 512, /* (611) star_func ::= FIRST */ - 512, /* (612) star_func ::= LAST */ - 512, /* (613) star_func ::= LAST_ROW */ - 513, /* (614) star_func_para_list ::= NK_STAR */ - 513, /* (615) star_func_para_list ::= other_para_list */ - 518, /* (616) other_para_list ::= star_func_para */ - 518, /* (617) other_para_list ::= other_para_list NK_COMMA star_func_para */ - 519, /* (618) star_func_para ::= expr_or_subquery */ - 519, /* (619) star_func_para ::= table_name NK_DOT NK_STAR */ - 511, /* (620) case_when_expression ::= CASE when_then_list case_when_else_opt END */ - 511, /* (621) case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ - 520, /* (622) when_then_list ::= when_then_expr */ - 520, /* (623) when_then_list ::= when_then_list when_then_expr */ - 523, /* (624) when_then_expr ::= WHEN common_expression THEN common_expression */ - 521, /* (625) case_when_else_opt ::= */ - 521, /* (626) case_when_else_opt ::= ELSE common_expression */ - 524, /* (627) predicate ::= expr_or_subquery compare_op expr_or_subquery */ - 524, /* (628) predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ - 524, /* (629) predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ - 524, /* (630) predicate ::= expr_or_subquery IS NULL */ - 524, /* (631) predicate ::= expr_or_subquery IS NOT NULL */ - 
524, /* (632) predicate ::= expr_or_subquery in_op in_predicate_value */ - 525, /* (633) compare_op ::= NK_LT */ - 525, /* (634) compare_op ::= NK_GT */ - 525, /* (635) compare_op ::= NK_LE */ - 525, /* (636) compare_op ::= NK_GE */ - 525, /* (637) compare_op ::= NK_NE */ - 525, /* (638) compare_op ::= NK_EQ */ - 525, /* (639) compare_op ::= LIKE */ - 525, /* (640) compare_op ::= NOT LIKE */ - 525, /* (641) compare_op ::= MATCH */ - 525, /* (642) compare_op ::= NMATCH */ - 525, /* (643) compare_op ::= CONTAINS */ - 526, /* (644) in_op ::= IN */ - 526, /* (645) in_op ::= NOT IN */ - 527, /* (646) in_predicate_value ::= NK_LP literal_list NK_RP */ - 528, /* (647) boolean_value_expression ::= boolean_primary */ - 528, /* (648) boolean_value_expression ::= NOT boolean_primary */ - 528, /* (649) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ - 528, /* (650) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ - 529, /* (651) boolean_primary ::= predicate */ - 529, /* (652) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ - 522, /* (653) common_expression ::= expr_or_subquery */ - 522, /* (654) common_expression ::= boolean_value_expression */ - 530, /* (655) from_clause_opt ::= */ - 530, /* (656) from_clause_opt ::= FROM table_reference_list */ - 531, /* (657) table_reference_list ::= table_reference */ - 531, /* (658) table_reference_list ::= table_reference_list NK_COMMA table_reference */ - 532, /* (659) table_reference ::= table_primary */ - 532, /* (660) table_reference ::= joined_table */ - 533, /* (661) table_primary ::= table_name alias_opt */ - 533, /* (662) table_primary ::= db_name NK_DOT table_name alias_opt */ - 533, /* (663) table_primary ::= subquery alias_opt */ - 533, /* (664) table_primary ::= parenthesized_joined_table */ - 535, /* (665) alias_opt ::= */ - 535, /* (666) alias_opt ::= table_alias */ - 535, /* (667) alias_opt ::= AS table_alias */ - 537, /* (668) 
parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - 537, /* (669) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ - 534, /* (670) joined_table ::= table_reference join_type join_subtype JOIN table_reference join_on_clause_opt window_offset_clause_opt jlimit_clause_opt */ - 538, /* (671) join_type ::= */ - 538, /* (672) join_type ::= INNER */ - 538, /* (673) join_type ::= LEFT */ - 538, /* (674) join_type ::= RIGHT */ - 538, /* (675) join_type ::= FULL */ - 539, /* (676) join_subtype ::= */ - 539, /* (677) join_subtype ::= OUTER */ - 539, /* (678) join_subtype ::= SEMI */ - 539, /* (679) join_subtype ::= ANTI */ - 539, /* (680) join_subtype ::= ASOF */ - 539, /* (681) join_subtype ::= WINDOW */ - 540, /* (682) join_on_clause_opt ::= */ - 540, /* (683) join_on_clause_opt ::= ON search_condition */ - 541, /* (684) window_offset_clause_opt ::= */ - 541, /* (685) window_offset_clause_opt ::= WINDOW_OFFSET NK_LP window_offset_literal NK_COMMA window_offset_literal NK_RP */ - 543, /* (686) window_offset_literal ::= NK_VARIABLE */ - 543, /* (687) window_offset_literal ::= NK_MINUS NK_VARIABLE */ - 542, /* (688) jlimit_clause_opt ::= */ - 542, /* (689) jlimit_clause_opt ::= JLIMIT NK_INTEGER */ - 544, /* (690) query_specification ::= SELECT hint_list set_quantifier_opt tag_mode_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ - 545, /* (691) hint_list ::= */ - 545, /* (692) hint_list ::= NK_HINT */ - 547, /* (693) tag_mode_opt ::= */ - 547, /* (694) tag_mode_opt ::= TAGS */ - 546, /* (695) set_quantifier_opt ::= */ - 546, /* (696) set_quantifier_opt ::= DISTINCT */ - 546, /* (697) set_quantifier_opt ::= ALL */ - 548, /* (698) select_list ::= select_item */ - 548, /* (699) select_list ::= select_list NK_COMMA select_item */ - 556, /* (700) select_item ::= NK_STAR */ - 556, /* (701) select_item ::= common_expression */ - 556, /* 
(702) select_item ::= common_expression column_alias */ - 556, /* (703) select_item ::= common_expression AS column_alias */ - 556, /* (704) select_item ::= table_name NK_DOT NK_STAR */ - 480, /* (705) where_clause_opt ::= */ - 480, /* (706) where_clause_opt ::= WHERE search_condition */ - 549, /* (707) partition_by_clause_opt ::= */ - 549, /* (708) partition_by_clause_opt ::= PARTITION BY partition_list */ - 557, /* (709) partition_list ::= partition_item */ - 557, /* (710) partition_list ::= partition_list NK_COMMA partition_item */ - 558, /* (711) partition_item ::= expr_or_subquery */ - 558, /* (712) partition_item ::= expr_or_subquery column_alias */ - 558, /* (713) partition_item ::= expr_or_subquery AS column_alias */ - 553, /* (714) twindow_clause_opt ::= */ - 553, /* (715) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA interval_sliding_duration_literal NK_RP */ - 553, /* (716) twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ - 553, /* (717) twindow_clause_opt ::= INTERVAL NK_LP interval_sliding_duration_literal NK_RP sliding_opt fill_opt */ - 553, /* (718) twindow_clause_opt ::= INTERVAL NK_LP interval_sliding_duration_literal NK_COMMA interval_sliding_duration_literal NK_RP sliding_opt fill_opt */ - 553, /* (719) twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition */ - 553, /* (720) twindow_clause_opt ::= COUNT_WINDOW NK_LP NK_INTEGER NK_RP */ - 553, /* (721) twindow_clause_opt ::= COUNT_WINDOW NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ - 553, /* (722) twindow_clause_opt ::= ANOMALY_WINDOW NK_LP expr_or_subquery NK_RP */ - 553, /* (723) twindow_clause_opt ::= ANOMALY_WINDOW NK_LP expr_or_subquery NK_COMMA NK_STRING NK_RP */ - 473, /* (724) sliding_opt ::= */ - 473, /* (725) sliding_opt ::= SLIDING NK_LP interval_sliding_duration_literal NK_RP */ - 559, /* (726) interval_sliding_duration_literal ::= NK_VARIABLE */ - 559, /* (727) interval_sliding_duration_literal ::= NK_STRING */ - 
559, /* (728) interval_sliding_duration_literal ::= NK_INTEGER */ - 552, /* (729) fill_opt ::= */ - 552, /* (730) fill_opt ::= FILL NK_LP fill_mode NK_RP */ - 552, /* (731) fill_opt ::= FILL NK_LP VALUE NK_COMMA expression_list NK_RP */ - 552, /* (732) fill_opt ::= FILL NK_LP VALUE_F NK_COMMA expression_list NK_RP */ - 560, /* (733) fill_mode ::= NONE */ - 560, /* (734) fill_mode ::= PREV */ - 560, /* (735) fill_mode ::= NULL */ - 560, /* (736) fill_mode ::= NULL_F */ - 560, /* (737) fill_mode ::= LINEAR */ - 560, /* (738) fill_mode ::= NEXT */ - 554, /* (739) group_by_clause_opt ::= */ - 554, /* (740) group_by_clause_opt ::= GROUP BY group_by_list */ - 561, /* (741) group_by_list ::= expr_or_subquery */ - 561, /* (742) group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ - 555, /* (743) having_clause_opt ::= */ - 555, /* (744) having_clause_opt ::= HAVING search_condition */ - 550, /* (745) range_opt ::= */ - 550, /* (746) range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ - 550, /* (747) range_opt ::= RANGE NK_LP expr_or_subquery NK_RP */ - 551, /* (748) every_opt ::= */ - 551, /* (749) every_opt ::= EVERY NK_LP duration_literal NK_RP */ - 562, /* (750) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ - 563, /* (751) query_simple ::= query_specification */ - 563, /* (752) query_simple ::= union_query_expression */ - 567, /* (753) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ - 567, /* (754) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ - 568, /* (755) query_simple_or_subquery ::= query_simple */ - 568, /* (756) query_simple_or_subquery ::= subquery */ - 479, /* (757) query_or_subquery ::= query_expression */ - 479, /* (758) query_or_subquery ::= subquery */ - 564, /* (759) order_by_clause_opt ::= */ - 564, /* (760) order_by_clause_opt ::= ORDER BY sort_specification_list */ - 565, /* (761) 
slimit_clause_opt ::= */ - 565, /* (762) slimit_clause_opt ::= SLIMIT NK_INTEGER */ - 565, /* (763) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - 565, /* (764) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - 566, /* (765) limit_clause_opt ::= */ - 566, /* (766) limit_clause_opt ::= LIMIT NK_INTEGER */ - 566, /* (767) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ - 566, /* (768) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - 536, /* (769) subquery ::= NK_LP query_expression NK_RP */ - 536, /* (770) subquery ::= NK_LP subquery NK_RP */ - 409, /* (771) search_condition ::= common_expression */ - 569, /* (772) sort_specification_list ::= sort_specification */ - 569, /* (773) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ - 570, /* (774) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ - 571, /* (775) ordering_specification_opt ::= */ - 571, /* (776) ordering_specification_opt ::= ASC */ - 571, /* (777) ordering_specification_opt ::= DESC */ - 572, /* (778) null_ordering_opt ::= */ - 572, /* (779) null_ordering_opt ::= NULLS FIRST */ - 572, /* (780) null_ordering_opt ::= NULLS LAST */ - 439, /* (781) column_options ::= */ - 439, /* (782) column_options ::= column_options PRIMARY KEY */ - 439, /* (783) column_options ::= column_options NK_ID NK_STRING */ -}; - -/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number -** of symbols on the right-hand side of that rule. 
*/ -static const signed char yyRuleInfoNRhs[] = { - -6, /* (0) cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */ - -4, /* (1) cmd ::= ALTER ACCOUNT NK_ID alter_account_options */ - 0, /* (2) account_options ::= */ - -3, /* (3) account_options ::= account_options PPS literal */ - -3, /* (4) account_options ::= account_options TSERIES literal */ - -3, /* (5) account_options ::= account_options STORAGE literal */ - -3, /* (6) account_options ::= account_options STREAMS literal */ - -3, /* (7) account_options ::= account_options QTIME literal */ - -3, /* (8) account_options ::= account_options DBS literal */ - -3, /* (9) account_options ::= account_options USERS literal */ - -3, /* (10) account_options ::= account_options CONNS literal */ - -3, /* (11) account_options ::= account_options STATE literal */ - -1, /* (12) alter_account_options ::= alter_account_option */ - -2, /* (13) alter_account_options ::= alter_account_options alter_account_option */ - -2, /* (14) alter_account_option ::= PASS literal */ - -2, /* (15) alter_account_option ::= PPS literal */ - -2, /* (16) alter_account_option ::= TSERIES literal */ - -2, /* (17) alter_account_option ::= STORAGE literal */ - -2, /* (18) alter_account_option ::= STREAMS literal */ - -2, /* (19) alter_account_option ::= QTIME literal */ - -2, /* (20) alter_account_option ::= DBS literal */ - -2, /* (21) alter_account_option ::= USERS literal */ - -2, /* (22) alter_account_option ::= CONNS literal */ - -2, /* (23) alter_account_option ::= STATE literal */ - -1, /* (24) ip_range_list ::= NK_STRING */ - -3, /* (25) ip_range_list ::= ip_range_list NK_COMMA NK_STRING */ - -2, /* (26) white_list ::= HOST ip_range_list */ - 0, /* (27) white_list_opt ::= */ - -1, /* (28) white_list_opt ::= white_list */ - 0, /* (29) is_import_opt ::= */ - -2, /* (30) is_import_opt ::= IS_IMPORT NK_INTEGER */ - 0, /* (31) is_createdb_opt ::= */ - -2, /* (32) is_createdb_opt ::= CREATEDB NK_INTEGER */ - -9, /* (33) cmd ::= CREATE USER 
user_name PASS NK_STRING sysinfo_opt is_createdb_opt is_import_opt white_list_opt */ - -5, /* (34) cmd ::= ALTER USER user_name PASS NK_STRING */ - -5, /* (35) cmd ::= ALTER USER user_name ENABLE NK_INTEGER */ - -5, /* (36) cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */ - -5, /* (37) cmd ::= ALTER USER user_name CREATEDB NK_INTEGER */ - -5, /* (38) cmd ::= ALTER USER user_name ADD white_list */ - -5, /* (39) cmd ::= ALTER USER user_name DROP white_list */ - -3, /* (40) cmd ::= DROP USER user_name */ - 0, /* (41) sysinfo_opt ::= */ - -2, /* (42) sysinfo_opt ::= SYSINFO NK_INTEGER */ - -7, /* (43) cmd ::= GRANT privileges ON priv_level with_clause_opt TO user_name */ - -7, /* (44) cmd ::= REVOKE privileges ON priv_level with_clause_opt FROM user_name */ - -1, /* (45) privileges ::= ALL */ - -1, /* (46) privileges ::= priv_type_list */ - -1, /* (47) privileges ::= SUBSCRIBE */ - -1, /* (48) priv_type_list ::= priv_type */ - -3, /* (49) priv_type_list ::= priv_type_list NK_COMMA priv_type */ - -1, /* (50) priv_type ::= READ */ - -1, /* (51) priv_type ::= WRITE */ - -1, /* (52) priv_type ::= ALTER */ - -3, /* (53) priv_level ::= NK_STAR NK_DOT NK_STAR */ - -3, /* (54) priv_level ::= db_name NK_DOT NK_STAR */ - -3, /* (55) priv_level ::= db_name NK_DOT table_name */ - -1, /* (56) priv_level ::= topic_name */ - 0, /* (57) with_clause_opt ::= */ - -2, /* (58) with_clause_opt ::= WITH search_condition */ - -3, /* (59) cmd ::= CREATE ENCRYPT_KEY NK_STRING */ - -3, /* (60) cmd ::= CREATE ANODE NK_STRING */ - -3, /* (61) cmd ::= UPDATE ANODE NK_INTEGER */ - -3, /* (62) cmd ::= UPDATE ALL ANODES */ - -3, /* (63) cmd ::= DROP ANODE NK_INTEGER */ - -3, /* (64) cmd ::= CREATE DNODE dnode_endpoint */ - -5, /* (65) cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */ - -4, /* (66) cmd ::= DROP DNODE NK_INTEGER force_opt */ - -4, /* (67) cmd ::= DROP DNODE dnode_endpoint force_opt */ - -4, /* (68) cmd ::= DROP DNODE NK_INTEGER unsafe_opt */ - -4, /* (69) cmd ::= DROP DNODE 
dnode_endpoint unsafe_opt */ - -4, /* (70) cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ - -5, /* (71) cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */ - -4, /* (72) cmd ::= ALTER ALL DNODES NK_STRING */ - -5, /* (73) cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */ - -3, /* (74) cmd ::= RESTORE DNODE NK_INTEGER */ - -1, /* (75) dnode_endpoint ::= NK_STRING */ - -1, /* (76) dnode_endpoint ::= NK_ID */ - -1, /* (77) dnode_endpoint ::= NK_IPTOKEN */ - 0, /* (78) force_opt ::= */ - -1, /* (79) force_opt ::= FORCE */ - -1, /* (80) unsafe_opt ::= UNSAFE */ - -3, /* (81) cmd ::= ALTER CLUSTER NK_STRING */ - -4, /* (82) cmd ::= ALTER CLUSTER NK_STRING NK_STRING */ - -3, /* (83) cmd ::= ALTER LOCAL NK_STRING */ - -4, /* (84) cmd ::= ALTER LOCAL NK_STRING NK_STRING */ - -5, /* (85) cmd ::= CREATE QNODE ON DNODE NK_INTEGER */ - -5, /* (86) cmd ::= DROP QNODE ON DNODE NK_INTEGER */ - -5, /* (87) cmd ::= RESTORE QNODE ON DNODE NK_INTEGER */ - -5, /* (88) cmd ::= CREATE BNODE ON DNODE NK_INTEGER */ - -5, /* (89) cmd ::= DROP BNODE ON DNODE NK_INTEGER */ - -5, /* (90) cmd ::= CREATE SNODE ON DNODE NK_INTEGER */ - -5, /* (91) cmd ::= DROP SNODE ON DNODE NK_INTEGER */ - -5, /* (92) cmd ::= CREATE MNODE ON DNODE NK_INTEGER */ - -5, /* (93) cmd ::= DROP MNODE ON DNODE NK_INTEGER */ - -5, /* (94) cmd ::= RESTORE MNODE ON DNODE NK_INTEGER */ - -5, /* (95) cmd ::= RESTORE VNODE ON DNODE NK_INTEGER */ - -5, /* (96) cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ - -4, /* (97) cmd ::= DROP DATABASE exists_opt db_name */ - -2, /* (98) cmd ::= USE db_name */ - -4, /* (99) cmd ::= ALTER DATABASE db_name alter_db_options */ - -3, /* (100) cmd ::= FLUSH DATABASE db_name */ - -4, /* (101) cmd ::= TRIM DATABASE db_name speed_opt */ - -3, /* (102) cmd ::= S3MIGRATE DATABASE db_name */ - -5, /* (103) cmd ::= COMPACT DATABASE db_name start_opt end_opt */ - -3, /* (104) not_exists_opt ::= IF NOT EXISTS */ - 0, /* (105) not_exists_opt ::= */ - -2, /* (106) exists_opt ::= IF EXISTS */ 
- 0, /* (107) exists_opt ::= */ - 0, /* (108) db_options ::= */ - -3, /* (109) db_options ::= db_options BUFFER NK_INTEGER */ - -3, /* (110) db_options ::= db_options CACHEMODEL NK_STRING */ - -3, /* (111) db_options ::= db_options CACHESIZE NK_INTEGER */ - -3, /* (112) db_options ::= db_options COMP NK_INTEGER */ - -3, /* (113) db_options ::= db_options DURATION NK_INTEGER */ - -3, /* (114) db_options ::= db_options DURATION NK_VARIABLE */ - -3, /* (115) db_options ::= db_options MAXROWS NK_INTEGER */ - -3, /* (116) db_options ::= db_options MINROWS NK_INTEGER */ - -3, /* (117) db_options ::= db_options KEEP integer_list */ - -3, /* (118) db_options ::= db_options KEEP variable_list */ - -3, /* (119) db_options ::= db_options PAGES NK_INTEGER */ - -3, /* (120) db_options ::= db_options PAGESIZE NK_INTEGER */ - -3, /* (121) db_options ::= db_options TSDB_PAGESIZE NK_INTEGER */ - -3, /* (122) db_options ::= db_options PRECISION NK_STRING */ - -3, /* (123) db_options ::= db_options REPLICA NK_INTEGER */ - -3, /* (124) db_options ::= db_options VGROUPS NK_INTEGER */ - -3, /* (125) db_options ::= db_options SINGLE_STABLE NK_INTEGER */ - -3, /* (126) db_options ::= db_options RETENTIONS retention_list */ - -3, /* (127) db_options ::= db_options SCHEMALESS NK_INTEGER */ - -3, /* (128) db_options ::= db_options WAL_LEVEL NK_INTEGER */ - -3, /* (129) db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */ - -3, /* (130) db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */ - -4, /* (131) db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ - -3, /* (132) db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */ - -4, /* (133) db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ - -3, /* (134) db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */ - -3, /* (135) db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */ - -3, /* (136) db_options ::= db_options STT_TRIGGER NK_INTEGER */ - -3, /* (137) db_options ::= db_options 
TABLE_PREFIX signed */ - -3, /* (138) db_options ::= db_options TABLE_SUFFIX signed */ - -3, /* (139) db_options ::= db_options S3_CHUNKSIZE NK_INTEGER */ - -3, /* (140) db_options ::= db_options S3_KEEPLOCAL NK_INTEGER */ - -3, /* (141) db_options ::= db_options S3_KEEPLOCAL NK_VARIABLE */ - -3, /* (142) db_options ::= db_options S3_COMPACT NK_INTEGER */ - -3, /* (143) db_options ::= db_options KEEP_TIME_OFFSET NK_INTEGER */ - -3, /* (144) db_options ::= db_options ENCRYPT_ALGORITHM NK_STRING */ - -1, /* (145) alter_db_options ::= alter_db_option */ - -2, /* (146) alter_db_options ::= alter_db_options alter_db_option */ - -2, /* (147) alter_db_option ::= BUFFER NK_INTEGER */ - -2, /* (148) alter_db_option ::= CACHEMODEL NK_STRING */ - -2, /* (149) alter_db_option ::= CACHESIZE NK_INTEGER */ - -2, /* (150) alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */ - -2, /* (151) alter_db_option ::= KEEP integer_list */ - -2, /* (152) alter_db_option ::= KEEP variable_list */ - -2, /* (153) alter_db_option ::= PAGES NK_INTEGER */ - -2, /* (154) alter_db_option ::= REPLICA NK_INTEGER */ - -2, /* (155) alter_db_option ::= WAL_LEVEL NK_INTEGER */ - -2, /* (156) alter_db_option ::= STT_TRIGGER NK_INTEGER */ - -2, /* (157) alter_db_option ::= MINROWS NK_INTEGER */ - -2, /* (158) alter_db_option ::= WAL_RETENTION_PERIOD NK_INTEGER */ - -3, /* (159) alter_db_option ::= WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ - -2, /* (160) alter_db_option ::= WAL_RETENTION_SIZE NK_INTEGER */ - -3, /* (161) alter_db_option ::= WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ - -2, /* (162) alter_db_option ::= S3_KEEPLOCAL NK_INTEGER */ - -2, /* (163) alter_db_option ::= S3_KEEPLOCAL NK_VARIABLE */ - -2, /* (164) alter_db_option ::= S3_COMPACT NK_INTEGER */ - -2, /* (165) alter_db_option ::= KEEP_TIME_OFFSET NK_INTEGER */ - -2, /* (166) alter_db_option ::= ENCRYPT_ALGORITHM NK_STRING */ - -1, /* (167) integer_list ::= NK_INTEGER */ - -3, /* (168) integer_list ::= integer_list NK_COMMA NK_INTEGER */ - 
-1, /* (169) variable_list ::= NK_VARIABLE */ - -3, /* (170) variable_list ::= variable_list NK_COMMA NK_VARIABLE */ - -1, /* (171) retention_list ::= retention */ - -3, /* (172) retention_list ::= retention_list NK_COMMA retention */ - -3, /* (173) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ - -3, /* (174) retention ::= NK_MINUS NK_COLON NK_VARIABLE */ - 0, /* (175) speed_opt ::= */ - -2, /* (176) speed_opt ::= BWLIMIT NK_INTEGER */ - 0, /* (177) start_opt ::= */ - -3, /* (178) start_opt ::= START WITH NK_INTEGER */ - -3, /* (179) start_opt ::= START WITH NK_STRING */ - -4, /* (180) start_opt ::= START WITH TIMESTAMP NK_STRING */ - 0, /* (181) end_opt ::= */ - -3, /* (182) end_opt ::= END WITH NK_INTEGER */ - -3, /* (183) end_opt ::= END WITH NK_STRING */ - -4, /* (184) end_opt ::= END WITH TIMESTAMP NK_STRING */ - -9, /* (185) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ - -3, /* (186) cmd ::= CREATE TABLE multi_create_clause */ - -10, /* (187) cmd ::= CREATE TABLE not_exists_opt USING full_table_name NK_LP tag_list_opt NK_RP FILE NK_STRING */ - -9, /* (188) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ - -4, /* (189) cmd ::= DROP TABLE with_opt multi_drop_clause */ - -5, /* (190) cmd ::= DROP STABLE with_opt exists_opt full_table_name */ - -3, /* (191) cmd ::= ALTER TABLE alter_table_clause */ - -3, /* (192) cmd ::= ALTER STABLE alter_table_clause */ - -2, /* (193) alter_table_clause ::= full_table_name alter_table_options */ - -6, /* (194) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name column_options */ - -4, /* (195) alter_table_clause ::= full_table_name DROP COLUMN column_name */ - -5, /* (196) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ - -5, /* (197) alter_table_clause ::= full_table_name MODIFY COLUMN column_name column_options */ - -5, /* (198) alter_table_clause ::= 
full_table_name RENAME COLUMN column_name column_name */ - -5, /* (199) alter_table_clause ::= full_table_name ADD TAG column_name type_name */ - -4, /* (200) alter_table_clause ::= full_table_name DROP TAG column_name */ - -5, /* (201) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ - -5, /* (202) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ - -6, /* (203) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ tags_literal */ - -1, /* (204) multi_create_clause ::= create_subtable_clause */ - -2, /* (205) multi_create_clause ::= multi_create_clause create_subtable_clause */ - -10, /* (206) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP tags_literal_list NK_RP table_options */ - -1, /* (207) multi_drop_clause ::= drop_table_clause */ - -3, /* (208) multi_drop_clause ::= multi_drop_clause NK_COMMA drop_table_clause */ - -2, /* (209) drop_table_clause ::= exists_opt full_table_name */ - 0, /* (210) with_opt ::= */ - -1, /* (211) with_opt ::= WITH */ - 0, /* (212) specific_cols_opt ::= */ - -3, /* (213) specific_cols_opt ::= NK_LP col_name_list NK_RP */ - -1, /* (214) full_table_name ::= table_name */ - -3, /* (215) full_table_name ::= db_name NK_DOT table_name */ - -1, /* (216) tag_def_list ::= tag_def */ - -3, /* (217) tag_def_list ::= tag_def_list NK_COMMA tag_def */ - -2, /* (218) tag_def ::= column_name type_name */ - -1, /* (219) column_def_list ::= column_def */ - -3, /* (220) column_def_list ::= column_def_list NK_COMMA column_def */ - -3, /* (221) column_def ::= column_name type_name column_options */ - -1, /* (222) type_name ::= BOOL */ - -1, /* (223) type_name ::= TINYINT */ - -1, /* (224) type_name ::= SMALLINT */ - -1, /* (225) type_name ::= INT */ - -1, /* (226) type_name ::= INTEGER */ - -1, /* (227) type_name ::= BIGINT */ - -1, /* (228) type_name ::= FLOAT */ - -1, /* (229) type_name ::= DOUBLE */ - -4, /* (230) type_name 
::= BINARY NK_LP NK_INTEGER NK_RP */ - -1, /* (231) type_name ::= TIMESTAMP */ - -4, /* (232) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ - -2, /* (233) type_name ::= TINYINT UNSIGNED */ - -2, /* (234) type_name ::= SMALLINT UNSIGNED */ - -2, /* (235) type_name ::= INT UNSIGNED */ - -2, /* (236) type_name ::= BIGINT UNSIGNED */ - -1, /* (237) type_name ::= JSON */ - -4, /* (238) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ - -1, /* (239) type_name ::= MEDIUMBLOB */ - -1, /* (240) type_name ::= BLOB */ - -4, /* (241) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ - -4, /* (242) type_name ::= GEOMETRY NK_LP NK_INTEGER NK_RP */ - -1, /* (243) type_name ::= DECIMAL */ - -4, /* (244) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ - -6, /* (245) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ - -1, /* (246) type_name_default_len ::= BINARY */ - -1, /* (247) type_name_default_len ::= NCHAR */ - -1, /* (248) type_name_default_len ::= VARCHAR */ - -1, /* (249) type_name_default_len ::= VARBINARY */ - 0, /* (250) tags_def_opt ::= */ - -1, /* (251) tags_def_opt ::= tags_def */ - -4, /* (252) tags_def ::= TAGS NK_LP tag_def_list NK_RP */ - 0, /* (253) table_options ::= */ - -3, /* (254) table_options ::= table_options COMMENT NK_STRING */ - -3, /* (255) table_options ::= table_options MAX_DELAY duration_list */ - -3, /* (256) table_options ::= table_options WATERMARK duration_list */ - -5, /* (257) table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */ - -3, /* (258) table_options ::= table_options TTL NK_INTEGER */ - -5, /* (259) table_options ::= table_options SMA NK_LP col_name_list NK_RP */ - -3, /* (260) table_options ::= table_options DELETE_MARK duration_list */ - -1, /* (261) alter_table_options ::= alter_table_option */ - -2, /* (262) alter_table_options ::= alter_table_options alter_table_option */ - -2, /* (263) alter_table_option ::= COMMENT NK_STRING */ - -2, /* (264) alter_table_option ::= TTL NK_INTEGER */ - -1, /* 
(265) duration_list ::= duration_literal */ - -3, /* (266) duration_list ::= duration_list NK_COMMA duration_literal */ - -1, /* (267) rollup_func_list ::= rollup_func_name */ - -3, /* (268) rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ - -1, /* (269) rollup_func_name ::= function_name */ - -1, /* (270) rollup_func_name ::= FIRST */ - -1, /* (271) rollup_func_name ::= LAST */ - -1, /* (272) col_name_list ::= col_name */ - -3, /* (273) col_name_list ::= col_name_list NK_COMMA col_name */ - -1, /* (274) col_name ::= column_name */ - -2, /* (275) cmd ::= SHOW DNODES */ - -2, /* (276) cmd ::= SHOW USERS */ - -3, /* (277) cmd ::= SHOW USERS FULL */ - -3, /* (278) cmd ::= SHOW USER PRIVILEGES */ - -3, /* (279) cmd ::= SHOW db_kind_opt DATABASES */ - -4, /* (280) cmd ::= SHOW table_kind_db_name_cond_opt TABLES like_pattern_opt */ - -4, /* (281) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ - -3, /* (282) cmd ::= SHOW db_name_cond_opt VGROUPS */ - -2, /* (283) cmd ::= SHOW MNODES */ - -2, /* (284) cmd ::= SHOW QNODES */ - -2, /* (285) cmd ::= SHOW ANODES */ - -3, /* (286) cmd ::= SHOW ANODES FULL */ - -2, /* (287) cmd ::= SHOW ARBGROUPS */ - -2, /* (288) cmd ::= SHOW FUNCTIONS */ - -5, /* (289) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ - -6, /* (290) cmd ::= SHOW INDEXES FROM db_name NK_DOT table_name */ - -2, /* (291) cmd ::= SHOW STREAMS */ - -2, /* (292) cmd ::= SHOW ACCOUNTS */ - -2, /* (293) cmd ::= SHOW APPS */ - -2, /* (294) cmd ::= SHOW CONNECTIONS */ - -2, /* (295) cmd ::= SHOW LICENCES */ - -2, /* (296) cmd ::= SHOW GRANTS */ - -3, /* (297) cmd ::= SHOW GRANTS FULL */ - -3, /* (298) cmd ::= SHOW GRANTS LOGS */ - -3, /* (299) cmd ::= SHOW CLUSTER MACHINES */ - -4, /* (300) cmd ::= SHOW CREATE DATABASE db_name */ - -4, /* (301) cmd ::= SHOW CREATE TABLE full_table_name */ - -4, /* (302) cmd ::= SHOW CREATE STABLE full_table_name */ - -2, /* (303) cmd ::= SHOW ENCRYPTIONS */ - -2, /* (304) cmd ::= SHOW QUERIES */ - -2, 
/* (305) cmd ::= SHOW SCORES */ - -2, /* (306) cmd ::= SHOW TOPICS */ - -2, /* (307) cmd ::= SHOW VARIABLES */ - -3, /* (308) cmd ::= SHOW CLUSTER VARIABLES */ - -3, /* (309) cmd ::= SHOW LOCAL VARIABLES */ - -5, /* (310) cmd ::= SHOW DNODE NK_INTEGER VARIABLES like_pattern_opt */ - -2, /* (311) cmd ::= SHOW BNODES */ - -2, /* (312) cmd ::= SHOW SNODES */ - -2, /* (313) cmd ::= SHOW CLUSTER */ - -2, /* (314) cmd ::= SHOW TRANSACTIONS */ - -4, /* (315) cmd ::= SHOW TABLE DISTRIBUTED full_table_name */ - -2, /* (316) cmd ::= SHOW CONSUMERS */ - -2, /* (317) cmd ::= SHOW SUBSCRIPTIONS */ - -5, /* (318) cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */ - -6, /* (319) cmd ::= SHOW TAGS FROM db_name NK_DOT table_name */ - -7, /* (320) cmd ::= SHOW TABLE TAGS tag_list_opt FROM table_name_cond from_db_opt */ - -8, /* (321) cmd ::= SHOW TABLE TAGS tag_list_opt FROM db_name NK_DOT table_name */ - -5, /* (322) cmd ::= SHOW VNODES ON DNODE NK_INTEGER */ - -2, /* (323) cmd ::= SHOW VNODES */ - -3, /* (324) cmd ::= SHOW db_name_cond_opt ALIVE */ - -3, /* (325) cmd ::= SHOW CLUSTER ALIVE */ - -4, /* (326) cmd ::= SHOW db_name_cond_opt VIEWS like_pattern_opt */ - -4, /* (327) cmd ::= SHOW CREATE VIEW full_table_name */ - -2, /* (328) cmd ::= SHOW COMPACTS */ - -3, /* (329) cmd ::= SHOW COMPACT NK_INTEGER */ - 0, /* (330) table_kind_db_name_cond_opt ::= */ - -1, /* (331) table_kind_db_name_cond_opt ::= table_kind */ - -2, /* (332) table_kind_db_name_cond_opt ::= db_name NK_DOT */ - -3, /* (333) table_kind_db_name_cond_opt ::= table_kind db_name NK_DOT */ - -1, /* (334) table_kind ::= NORMAL */ - -1, /* (335) table_kind ::= CHILD */ - 0, /* (336) db_name_cond_opt ::= */ - -2, /* (337) db_name_cond_opt ::= db_name NK_DOT */ - 0, /* (338) like_pattern_opt ::= */ - -2, /* (339) like_pattern_opt ::= LIKE NK_STRING */ - -1, /* (340) table_name_cond ::= table_name */ - 0, /* (341) from_db_opt ::= */ - -2, /* (342) from_db_opt ::= FROM db_name */ - 0, /* (343) tag_list_opt ::= */ - -1, 
/* (344) tag_list_opt ::= tag_item */ - -3, /* (345) tag_list_opt ::= tag_list_opt NK_COMMA tag_item */ - -1, /* (346) tag_item ::= TBNAME */ - -1, /* (347) tag_item ::= QTAGS */ - -1, /* (348) tag_item ::= column_name */ - -2, /* (349) tag_item ::= column_name column_alias */ - -3, /* (350) tag_item ::= column_name AS column_alias */ - 0, /* (351) db_kind_opt ::= */ - -1, /* (352) db_kind_opt ::= USER */ - -1, /* (353) db_kind_opt ::= SYSTEM */ - -11, /* (354) cmd ::= CREATE TSMA not_exists_opt tsma_name ON full_table_name tsma_func_list INTERVAL NK_LP duration_literal NK_RP */ - -11, /* (355) cmd ::= CREATE RECURSIVE TSMA not_exists_opt tsma_name ON full_table_name INTERVAL NK_LP duration_literal NK_RP */ - -4, /* (356) cmd ::= DROP TSMA exists_opt full_tsma_name */ - -3, /* (357) cmd ::= SHOW db_name_cond_opt TSMAS */ - -1, /* (358) full_tsma_name ::= tsma_name */ - -3, /* (359) full_tsma_name ::= db_name NK_DOT tsma_name */ - -4, /* (360) tsma_func_list ::= FUNCTION NK_LP func_list NK_RP */ - -8, /* (361) cmd ::= CREATE SMA INDEX not_exists_opt col_name ON full_table_name index_options */ - -9, /* (362) cmd ::= CREATE INDEX not_exists_opt col_name ON full_table_name NK_LP col_name_list NK_RP */ - -4, /* (363) cmd ::= DROP INDEX exists_opt full_index_name */ - -1, /* (364) full_index_name ::= index_name */ - -3, /* (365) full_index_name ::= db_name NK_DOT index_name */ - -10, /* (366) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */ - -12, /* (367) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */ - -1, /* (368) func_list ::= func */ - -3, /* (369) func_list ::= func_list NK_COMMA func */ - -4, /* (370) func ::= sma_func_name NK_LP expression_list NK_RP */ - -1, /* (371) sma_func_name ::= function_name */ - -1, /* (372) sma_func_name ::= COUNT */ - -1, /* (373) sma_func_name ::= FIRST */ - -1, /* (374) 
sma_func_name ::= LAST */ - -1, /* (375) sma_func_name ::= LAST_ROW */ - 0, /* (376) sma_stream_opt ::= */ - -3, /* (377) sma_stream_opt ::= sma_stream_opt WATERMARK duration_literal */ - -3, /* (378) sma_stream_opt ::= sma_stream_opt MAX_DELAY duration_literal */ - -3, /* (379) sma_stream_opt ::= sma_stream_opt DELETE_MARK duration_literal */ - -1, /* (380) with_meta ::= AS */ - -3, /* (381) with_meta ::= WITH META AS */ - -3, /* (382) with_meta ::= ONLY META AS */ - -6, /* (383) cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery */ - -7, /* (384) cmd ::= CREATE TOPIC not_exists_opt topic_name with_meta DATABASE db_name */ - -8, /* (385) cmd ::= CREATE TOPIC not_exists_opt topic_name with_meta STABLE full_table_name where_clause_opt */ - -4, /* (386) cmd ::= DROP TOPIC exists_opt topic_name */ - -7, /* (387) cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */ - -2, /* (388) cmd ::= DESC full_table_name */ - -2, /* (389) cmd ::= DESCRIBE full_table_name */ - -3, /* (390) cmd ::= RESET QUERY CACHE */ - -4, /* (391) cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery */ - -4, /* (392) cmd ::= EXPLAIN analyze_opt explain_options insert_query */ - 0, /* (393) analyze_opt ::= */ - -1, /* (394) analyze_opt ::= ANALYZE */ - 0, /* (395) explain_options ::= */ - -3, /* (396) explain_options ::= explain_options VERBOSE NK_BOOL */ - -3, /* (397) explain_options ::= explain_options RATIO NK_FLOAT */ - -12, /* (398) cmd ::= CREATE or_replace_opt agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt language_opt */ - -4, /* (399) cmd ::= DROP FUNCTION exists_opt function_name */ - 0, /* (400) agg_func_opt ::= */ - -1, /* (401) agg_func_opt ::= AGGREGATE */ - 0, /* (402) bufsize_opt ::= */ - -2, /* (403) bufsize_opt ::= BUFSIZE NK_INTEGER */ - 0, /* (404) language_opt ::= */ - -2, /* (405) language_opt ::= LANGUAGE NK_STRING */ - 0, /* (406) or_replace_opt ::= */ - -2, /* (407) or_replace_opt 
::= OR REPLACE */ - -6, /* (408) cmd ::= CREATE or_replace_opt VIEW full_view_name AS query_or_subquery */ - -4, /* (409) cmd ::= DROP VIEW exists_opt full_view_name */ - -1, /* (410) full_view_name ::= view_name */ - -3, /* (411) full_view_name ::= db_name NK_DOT view_name */ - -12, /* (412) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name col_list_opt tag_def_or_ref_opt subtable_opt AS query_or_subquery */ - -4, /* (413) cmd ::= DROP STREAM exists_opt stream_name */ - -4, /* (414) cmd ::= PAUSE STREAM exists_opt stream_name */ - -5, /* (415) cmd ::= RESUME STREAM exists_opt ignore_opt stream_name */ - 0, /* (416) col_list_opt ::= */ - -3, /* (417) col_list_opt ::= NK_LP column_stream_def_list NK_RP */ - -1, /* (418) column_stream_def_list ::= column_stream_def */ - -3, /* (419) column_stream_def_list ::= column_stream_def_list NK_COMMA column_stream_def */ - -2, /* (420) column_stream_def ::= column_name stream_col_options */ - 0, /* (421) stream_col_options ::= */ - -3, /* (422) stream_col_options ::= stream_col_options PRIMARY KEY */ - 0, /* (423) tag_def_or_ref_opt ::= */ - -1, /* (424) tag_def_or_ref_opt ::= tags_def */ - -4, /* (425) tag_def_or_ref_opt ::= TAGS NK_LP column_stream_def_list NK_RP */ - 0, /* (426) stream_options ::= */ - -3, /* (427) stream_options ::= stream_options TRIGGER AT_ONCE */ - -3, /* (428) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ - -4, /* (429) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ - -3, /* (430) stream_options ::= stream_options WATERMARK duration_literal */ - -4, /* (431) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ - -3, /* (432) stream_options ::= stream_options FILL_HISTORY NK_INTEGER */ - -3, /* (433) stream_options ::= stream_options DELETE_MARK duration_literal */ - -4, /* (434) stream_options ::= stream_options IGNORE UPDATE NK_INTEGER */ - 0, /* (435) subtable_opt ::= */ - -4, /* (436) subtable_opt ::= SUBTABLE NK_LP 
expression NK_RP */ - 0, /* (437) ignore_opt ::= */ - -2, /* (438) ignore_opt ::= IGNORE UNTREATED */ - -3, /* (439) cmd ::= KILL CONNECTION NK_INTEGER */ - -3, /* (440) cmd ::= KILL QUERY NK_STRING */ - -3, /* (441) cmd ::= KILL TRANSACTION NK_INTEGER */ - -3, /* (442) cmd ::= KILL COMPACT NK_INTEGER */ - -2, /* (443) cmd ::= BALANCE VGROUP */ - -4, /* (444) cmd ::= BALANCE VGROUP LEADER on_vgroup_id */ - -5, /* (445) cmd ::= BALANCE VGROUP LEADER DATABASE db_name */ - -4, /* (446) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ - -4, /* (447) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ - -3, /* (448) cmd ::= SPLIT VGROUP NK_INTEGER */ - 0, /* (449) on_vgroup_id ::= */ - -2, /* (450) on_vgroup_id ::= ON NK_INTEGER */ - -2, /* (451) dnode_list ::= DNODE NK_INTEGER */ - -3, /* (452) dnode_list ::= dnode_list DNODE NK_INTEGER */ - -4, /* (453) cmd ::= DELETE FROM full_table_name where_clause_opt */ - -1, /* (454) cmd ::= query_or_subquery */ - -1, /* (455) cmd ::= insert_query */ - -7, /* (456) insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ - -4, /* (457) insert_query ::= INSERT INTO full_table_name query_or_subquery */ - -1, /* (458) tags_literal ::= NK_INTEGER */ - -3, /* (459) tags_literal ::= NK_INTEGER NK_PLUS duration_literal */ - -3, /* (460) tags_literal ::= NK_INTEGER NK_MINUS duration_literal */ - -2, /* (461) tags_literal ::= NK_PLUS NK_INTEGER */ - -4, /* (462) tags_literal ::= NK_PLUS NK_INTEGER NK_PLUS duration_literal */ - -4, /* (463) tags_literal ::= NK_PLUS NK_INTEGER NK_MINUS duration_literal */ - -2, /* (464) tags_literal ::= NK_MINUS NK_INTEGER */ - -4, /* (465) tags_literal ::= NK_MINUS NK_INTEGER NK_PLUS duration_literal */ - -4, /* (466) tags_literal ::= NK_MINUS NK_INTEGER NK_MINUS duration_literal */ - -1, /* (467) tags_literal ::= NK_FLOAT */ - -2, /* (468) tags_literal ::= NK_PLUS NK_FLOAT */ - -2, /* (469) tags_literal ::= NK_MINUS NK_FLOAT */ - -1, /* (470) tags_literal ::= NK_BIN */ - 
-3, /* (471) tags_literal ::= NK_BIN NK_PLUS duration_literal */ - -3, /* (472) tags_literal ::= NK_BIN NK_MINUS duration_literal */ - -2, /* (473) tags_literal ::= NK_PLUS NK_BIN */ - -4, /* (474) tags_literal ::= NK_PLUS NK_BIN NK_PLUS duration_literal */ - -4, /* (475) tags_literal ::= NK_PLUS NK_BIN NK_MINUS duration_literal */ - -2, /* (476) tags_literal ::= NK_MINUS NK_BIN */ - -4, /* (477) tags_literal ::= NK_MINUS NK_BIN NK_PLUS duration_literal */ - -4, /* (478) tags_literal ::= NK_MINUS NK_BIN NK_MINUS duration_literal */ - -1, /* (479) tags_literal ::= NK_HEX */ - -3, /* (480) tags_literal ::= NK_HEX NK_PLUS duration_literal */ - -3, /* (481) tags_literal ::= NK_HEX NK_MINUS duration_literal */ - -2, /* (482) tags_literal ::= NK_PLUS NK_HEX */ - -4, /* (483) tags_literal ::= NK_PLUS NK_HEX NK_PLUS duration_literal */ - -4, /* (484) tags_literal ::= NK_PLUS NK_HEX NK_MINUS duration_literal */ - -2, /* (485) tags_literal ::= NK_MINUS NK_HEX */ - -4, /* (486) tags_literal ::= NK_MINUS NK_HEX NK_PLUS duration_literal */ - -4, /* (487) tags_literal ::= NK_MINUS NK_HEX NK_MINUS duration_literal */ - -1, /* (488) tags_literal ::= NK_STRING */ - -3, /* (489) tags_literal ::= NK_STRING NK_PLUS duration_literal */ - -3, /* (490) tags_literal ::= NK_STRING NK_MINUS duration_literal */ - -1, /* (491) tags_literal ::= NK_BOOL */ - -1, /* (492) tags_literal ::= NULL */ - -1, /* (493) tags_literal ::= literal_func */ - -3, /* (494) tags_literal ::= literal_func NK_PLUS duration_literal */ - -3, /* (495) tags_literal ::= literal_func NK_MINUS duration_literal */ - -1, /* (496) tags_literal_list ::= tags_literal */ - -3, /* (497) tags_literal_list ::= tags_literal_list NK_COMMA tags_literal */ - -1, /* (498) literal ::= NK_INTEGER */ - -1, /* (499) literal ::= NK_FLOAT */ - -1, /* (500) literal ::= NK_STRING */ - -1, /* (501) literal ::= NK_BOOL */ - -2, /* (502) literal ::= TIMESTAMP NK_STRING */ - -1, /* (503) literal ::= duration_literal */ - -1, /* (504) literal ::= 
NULL */ - -1, /* (505) literal ::= NK_QUESTION */ - -1, /* (506) duration_literal ::= NK_VARIABLE */ - -1, /* (507) signed ::= NK_INTEGER */ - -2, /* (508) signed ::= NK_PLUS NK_INTEGER */ - -2, /* (509) signed ::= NK_MINUS NK_INTEGER */ - -1, /* (510) signed ::= NK_FLOAT */ - -2, /* (511) signed ::= NK_PLUS NK_FLOAT */ - -2, /* (512) signed ::= NK_MINUS NK_FLOAT */ - -1, /* (513) signed_literal ::= signed */ - -1, /* (514) signed_literal ::= NK_STRING */ - -1, /* (515) signed_literal ::= NK_BOOL */ - -2, /* (516) signed_literal ::= TIMESTAMP NK_STRING */ - -1, /* (517) signed_literal ::= duration_literal */ - -1, /* (518) signed_literal ::= NULL */ - -1, /* (519) signed_literal ::= literal_func */ - -1, /* (520) signed_literal ::= NK_QUESTION */ - -1, /* (521) literal_list ::= signed_literal */ - -3, /* (522) literal_list ::= literal_list NK_COMMA signed_literal */ - -1, /* (523) db_name ::= NK_ID */ - -1, /* (524) table_name ::= NK_ID */ - -1, /* (525) column_name ::= NK_ID */ - -1, /* (526) function_name ::= NK_ID */ - -1, /* (527) view_name ::= NK_ID */ - -1, /* (528) table_alias ::= NK_ID */ - -1, /* (529) column_alias ::= NK_ID */ - -1, /* (530) column_alias ::= NK_ALIAS */ - -1, /* (531) user_name ::= NK_ID */ - -1, /* (532) topic_name ::= NK_ID */ - -1, /* (533) stream_name ::= NK_ID */ - -1, /* (534) cgroup_name ::= NK_ID */ - -1, /* (535) index_name ::= NK_ID */ - -1, /* (536) tsma_name ::= NK_ID */ - -1, /* (537) expr_or_subquery ::= expression */ - -1, /* (538) expression ::= literal */ - -1, /* (539) expression ::= pseudo_column */ - -1, /* (540) expression ::= column_reference */ - -1, /* (541) expression ::= function_expression */ - -1, /* (542) expression ::= case_when_expression */ - -3, /* (543) expression ::= NK_LP expression NK_RP */ - -2, /* (544) expression ::= NK_PLUS expr_or_subquery */ - -2, /* (545) expression ::= NK_MINUS expr_or_subquery */ - -3, /* (546) expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ - -3, /* (547) 
expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ - -3, /* (548) expression ::= expr_or_subquery NK_STAR expr_or_subquery */ - -3, /* (549) expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ - -3, /* (550) expression ::= expr_or_subquery NK_REM expr_or_subquery */ - -3, /* (551) expression ::= column_reference NK_ARROW NK_STRING */ - -3, /* (552) expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ - -3, /* (553) expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ - -1, /* (554) expression_list ::= expr_or_subquery */ - -3, /* (555) expression_list ::= expression_list NK_COMMA expr_or_subquery */ - -1, /* (556) column_reference ::= column_name */ - -3, /* (557) column_reference ::= table_name NK_DOT column_name */ - -1, /* (558) column_reference ::= NK_ALIAS */ - -3, /* (559) column_reference ::= table_name NK_DOT NK_ALIAS */ - -1, /* (560) pseudo_column ::= ROWTS */ - -1, /* (561) pseudo_column ::= TBNAME */ - -3, /* (562) pseudo_column ::= table_name NK_DOT TBNAME */ - -1, /* (563) pseudo_column ::= QSTART */ - -1, /* (564) pseudo_column ::= QEND */ - -1, /* (565) pseudo_column ::= QDURATION */ - -1, /* (566) pseudo_column ::= WSTART */ - -1, /* (567) pseudo_column ::= WEND */ - -1, /* (568) pseudo_column ::= WDURATION */ - -1, /* (569) pseudo_column ::= IROWTS */ - -1, /* (570) pseudo_column ::= ISFILLED */ - -1, /* (571) pseudo_column ::= QTAGS */ - -1, /* (572) pseudo_column ::= FLOW */ - -1, /* (573) pseudo_column ::= FHIGH */ - -1, /* (574) pseudo_column ::= FROWTS */ - -4, /* (575) function_expression ::= function_name NK_LP expression_list NK_RP */ - -4, /* (576) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ - -6, /* (577) function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ - -6, /* (578) function_expression ::= CAST NK_LP expr_or_subquery AS type_name_default_len NK_RP */ - -6, /* (579) function_expression ::= POSITION NK_LP expr_or_subquery IN expr_or_subquery NK_RP */ - 
-4, /* (580) function_expression ::= TRIM NK_LP expr_or_subquery NK_RP */ - -6, /* (581) function_expression ::= TRIM NK_LP trim_specification_type FROM expr_or_subquery NK_RP */ - -6, /* (582) function_expression ::= TRIM NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ - -7, /* (583) function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP */ - -4, /* (584) function_expression ::= substr_func NK_LP expression_list NK_RP */ - -6, /* (585) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ - -8, /* (586) function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP */ - -4, /* (587) function_expression ::= REPLACE NK_LP expression_list NK_RP */ - -1, /* (588) function_expression ::= literal_func */ - -1, /* (589) function_expression ::= rand_func */ - -3, /* (590) literal_func ::= noarg_func NK_LP NK_RP */ - -1, /* (591) literal_func ::= NOW */ - -1, /* (592) literal_func ::= TODAY */ - -3, /* (593) rand_func ::= RAND NK_LP NK_RP */ - -4, /* (594) rand_func ::= RAND NK_LP expression_list NK_RP */ - -1, /* (595) substr_func ::= SUBSTR */ - -1, /* (596) substr_func ::= SUBSTRING */ - -1, /* (597) trim_specification_type ::= BOTH */ - -1, /* (598) trim_specification_type ::= TRAILING */ - -1, /* (599) trim_specification_type ::= LEADING */ - -1, /* (600) noarg_func ::= NOW */ - -1, /* (601) noarg_func ::= TODAY */ - -1, /* (602) noarg_func ::= TIMEZONE */ - -1, /* (603) noarg_func ::= DATABASE */ - -1, /* (604) noarg_func ::= CLIENT_VERSION */ - -1, /* (605) noarg_func ::= SERVER_VERSION */ - -1, /* (606) noarg_func ::= SERVER_STATUS */ - -1, /* (607) noarg_func ::= CURRENT_USER */ - -1, /* (608) noarg_func ::= USER */ - -1, /* (609) noarg_func ::= PI */ - -1, /* (610) star_func ::= COUNT */ - -1, /* (611) star_func ::= FIRST */ - -1, /* (612) star_func ::= LAST */ - -1, /* (613) star_func ::= LAST_ROW */ - -1, /* (614) 
star_func_para_list ::= NK_STAR */ - -1, /* (615) star_func_para_list ::= other_para_list */ - -1, /* (616) other_para_list ::= star_func_para */ - -3, /* (617) other_para_list ::= other_para_list NK_COMMA star_func_para */ - -1, /* (618) star_func_para ::= expr_or_subquery */ - -3, /* (619) star_func_para ::= table_name NK_DOT NK_STAR */ - -4, /* (620) case_when_expression ::= CASE when_then_list case_when_else_opt END */ - -5, /* (621) case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ - -1, /* (622) when_then_list ::= when_then_expr */ - -2, /* (623) when_then_list ::= when_then_list when_then_expr */ - -4, /* (624) when_then_expr ::= WHEN common_expression THEN common_expression */ - 0, /* (625) case_when_else_opt ::= */ - -2, /* (626) case_when_else_opt ::= ELSE common_expression */ - -3, /* (627) predicate ::= expr_or_subquery compare_op expr_or_subquery */ - -5, /* (628) predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ - -6, /* (629) predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ - -3, /* (630) predicate ::= expr_or_subquery IS NULL */ - -4, /* (631) predicate ::= expr_or_subquery IS NOT NULL */ - -3, /* (632) predicate ::= expr_or_subquery in_op in_predicate_value */ - -1, /* (633) compare_op ::= NK_LT */ - -1, /* (634) compare_op ::= NK_GT */ - -1, /* (635) compare_op ::= NK_LE */ - -1, /* (636) compare_op ::= NK_GE */ - -1, /* (637) compare_op ::= NK_NE */ - -1, /* (638) compare_op ::= NK_EQ */ - -1, /* (639) compare_op ::= LIKE */ - -2, /* (640) compare_op ::= NOT LIKE */ - -1, /* (641) compare_op ::= MATCH */ - -1, /* (642) compare_op ::= NMATCH */ - -1, /* (643) compare_op ::= CONTAINS */ - -1, /* (644) in_op ::= IN */ - -2, /* (645) in_op ::= NOT IN */ - -3, /* (646) in_predicate_value ::= NK_LP literal_list NK_RP */ - -1, /* (647) boolean_value_expression ::= boolean_primary */ - -2, /* (648) boolean_value_expression ::= NOT boolean_primary */ - 
-3, /* (649) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ - -3, /* (650) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ - -1, /* (651) boolean_primary ::= predicate */ - -3, /* (652) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ - -1, /* (653) common_expression ::= expr_or_subquery */ - -1, /* (654) common_expression ::= boolean_value_expression */ - 0, /* (655) from_clause_opt ::= */ - -2, /* (656) from_clause_opt ::= FROM table_reference_list */ - -1, /* (657) table_reference_list ::= table_reference */ - -3, /* (658) table_reference_list ::= table_reference_list NK_COMMA table_reference */ - -1, /* (659) table_reference ::= table_primary */ - -1, /* (660) table_reference ::= joined_table */ - -2, /* (661) table_primary ::= table_name alias_opt */ - -4, /* (662) table_primary ::= db_name NK_DOT table_name alias_opt */ - -2, /* (663) table_primary ::= subquery alias_opt */ - -1, /* (664) table_primary ::= parenthesized_joined_table */ - 0, /* (665) alias_opt ::= */ - -1, /* (666) alias_opt ::= table_alias */ - -2, /* (667) alias_opt ::= AS table_alias */ - -3, /* (668) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - -3, /* (669) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ - -8, /* (670) joined_table ::= table_reference join_type join_subtype JOIN table_reference join_on_clause_opt window_offset_clause_opt jlimit_clause_opt */ - 0, /* (671) join_type ::= */ - -1, /* (672) join_type ::= INNER */ - -1, /* (673) join_type ::= LEFT */ - -1, /* (674) join_type ::= RIGHT */ - -1, /* (675) join_type ::= FULL */ - 0, /* (676) join_subtype ::= */ - -1, /* (677) join_subtype ::= OUTER */ - -1, /* (678) join_subtype ::= SEMI */ - -1, /* (679) join_subtype ::= ANTI */ - -1, /* (680) join_subtype ::= ASOF */ - -1, /* (681) join_subtype ::= WINDOW */ - 0, /* (682) join_on_clause_opt ::= */ - -2, /* (683) join_on_clause_opt ::= ON search_condition 
*/ - 0, /* (684) window_offset_clause_opt ::= */ - -6, /* (685) window_offset_clause_opt ::= WINDOW_OFFSET NK_LP window_offset_literal NK_COMMA window_offset_literal NK_RP */ - -1, /* (686) window_offset_literal ::= NK_VARIABLE */ - -2, /* (687) window_offset_literal ::= NK_MINUS NK_VARIABLE */ - 0, /* (688) jlimit_clause_opt ::= */ - -2, /* (689) jlimit_clause_opt ::= JLIMIT NK_INTEGER */ - -14, /* (690) query_specification ::= SELECT hint_list set_quantifier_opt tag_mode_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ - 0, /* (691) hint_list ::= */ - -1, /* (692) hint_list ::= NK_HINT */ - 0, /* (693) tag_mode_opt ::= */ - -1, /* (694) tag_mode_opt ::= TAGS */ - 0, /* (695) set_quantifier_opt ::= */ - -1, /* (696) set_quantifier_opt ::= DISTINCT */ - -1, /* (697) set_quantifier_opt ::= ALL */ - -1, /* (698) select_list ::= select_item */ - -3, /* (699) select_list ::= select_list NK_COMMA select_item */ - -1, /* (700) select_item ::= NK_STAR */ - -1, /* (701) select_item ::= common_expression */ - -2, /* (702) select_item ::= common_expression column_alias */ - -3, /* (703) select_item ::= common_expression AS column_alias */ - -3, /* (704) select_item ::= table_name NK_DOT NK_STAR */ - 0, /* (705) where_clause_opt ::= */ - -2, /* (706) where_clause_opt ::= WHERE search_condition */ - 0, /* (707) partition_by_clause_opt ::= */ - -3, /* (708) partition_by_clause_opt ::= PARTITION BY partition_list */ - -1, /* (709) partition_list ::= partition_item */ - -3, /* (710) partition_list ::= partition_list NK_COMMA partition_item */ - -1, /* (711) partition_item ::= expr_or_subquery */ - -2, /* (712) partition_item ::= expr_or_subquery column_alias */ - -3, /* (713) partition_item ::= expr_or_subquery AS column_alias */ - 0, /* (714) twindow_clause_opt ::= */ - -6, /* (715) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA 
interval_sliding_duration_literal NK_RP */ - -4, /* (716) twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ - -6, /* (717) twindow_clause_opt ::= INTERVAL NK_LP interval_sliding_duration_literal NK_RP sliding_opt fill_opt */ - -8, /* (718) twindow_clause_opt ::= INTERVAL NK_LP interval_sliding_duration_literal NK_COMMA interval_sliding_duration_literal NK_RP sliding_opt fill_opt */ - -7, /* (719) twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition */ - -4, /* (720) twindow_clause_opt ::= COUNT_WINDOW NK_LP NK_INTEGER NK_RP */ - -6, /* (721) twindow_clause_opt ::= COUNT_WINDOW NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ - -4, /* (722) twindow_clause_opt ::= ANOMALY_WINDOW NK_LP expr_or_subquery NK_RP */ - -6, /* (723) twindow_clause_opt ::= ANOMALY_WINDOW NK_LP expr_or_subquery NK_COMMA NK_STRING NK_RP */ - 0, /* (724) sliding_opt ::= */ - -4, /* (725) sliding_opt ::= SLIDING NK_LP interval_sliding_duration_literal NK_RP */ - -1, /* (726) interval_sliding_duration_literal ::= NK_VARIABLE */ - -1, /* (727) interval_sliding_duration_literal ::= NK_STRING */ - -1, /* (728) interval_sliding_duration_literal ::= NK_INTEGER */ - 0, /* (729) fill_opt ::= */ - -4, /* (730) fill_opt ::= FILL NK_LP fill_mode NK_RP */ - -6, /* (731) fill_opt ::= FILL NK_LP VALUE NK_COMMA expression_list NK_RP */ - -6, /* (732) fill_opt ::= FILL NK_LP VALUE_F NK_COMMA expression_list NK_RP */ - -1, /* (733) fill_mode ::= NONE */ - -1, /* (734) fill_mode ::= PREV */ - -1, /* (735) fill_mode ::= NULL */ - -1, /* (736) fill_mode ::= NULL_F */ - -1, /* (737) fill_mode ::= LINEAR */ - -1, /* (738) fill_mode ::= NEXT */ - 0, /* (739) group_by_clause_opt ::= */ - -3, /* (740) group_by_clause_opt ::= GROUP BY group_by_list */ - -1, /* (741) group_by_list ::= expr_or_subquery */ - -3, /* (742) group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ - 0, /* (743) having_clause_opt ::= */ - -2, /* (744) having_clause_opt ::= HAVING 
search_condition */ - 0, /* (745) range_opt ::= */ - -6, /* (746) range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ - -4, /* (747) range_opt ::= RANGE NK_LP expr_or_subquery NK_RP */ - 0, /* (748) every_opt ::= */ - -4, /* (749) every_opt ::= EVERY NK_LP duration_literal NK_RP */ - -4, /* (750) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ - -1, /* (751) query_simple ::= query_specification */ - -1, /* (752) query_simple ::= union_query_expression */ - -4, /* (753) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ - -3, /* (754) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ - -1, /* (755) query_simple_or_subquery ::= query_simple */ - -1, /* (756) query_simple_or_subquery ::= subquery */ - -1, /* (757) query_or_subquery ::= query_expression */ - -1, /* (758) query_or_subquery ::= subquery */ - 0, /* (759) order_by_clause_opt ::= */ - -3, /* (760) order_by_clause_opt ::= ORDER BY sort_specification_list */ - 0, /* (761) slimit_clause_opt ::= */ - -2, /* (762) slimit_clause_opt ::= SLIMIT NK_INTEGER */ - -4, /* (763) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - -4, /* (764) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - 0, /* (765) limit_clause_opt ::= */ - -2, /* (766) limit_clause_opt ::= LIMIT NK_INTEGER */ - -4, /* (767) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ - -4, /* (768) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - -3, /* (769) subquery ::= NK_LP query_expression NK_RP */ - -3, /* (770) subquery ::= NK_LP subquery NK_RP */ - -1, /* (771) search_condition ::= common_expression */ - -1, /* (772) sort_specification_list ::= sort_specification */ - -3, /* (773) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ - -3, /* (774) sort_specification ::= expr_or_subquery ordering_specification_opt 
null_ordering_opt */ - 0, /* (775) ordering_specification_opt ::= */ - -1, /* (776) ordering_specification_opt ::= ASC */ - -1, /* (777) ordering_specification_opt ::= DESC */ - 0, /* (778) null_ordering_opt ::= */ - -2, /* (779) null_ordering_opt ::= NULLS FIRST */ - -2, /* (780) null_ordering_opt ::= NULLS LAST */ - 0, /* (781) column_options ::= */ - -3, /* (782) column_options ::= column_options PRIMARY KEY */ - -3, /* (783) column_options ::= column_options NK_ID NK_STRING */ -}; - -static void yy_accept(yyParser*); /* Forward Declaration */ - -/* -** Perform a reduce action and the shift that must immediately -** follow the reduce. -** -** The yyLookahead and yyLookaheadToken parameters provide reduce actions -** access to the lookahead token (if any). The yyLookahead will be YYNOCODE -** if the lookahead token has already been consumed. As this procedure is -** only called from one place, optimizing compilers will in-line it, which -** means that the extra parameters have no performance impact. 
-*/ -static YYACTIONTYPE yy_reduce( - yyParser *yypParser, /* The parser */ - unsigned int yyruleno, /* Number of the rule by which to reduce */ - int yyLookahead, /* Lookahead token, or YYNOCODE if none */ - ParseTOKENTYPE yyLookaheadToken /* Value of the lookahead token */ - ParseCTX_PDECL /* %extra_context */ -){ - int yygoto; /* The next state */ - YYACTIONTYPE yyact; /* The next action */ - yyStackEntry *yymsp; /* The top of the parser's stack */ - int yysize; /* Amount to pop the stack */ - ParseARG_FETCH - (void)yyLookahead; - (void)yyLookaheadToken; - yymsp = yypParser->yytos; -#ifndef NDEBUG - if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ - yysize = yyRuleInfoNRhs[yyruleno]; - if( yysize ){ - fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", - yyTracePrompt, - yyruleno, yyRuleName[yyruleno], - yyrulenoyytos - yypParser->yystack)>yypParser->yyhwm ){ - yypParser->yyhwm++; - assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack)); - } -#endif -#if YYSTACKDEPTH>0 - if( yypParser->yytos>=yypParser->yystackEnd ){ - yyStackOverflow(yypParser); - /* The call to yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } -#else - if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ - if( yyGrowStack(yypParser) ){ - yyStackOverflow(yypParser); - /* The call to yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } - yymsp = yypParser->yytos; - } -#endif - } - - switch( yyruleno ){ - /* Beginning here are the reduction cases. A typical example - ** follows: - ** case 0: - ** #line - ** { ... 
} // User supplied code - ** #line - ** break; - */ -/********** Begin reduce actions **********************************************/ - YYMINORTYPE yylhsminor; - case 0: /* cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */ -{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } - yy_destructor(yypParser,390,&yymsp[0].minor); - break; - case 1: /* cmd ::= ALTER ACCOUNT NK_ID alter_account_options */ -{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } - yy_destructor(yypParser,391,&yymsp[0].minor); - break; - case 2: /* account_options ::= */ -{ } - break; - case 3: /* account_options ::= account_options PPS literal */ - case 4: /* account_options ::= account_options TSERIES literal */ yytestcase(yyruleno==4); - case 5: /* account_options ::= account_options STORAGE literal */ yytestcase(yyruleno==5); - case 6: /* account_options ::= account_options STREAMS literal */ yytestcase(yyruleno==6); - case 7: /* account_options ::= account_options QTIME literal */ yytestcase(yyruleno==7); - case 8: /* account_options ::= account_options DBS literal */ yytestcase(yyruleno==8); - case 9: /* account_options ::= account_options USERS literal */ yytestcase(yyruleno==9); - case 10: /* account_options ::= account_options CONNS literal */ yytestcase(yyruleno==10); - case 11: /* account_options ::= account_options STATE literal */ yytestcase(yyruleno==11); -{ yy_destructor(yypParser,390,&yymsp[-2].minor); -{ } - yy_destructor(yypParser,392,&yymsp[0].minor); -} - break; - case 12: /* alter_account_options ::= alter_account_option */ -{ yy_destructor(yypParser,393,&yymsp[0].minor); -{ } -} - break; - case 13: /* alter_account_options ::= alter_account_options alter_account_option */ -{ yy_destructor(yypParser,391,&yymsp[-1].minor); -{ } - yy_destructor(yypParser,393,&yymsp[0].minor); -} - break; - case 14: /* alter_account_option ::= PASS literal */ - case 15: /* alter_account_option ::= PPS literal 
*/ yytestcase(yyruleno==15); - case 16: /* alter_account_option ::= TSERIES literal */ yytestcase(yyruleno==16); - case 17: /* alter_account_option ::= STORAGE literal */ yytestcase(yyruleno==17); - case 18: /* alter_account_option ::= STREAMS literal */ yytestcase(yyruleno==18); - case 19: /* alter_account_option ::= QTIME literal */ yytestcase(yyruleno==19); - case 20: /* alter_account_option ::= DBS literal */ yytestcase(yyruleno==20); - case 21: /* alter_account_option ::= USERS literal */ yytestcase(yyruleno==21); - case 22: /* alter_account_option ::= CONNS literal */ yytestcase(yyruleno==22); - case 23: /* alter_account_option ::= STATE literal */ yytestcase(yyruleno==23); -{ } - yy_destructor(yypParser,392,&yymsp[0].minor); - break; - case 24: /* ip_range_list ::= NK_STRING */ -{ yylhsminor.yy946 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy946 = yylhsminor.yy946; - break; - case 25: /* ip_range_list ::= ip_range_list NK_COMMA NK_STRING */ -{ yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy946 = yylhsminor.yy946; - break; - case 26: /* white_list ::= HOST ip_range_list */ -{ yymsp[-1].minor.yy946 = yymsp[0].minor.yy946; } - break; - case 27: /* white_list_opt ::= */ - case 212: /* specific_cols_opt ::= */ yytestcase(yyruleno==212); - case 250: /* tags_def_opt ::= */ yytestcase(yyruleno==250); - case 343: /* tag_list_opt ::= */ yytestcase(yyruleno==343); - case 416: /* col_list_opt ::= */ yytestcase(yyruleno==416); - case 423: /* tag_def_or_ref_opt ::= */ yytestcase(yyruleno==423); - case 707: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==707); - case 739: /* group_by_clause_opt ::= */ yytestcase(yyruleno==739); - case 759: /* order_by_clause_opt ::= */ yytestcase(yyruleno==759); -{ yymsp[1].minor.yy946 = NULL; } - break; - case 28: /* white_list_opt ::= white_list */ - case 251: 
/* tags_def_opt ::= tags_def */ yytestcase(yyruleno==251); - case 424: /* tag_def_or_ref_opt ::= tags_def */ yytestcase(yyruleno==424); - case 615: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==615); -{ yylhsminor.yy946 = yymsp[0].minor.yy946; } - yymsp[0].minor.yy946 = yylhsminor.yy946; - break; - case 29: /* is_import_opt ::= */ - case 31: /* is_createdb_opt ::= */ yytestcase(yyruleno==31); -{ yymsp[1].minor.yy815 = 0; } - break; - case 30: /* is_import_opt ::= IS_IMPORT NK_INTEGER */ - case 32: /* is_createdb_opt ::= CREATEDB NK_INTEGER */ yytestcase(yyruleno==32); - case 42: /* sysinfo_opt ::= SYSINFO NK_INTEGER */ yytestcase(yyruleno==42); -{ yymsp[-1].minor.yy815 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); } - break; - case 33: /* cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt is_createdb_opt is_import_opt white_list_opt */ -{ - pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-6].minor.yy557, &yymsp[-4].minor.yy0, yymsp[-3].minor.yy815, yymsp[-1].minor.yy815, yymsp[-2].minor.yy815); - pCxt->pRootNode = addCreateUserStmtWhiteList(pCxt, pCxt->pRootNode, yymsp[0].minor.yy946); - } - break; - case 34: /* cmd ::= ALTER USER user_name PASS NK_STRING */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); } - break; - case 35: /* cmd ::= ALTER USER user_name ENABLE NK_INTEGER */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); } - break; - case 36: /* cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); } - break; - case 37: /* cmd ::= ALTER USER user_name CREATEDB NK_INTEGER */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_CREATEDB, &yymsp[0].minor.yy0); } - break; - case 38: /* cmd ::= ALTER USER user_name ADD white_list */ -{ pCxt->pRootNode 
= createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_ADD_WHITE_LIST, yymsp[0].minor.yy946); } - break; - case 39: /* cmd ::= ALTER USER user_name DROP white_list */ -{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_DROP_WHITE_LIST, yymsp[0].minor.yy946); } - break; - case 40: /* cmd ::= DROP USER user_name */ -{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy557); } - break; - case 41: /* sysinfo_opt ::= */ -{ yymsp[1].minor.yy815 = 1; } - break; - case 43: /* cmd ::= GRANT privileges ON priv_level with_clause_opt TO user_name */ -{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-5].minor.yy483, &yymsp[-3].minor.yy723, &yymsp[0].minor.yy557, yymsp[-2].minor.yy974); } - break; - case 44: /* cmd ::= REVOKE privileges ON priv_level with_clause_opt FROM user_name */ -{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-5].minor.yy483, &yymsp[-3].minor.yy723, &yymsp[0].minor.yy557, yymsp[-2].minor.yy974); } - break; - case 45: /* privileges ::= ALL */ -{ yymsp[0].minor.yy483 = PRIVILEGE_TYPE_ALL; } - break; - case 46: /* privileges ::= priv_type_list */ - case 48: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==48); -{ yylhsminor.yy483 = yymsp[0].minor.yy483; } - yymsp[0].minor.yy483 = yylhsminor.yy483; - break; - case 47: /* privileges ::= SUBSCRIBE */ -{ yymsp[0].minor.yy483 = PRIVILEGE_TYPE_SUBSCRIBE; } - break; - case 49: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */ -{ yylhsminor.yy483 = yymsp[-2].minor.yy483 | yymsp[0].minor.yy483; } - yymsp[-2].minor.yy483 = yylhsminor.yy483; - break; - case 50: /* priv_type ::= READ */ -{ yymsp[0].minor.yy483 = PRIVILEGE_TYPE_READ; } - break; - case 51: /* priv_type ::= WRITE */ -{ yymsp[0].minor.yy483 = PRIVILEGE_TYPE_WRITE; } - break; - case 52: /* priv_type ::= ALTER */ -{ yymsp[0].minor.yy483 = PRIVILEGE_TYPE_ALTER; } - break; - case 53: /* priv_level ::= NK_STAR NK_DOT NK_STAR */ -{ yylhsminor.yy723.first = yymsp[-2].minor.yy0; 
yylhsminor.yy723.second = yymsp[0].minor.yy0; } - yymsp[-2].minor.yy723 = yylhsminor.yy723; - break; - case 54: /* priv_level ::= db_name NK_DOT NK_STAR */ -{ yylhsminor.yy723.first = yymsp[-2].minor.yy557; yylhsminor.yy723.second = yymsp[0].minor.yy0; } - yymsp[-2].minor.yy723 = yylhsminor.yy723; - break; - case 55: /* priv_level ::= db_name NK_DOT table_name */ -{ yylhsminor.yy723.first = yymsp[-2].minor.yy557; yylhsminor.yy723.second = yymsp[0].minor.yy557; } - yymsp[-2].minor.yy723 = yylhsminor.yy723; - break; - case 56: /* priv_level ::= topic_name */ -{ yylhsminor.yy723.first = yymsp[0].minor.yy557; yylhsminor.yy723.second = nil_token; } - yymsp[0].minor.yy723 = yylhsminor.yy723; - break; - case 57: /* with_clause_opt ::= */ - case 177: /* start_opt ::= */ yytestcase(yyruleno==177); - case 181: /* end_opt ::= */ yytestcase(yyruleno==181); - case 338: /* like_pattern_opt ::= */ yytestcase(yyruleno==338); - case 435: /* subtable_opt ::= */ yytestcase(yyruleno==435); - case 625: /* case_when_else_opt ::= */ yytestcase(yyruleno==625); - case 655: /* from_clause_opt ::= */ yytestcase(yyruleno==655); - case 682: /* join_on_clause_opt ::= */ yytestcase(yyruleno==682); - case 684: /* window_offset_clause_opt ::= */ yytestcase(yyruleno==684); - case 688: /* jlimit_clause_opt ::= */ yytestcase(yyruleno==688); - case 705: /* where_clause_opt ::= */ yytestcase(yyruleno==705); - case 714: /* twindow_clause_opt ::= */ yytestcase(yyruleno==714); - case 724: /* sliding_opt ::= */ yytestcase(yyruleno==724); - case 729: /* fill_opt ::= */ yytestcase(yyruleno==729); - case 743: /* having_clause_opt ::= */ yytestcase(yyruleno==743); - case 745: /* range_opt ::= */ yytestcase(yyruleno==745); - case 748: /* every_opt ::= */ yytestcase(yyruleno==748); - case 761: /* slimit_clause_opt ::= */ yytestcase(yyruleno==761); - case 765: /* limit_clause_opt ::= */ yytestcase(yyruleno==765); -{ yymsp[1].minor.yy974 = NULL; } - break; - case 58: /* with_clause_opt ::= WITH search_condition */ 
- case 656: /* from_clause_opt ::= FROM table_reference_list */ yytestcase(yyruleno==656); - case 683: /* join_on_clause_opt ::= ON search_condition */ yytestcase(yyruleno==683); - case 706: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==706); - case 744: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==744); -{ yymsp[-1].minor.yy974 = yymsp[0].minor.yy974; } - break; - case 59: /* cmd ::= CREATE ENCRYPT_KEY NK_STRING */ -{ pCxt->pRootNode = createEncryptKeyStmt(pCxt, &yymsp[0].minor.yy0); } - break; - case 60: /* cmd ::= CREATE ANODE NK_STRING */ -{ pCxt->pRootNode = createCreateAnodeStmt(pCxt, &yymsp[0].minor.yy0); } - break; - case 61: /* cmd ::= UPDATE ANODE NK_INTEGER */ -{ pCxt->pRootNode = createUpdateAnodeStmt(pCxt, &yymsp[0].minor.yy0, false); } - break; - case 62: /* cmd ::= UPDATE ALL ANODES */ -{ pCxt->pRootNode = createUpdateAnodeStmt(pCxt, NULL, true); } - break; - case 63: /* cmd ::= DROP ANODE NK_INTEGER */ -{ pCxt->pRootNode = createDropAnodeStmt(pCxt, &yymsp[0].minor.yy0); } - break; - case 64: /* cmd ::= CREATE DNODE dnode_endpoint */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy557, NULL); } - break; - case 65: /* cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */ -{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0); } - break; - case 66: /* cmd ::= DROP DNODE NK_INTEGER force_opt */ -{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy569, false); } - break; - case 67: /* cmd ::= DROP DNODE dnode_endpoint force_opt */ -{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy557, yymsp[0].minor.yy569, false); } - break; - case 68: /* cmd ::= DROP DNODE NK_INTEGER unsafe_opt */ -{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy0, false, yymsp[0].minor.yy569); } - break; - case 69: /* cmd ::= DROP DNODE dnode_endpoint unsafe_opt */ -{ pCxt->pRootNode = createDropDnodeStmt(pCxt, 
&yymsp[-1].minor.yy557, false, yymsp[0].minor.yy569); } - break; - case 70: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ -{ pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); } - break; - case 71: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */ -{ pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } - break; - case 72: /* cmd ::= ALTER ALL DNODES NK_STRING */ -{ pCxt->pRootNode = createAlterDnodeStmt(pCxt, NULL, &yymsp[0].minor.yy0, NULL); } - break; - case 73: /* cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */ -{ pCxt->pRootNode = createAlterDnodeStmt(pCxt, NULL, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } - break; - case 74: /* cmd ::= RESTORE DNODE NK_INTEGER */ -{ pCxt->pRootNode = createRestoreComponentNodeStmt(pCxt, QUERY_NODE_RESTORE_DNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 75: /* dnode_endpoint ::= NK_STRING */ - case 76: /* dnode_endpoint ::= NK_ID */ yytestcase(yyruleno==76); - case 77: /* dnode_endpoint ::= NK_IPTOKEN */ yytestcase(yyruleno==77); - case 372: /* sma_func_name ::= COUNT */ yytestcase(yyruleno==372); - case 373: /* sma_func_name ::= FIRST */ yytestcase(yyruleno==373); - case 374: /* sma_func_name ::= LAST */ yytestcase(yyruleno==374); - case 375: /* sma_func_name ::= LAST_ROW */ yytestcase(yyruleno==375); - case 523: /* db_name ::= NK_ID */ yytestcase(yyruleno==523); - case 524: /* table_name ::= NK_ID */ yytestcase(yyruleno==524); - case 525: /* column_name ::= NK_ID */ yytestcase(yyruleno==525); - case 526: /* function_name ::= NK_ID */ yytestcase(yyruleno==526); - case 527: /* view_name ::= NK_ID */ yytestcase(yyruleno==527); - case 528: /* table_alias ::= NK_ID */ yytestcase(yyruleno==528); - case 529: /* column_alias ::= NK_ID */ yytestcase(yyruleno==529); - case 530: /* column_alias ::= NK_ALIAS */ yytestcase(yyruleno==530); - case 531: /* user_name ::= NK_ID */ yytestcase(yyruleno==531); - case 532: /* 
topic_name ::= NK_ID */ yytestcase(yyruleno==532); - case 533: /* stream_name ::= NK_ID */ yytestcase(yyruleno==533); - case 534: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==534); - case 535: /* index_name ::= NK_ID */ yytestcase(yyruleno==535); - case 536: /* tsma_name ::= NK_ID */ yytestcase(yyruleno==536); - case 595: /* substr_func ::= SUBSTR */ yytestcase(yyruleno==595); - case 596: /* substr_func ::= SUBSTRING */ yytestcase(yyruleno==596); - case 600: /* noarg_func ::= NOW */ yytestcase(yyruleno==600); - case 601: /* noarg_func ::= TODAY */ yytestcase(yyruleno==601); - case 602: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==602); - case 603: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==603); - case 604: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==604); - case 605: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==605); - case 606: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==606); - case 607: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==607); - case 608: /* noarg_func ::= USER */ yytestcase(yyruleno==608); - case 609: /* noarg_func ::= PI */ yytestcase(yyruleno==609); - case 610: /* star_func ::= COUNT */ yytestcase(yyruleno==610); - case 611: /* star_func ::= FIRST */ yytestcase(yyruleno==611); - case 612: /* star_func ::= LAST */ yytestcase(yyruleno==612); - case 613: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==613); -{ yylhsminor.yy557 = yymsp[0].minor.yy0; } - yymsp[0].minor.yy557 = yylhsminor.yy557; - break; - case 78: /* force_opt ::= */ - case 105: /* not_exists_opt ::= */ yytestcase(yyruleno==105); - case 107: /* exists_opt ::= */ yytestcase(yyruleno==107); - case 210: /* with_opt ::= */ yytestcase(yyruleno==210); - case 393: /* analyze_opt ::= */ yytestcase(yyruleno==393); - case 400: /* agg_func_opt ::= */ yytestcase(yyruleno==400); - case 406: /* or_replace_opt ::= */ yytestcase(yyruleno==406); - case 437: /* ignore_opt ::= */ yytestcase(yyruleno==437); - case 693: /* tag_mode_opt ::= 
*/ yytestcase(yyruleno==693); - case 695: /* set_quantifier_opt ::= */ yytestcase(yyruleno==695); -{ yymsp[1].minor.yy569 = false; } - break; - case 79: /* force_opt ::= FORCE */ - case 80: /* unsafe_opt ::= UNSAFE */ yytestcase(yyruleno==80); - case 211: /* with_opt ::= WITH */ yytestcase(yyruleno==211); - case 394: /* analyze_opt ::= ANALYZE */ yytestcase(yyruleno==394); - case 401: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==401); - case 694: /* tag_mode_opt ::= TAGS */ yytestcase(yyruleno==694); - case 696: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==696); -{ yymsp[0].minor.yy569 = true; } - break; - case 81: /* cmd ::= ALTER CLUSTER NK_STRING */ -{ pCxt->pRootNode = createAlterClusterStmt(pCxt, &yymsp[0].minor.yy0, NULL); } - break; - case 82: /* cmd ::= ALTER CLUSTER NK_STRING NK_STRING */ -{ pCxt->pRootNode = createAlterClusterStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } - break; - case 83: /* cmd ::= ALTER LOCAL NK_STRING */ -{ pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); } - break; - case 84: /* cmd ::= ALTER LOCAL NK_STRING NK_STRING */ -{ pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } - break; - case 85: /* cmd ::= CREATE QNODE ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createCreateComponentNodeStmt(pCxt, QUERY_NODE_CREATE_QNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 86: /* cmd ::= DROP QNODE ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_QNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 87: /* cmd ::= RESTORE QNODE ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createRestoreComponentNodeStmt(pCxt, QUERY_NODE_RESTORE_QNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 88: /* cmd ::= CREATE BNODE ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createCreateComponentNodeStmt(pCxt, QUERY_NODE_CREATE_BNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 89: /* cmd ::= DROP BNODE ON DNODE NK_INTEGER */ -{ 
pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_BNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 90: /* cmd ::= CREATE SNODE ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createCreateComponentNodeStmt(pCxt, QUERY_NODE_CREATE_SNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 91: /* cmd ::= DROP SNODE ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_SNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 92: /* cmd ::= CREATE MNODE ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createCreateComponentNodeStmt(pCxt, QUERY_NODE_CREATE_MNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 93: /* cmd ::= DROP MNODE ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 94: /* cmd ::= RESTORE MNODE ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createRestoreComponentNodeStmt(pCxt, QUERY_NODE_RESTORE_MNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 95: /* cmd ::= RESTORE VNODE ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createRestoreComponentNodeStmt(pCxt, QUERY_NODE_RESTORE_VNODE_STMT, &yymsp[0].minor.yy0); } - break; - case 96: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ -{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy569, &yymsp[-1].minor.yy557, yymsp[0].minor.yy974); } - break; - case 97: /* cmd ::= DROP DATABASE exists_opt db_name */ -{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } - break; - case 98: /* cmd ::= USE db_name */ -{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy557); } - break; - case 99: /* cmd ::= ALTER DATABASE db_name alter_db_options */ -{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy557, yymsp[0].minor.yy974); } - break; - case 100: /* cmd ::= FLUSH DATABASE db_name */ -{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy557); } - break; - case 101: /* cmd 
::= TRIM DATABASE db_name speed_opt */ -{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[-1].minor.yy557, yymsp[0].minor.yy904); } - break; - case 102: /* cmd ::= S3MIGRATE DATABASE db_name */ -{ pCxt->pRootNode = createS3MigrateDatabaseStmt(pCxt, &yymsp[0].minor.yy557); } - break; - case 103: /* cmd ::= COMPACT DATABASE db_name start_opt end_opt */ -{ pCxt->pRootNode = createCompactStmt(pCxt, &yymsp[-2].minor.yy557, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } - break; - case 104: /* not_exists_opt ::= IF NOT EXISTS */ -{ yymsp[-2].minor.yy569 = true; } - break; - case 106: /* exists_opt ::= IF EXISTS */ - case 407: /* or_replace_opt ::= OR REPLACE */ yytestcase(yyruleno==407); - case 438: /* ignore_opt ::= IGNORE UNTREATED */ yytestcase(yyruleno==438); -{ yymsp[-1].minor.yy569 = true; } - break; - case 108: /* db_options ::= */ -{ yymsp[1].minor.yy974 = createDefaultDatabaseOptions(pCxt); } - break; - case 109: /* db_options ::= db_options BUFFER NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 110: /* db_options ::= db_options CACHEMODEL NK_STRING */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 111: /* db_options ::= db_options CACHESIZE NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 112: /* db_options ::= db_options COMP NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_COMP, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 113: /* db_options ::= db_options DURATION NK_INTEGER */ - case 114: /* db_options ::= db_options DURATION NK_VARIABLE */ yytestcase(yyruleno==114); -{ 
yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 115: /* db_options ::= db_options MAXROWS NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 116: /* db_options ::= db_options MINROWS NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 117: /* db_options ::= db_options KEEP integer_list */ - case 118: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==118); -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_KEEP, yymsp[0].minor.yy946); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 119: /* db_options ::= db_options PAGES NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 120: /* db_options ::= db_options PAGESIZE NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 121: /* db_options ::= db_options TSDB_PAGESIZE NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_TSDB_PAGESIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 122: /* db_options ::= db_options PRECISION NK_STRING */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 123: /* db_options ::= db_options REPLICA NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, 
DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 124: /* db_options ::= db_options VGROUPS NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 125: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 126: /* db_options ::= db_options RETENTIONS retention_list */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_RETENTIONS, yymsp[0].minor.yy946); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 127: /* db_options ::= db_options SCHEMALESS NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 128: /* db_options ::= db_options WAL_LEVEL NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_WAL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 129: /* db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 130: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 131: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ -{ - SToken t = yymsp[-1].minor.yy0; - t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy974 = setDatabaseOption(pCxt, 
yymsp[-3].minor.yy974, DB_OPTION_WAL_RETENTION_PERIOD, &t); - } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 132: /* db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 133: /* db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ -{ - SToken t = yymsp[-1].minor.yy0; - t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-3].minor.yy974, DB_OPTION_WAL_RETENTION_SIZE, &t); - } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 134: /* db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 135: /* db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 136: /* db_options ::= db_options STT_TRIGGER NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_STT_TRIGGER, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 137: /* db_options ::= db_options TABLE_PREFIX signed */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_TABLE_PREFIX, yymsp[0].minor.yy974); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 138: /* db_options ::= db_options TABLE_SUFFIX signed */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_TABLE_SUFFIX, yymsp[0].minor.yy974); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 139: /* db_options ::= db_options S3_CHUNKSIZE NK_INTEGER */ -{ 
yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_S3_CHUNKSIZE, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 140: /* db_options ::= db_options S3_KEEPLOCAL NK_INTEGER */ - case 141: /* db_options ::= db_options S3_KEEPLOCAL NK_VARIABLE */ yytestcase(yyruleno==141); -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_S3_KEEPLOCAL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 142: /* db_options ::= db_options S3_COMPACT NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_S3_COMPACT, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 143: /* db_options ::= db_options KEEP_TIME_OFFSET NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_KEEP_TIME_OFFSET, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 144: /* db_options ::= db_options ENCRYPT_ALGORITHM NK_STRING */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_ENCRYPT_ALGORITHM, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 145: /* alter_db_options ::= alter_db_option */ -{ yylhsminor.yy974 = createAlterDatabaseOptions(pCxt); yylhsminor.yy974 = setAlterDatabaseOption(pCxt, yylhsminor.yy974, &yymsp[0].minor.yy683); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 146: /* alter_db_options ::= alter_db_options alter_db_option */ -{ yylhsminor.yy974 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy974, &yymsp[0].minor.yy683); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 147: /* alter_db_option ::= BUFFER NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 148: /* alter_db_option ::= CACHEMODEL NK_STRING */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_CACHEMODEL; 
yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 149: /* alter_db_option ::= CACHESIZE NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 150: /* alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 151: /* alter_db_option ::= KEEP integer_list */ - case 152: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==152); -{ yymsp[-1].minor.yy683.type = DB_OPTION_KEEP; yymsp[-1].minor.yy683.pList = yymsp[0].minor.yy946; } - break; - case 153: /* alter_db_option ::= PAGES NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_PAGES; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 154: /* alter_db_option ::= REPLICA NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 155: /* alter_db_option ::= WAL_LEVEL NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_WAL; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 156: /* alter_db_option ::= STT_TRIGGER NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_STT_TRIGGER; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 157: /* alter_db_option ::= MINROWS NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_MINROWS; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 158: /* alter_db_option ::= WAL_RETENTION_PERIOD NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_WAL_RETENTION_PERIOD; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 159: /* alter_db_option ::= WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ -{ - SToken t = yymsp[-1].minor.yy0; - t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yymsp[-2].minor.yy683.type = DB_OPTION_WAL_RETENTION_PERIOD; yymsp[-2].minor.yy683.val = t; - } - break; - case 160: 
/* alter_db_option ::= WAL_RETENTION_SIZE NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_WAL_RETENTION_SIZE; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 161: /* alter_db_option ::= WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ -{ - SToken t = yymsp[-1].minor.yy0; - t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yymsp[-2].minor.yy683.type = DB_OPTION_WAL_RETENTION_SIZE; yymsp[-2].minor.yy683.val = t; - } - break; - case 162: /* alter_db_option ::= S3_KEEPLOCAL NK_INTEGER */ - case 163: /* alter_db_option ::= S3_KEEPLOCAL NK_VARIABLE */ yytestcase(yyruleno==163); -{ yymsp[-1].minor.yy683.type = DB_OPTION_S3_KEEPLOCAL; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 164: /* alter_db_option ::= S3_COMPACT NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_S3_COMPACT, yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 165: /* alter_db_option ::= KEEP_TIME_OFFSET NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_KEEP_TIME_OFFSET; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 166: /* alter_db_option ::= ENCRYPT_ALGORITHM NK_STRING */ -{ yymsp[-1].minor.yy683.type = DB_OPTION_ENCRYPT_ALGORITHM; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 167: /* integer_list ::= NK_INTEGER */ -{ yylhsminor.yy946 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy946 = yylhsminor.yy946; - break; - case 168: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */ - case 452: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==452); -{ yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy946 = yylhsminor.yy946; - break; - case 169: /* variable_list ::= NK_VARIABLE */ -{ yylhsminor.yy946 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - 
yymsp[0].minor.yy946 = yylhsminor.yy946; - break; - case 170: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */ -{ yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy946 = yylhsminor.yy946; - break; - case 171: /* retention_list ::= retention */ - case 204: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==204); - case 207: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==207); - case 216: /* tag_def_list ::= tag_def */ yytestcase(yyruleno==216); - case 219: /* column_def_list ::= column_def */ yytestcase(yyruleno==219); - case 267: /* rollup_func_list ::= rollup_func_name */ yytestcase(yyruleno==267); - case 272: /* col_name_list ::= col_name */ yytestcase(yyruleno==272); - case 344: /* tag_list_opt ::= tag_item */ yytestcase(yyruleno==344); - case 368: /* func_list ::= func */ yytestcase(yyruleno==368); - case 418: /* column_stream_def_list ::= column_stream_def */ yytestcase(yyruleno==418); - case 496: /* tags_literal_list ::= tags_literal */ yytestcase(yyruleno==496); - case 521: /* literal_list ::= signed_literal */ yytestcase(yyruleno==521); - case 616: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==616); - case 622: /* when_then_list ::= when_then_expr */ yytestcase(yyruleno==622); - case 698: /* select_list ::= select_item */ yytestcase(yyruleno==698); - case 709: /* partition_list ::= partition_item */ yytestcase(yyruleno==709); - case 772: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==772); -{ yylhsminor.yy946 = createNodeList(pCxt, yymsp[0].minor.yy974); } - yymsp[0].minor.yy946 = yylhsminor.yy946; - break; - case 172: /* retention_list ::= retention_list NK_COMMA retention */ - case 208: /* multi_drop_clause ::= multi_drop_clause NK_COMMA drop_table_clause */ yytestcase(yyruleno==208); - case 217: /* tag_def_list ::= tag_def_list NK_COMMA tag_def */ yytestcase(yyruleno==217); 
- case 220: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==220); - case 268: /* rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ yytestcase(yyruleno==268); - case 273: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==273); - case 345: /* tag_list_opt ::= tag_list_opt NK_COMMA tag_item */ yytestcase(yyruleno==345); - case 369: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==369); - case 419: /* column_stream_def_list ::= column_stream_def_list NK_COMMA column_stream_def */ yytestcase(yyruleno==419); - case 497: /* tags_literal_list ::= tags_literal_list NK_COMMA tags_literal */ yytestcase(yyruleno==497); - case 522: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==522); - case 617: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==617); - case 699: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==699); - case 710: /* partition_list ::= partition_list NK_COMMA partition_item */ yytestcase(yyruleno==710); - case 773: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==773); -{ yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, yymsp[0].minor.yy974); } - yymsp[-2].minor.yy946 = yylhsminor.yy946; - break; - case 173: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ - case 174: /* retention ::= NK_MINUS NK_COLON NK_VARIABLE */ yytestcase(yyruleno==174); -{ yylhsminor.yy974 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 175: /* speed_opt ::= */ - case 402: /* bufsize_opt ::= */ yytestcase(yyruleno==402); -{ yymsp[1].minor.yy904 = 0; } - break; - case 176: /* speed_opt ::= BWLIMIT NK_INTEGER */ - case 403: /* bufsize_opt ::= BUFSIZE NK_INTEGER */ 
yytestcase(yyruleno==403); -{ yymsp[-1].minor.yy904 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); } - break; - case 178: /* start_opt ::= START WITH NK_INTEGER */ - case 182: /* end_opt ::= END WITH NK_INTEGER */ yytestcase(yyruleno==182); -{ yymsp[-2].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); } - break; - case 179: /* start_opt ::= START WITH NK_STRING */ - case 183: /* end_opt ::= END WITH NK_STRING */ yytestcase(yyruleno==183); -{ yymsp[-2].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } - break; - case 180: /* start_opt ::= START WITH TIMESTAMP NK_STRING */ - case 184: /* end_opt ::= END WITH TIMESTAMP NK_STRING */ yytestcase(yyruleno==184); -{ yymsp[-3].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } - break; - case 185: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ - case 188: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==188); -{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy569, yymsp[-5].minor.yy974, yymsp[-3].minor.yy946, yymsp[-1].minor.yy946, yymsp[0].minor.yy974); } - break; - case 186: /* cmd ::= CREATE TABLE multi_create_clause */ -{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy946); } - break; - case 187: /* cmd ::= CREATE TABLE not_exists_opt USING full_table_name NK_LP tag_list_opt NK_RP FILE NK_STRING */ -{ pCxt->pRootNode = createCreateSubTableFromFileClause(pCxt, yymsp[-7].minor.yy569, yymsp[-5].minor.yy974, yymsp[-3].minor.yy946, &yymsp[0].minor.yy0); } - break; - case 189: /* cmd ::= DROP TABLE with_opt multi_drop_clause */ -{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[-1].minor.yy569, yymsp[0].minor.yy946); } - break; - case 190: /* cmd ::= DROP STABLE with_opt exists_opt full_table_name */ -{ pCxt->pRootNode = 
createDropSuperTableStmt(pCxt, yymsp[-2].minor.yy569, yymsp[-1].minor.yy569, yymsp[0].minor.yy974); } - break; - case 191: /* cmd ::= ALTER TABLE alter_table_clause */ - case 454: /* cmd ::= query_or_subquery */ yytestcase(yyruleno==454); - case 455: /* cmd ::= insert_query */ yytestcase(yyruleno==455); -{ pCxt->pRootNode = yymsp[0].minor.yy974; } - break; - case 192: /* cmd ::= ALTER STABLE alter_table_clause */ -{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy974); } - break; - case 193: /* alter_table_clause ::= full_table_name alter_table_options */ -{ yylhsminor.yy974 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 194: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name column_options */ -{ yylhsminor.yy974 = createAlterTableAddModifyColOptions2(pCxt, yymsp[-5].minor.yy974, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-2].minor.yy557, yymsp[-1].minor.yy424, yymsp[0].minor.yy974); } - yymsp[-5].minor.yy974 = yylhsminor.yy974; - break; - case 195: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */ -{ yylhsminor.yy974 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy974, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy557); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 196: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ -{ yylhsminor.yy974 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy557, yymsp[0].minor.yy424); } - yymsp[-4].minor.yy974 = yylhsminor.yy974; - break; - case 197: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name column_options */ -{ yylhsminor.yy974 = createAlterTableAddModifyColOptions(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS, &yymsp[-1].minor.yy557, yymsp[0].minor.yy974); } - yymsp[-4].minor.yy974 = yylhsminor.yy974; - break; - case 198: /* 
alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ -{ yylhsminor.yy974 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy557, &yymsp[0].minor.yy557); } - yymsp[-4].minor.yy974 = yylhsminor.yy974; - break; - case 199: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */ -{ yylhsminor.yy974 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy557, yymsp[0].minor.yy424); } - yymsp[-4].minor.yy974 = yylhsminor.yy974; - break; - case 200: /* alter_table_clause ::= full_table_name DROP TAG column_name */ -{ yylhsminor.yy974 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy974, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy557); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 201: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ -{ yylhsminor.yy974 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy557, yymsp[0].minor.yy424); } - yymsp[-4].minor.yy974 = yylhsminor.yy974; - break; - case 202: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ -{ yylhsminor.yy974 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy557, &yymsp[0].minor.yy557); } - yymsp[-4].minor.yy974 = yylhsminor.yy974; - break; - case 203: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ tags_literal */ -{ yylhsminor.yy974 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy974, &yymsp[-2].minor.yy557, yymsp[0].minor.yy974); } - yymsp[-5].minor.yy974 = yylhsminor.yy974; - break; - case 205: /* multi_create_clause ::= multi_create_clause create_subtable_clause */ - case 623: /* when_then_list ::= when_then_list when_then_expr */ yytestcase(yyruleno==623); -{ yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-1].minor.yy946, 
yymsp[0].minor.yy974); } - yymsp[-1].minor.yy946 = yylhsminor.yy946; - break; - case 206: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP tags_literal_list NK_RP table_options */ -{ yylhsminor.yy974 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy569, yymsp[-8].minor.yy974, yymsp[-6].minor.yy974, yymsp[-5].minor.yy946, yymsp[-2].minor.yy946, yymsp[0].minor.yy974); } - yymsp[-9].minor.yy974 = yylhsminor.yy974; - break; - case 209: /* drop_table_clause ::= exists_opt full_table_name */ -{ yylhsminor.yy974 = createDropTableClause(pCxt, yymsp[-1].minor.yy569, yymsp[0].minor.yy974); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 213: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */ - case 417: /* col_list_opt ::= NK_LP column_stream_def_list NK_RP */ yytestcase(yyruleno==417); -{ yymsp[-2].minor.yy946 = yymsp[-1].minor.yy946; } - break; - case 214: /* full_table_name ::= table_name */ - case 358: /* full_tsma_name ::= tsma_name */ yytestcase(yyruleno==358); -{ yylhsminor.yy974 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy557, NULL); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 215: /* full_table_name ::= db_name NK_DOT table_name */ - case 359: /* full_tsma_name ::= db_name NK_DOT tsma_name */ yytestcase(yyruleno==359); -{ yylhsminor.yy974 = createRealTableNode(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557, NULL); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 218: /* tag_def ::= column_name type_name */ -{ yylhsminor.yy974 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy557, yymsp[0].minor.yy424, NULL); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 221: /* column_def ::= column_name type_name column_options */ -{ yylhsminor.yy974 = createColumnDefNode(pCxt, &yymsp[-2].minor.yy557, yymsp[-1].minor.yy424, yymsp[0].minor.yy974); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 222: /* type_name ::= BOOL */ -{ 
yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_BOOL); } - break; - case 223: /* type_name ::= TINYINT */ -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_TINYINT); } - break; - case 224: /* type_name ::= SMALLINT */ -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_SMALLINT); } - break; - case 225: /* type_name ::= INT */ - case 226: /* type_name ::= INTEGER */ yytestcase(yyruleno==226); -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_INT); } - break; - case 227: /* type_name ::= BIGINT */ -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_BIGINT); } - break; - case 228: /* type_name ::= FLOAT */ -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_FLOAT); } - break; - case 229: /* type_name ::= DOUBLE */ -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_DOUBLE); } - break; - case 230: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); } - break; - case 231: /* type_name ::= TIMESTAMP */ -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); } - break; - case 232: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); } - break; - case 233: /* type_name ::= TINYINT UNSIGNED */ -{ yymsp[-1].minor.yy424 = createDataType(TSDB_DATA_TYPE_UTINYINT); } - break; - case 234: /* type_name ::= SMALLINT UNSIGNED */ -{ yymsp[-1].minor.yy424 = createDataType(TSDB_DATA_TYPE_USMALLINT); } - break; - case 235: /* type_name ::= INT UNSIGNED */ -{ yymsp[-1].minor.yy424 = createDataType(TSDB_DATA_TYPE_UINT); } - break; - case 236: /* type_name ::= BIGINT UNSIGNED */ -{ yymsp[-1].minor.yy424 = createDataType(TSDB_DATA_TYPE_UBIGINT); } - break; - case 237: /* type_name ::= JSON */ -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_JSON); } - break; - case 238: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy424 = 
createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); } - break; - case 239: /* type_name ::= MEDIUMBLOB */ -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); } - break; - case 240: /* type_name ::= BLOB */ -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_BLOB); } - break; - case 241: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); } - break; - case 242: /* type_name ::= GEOMETRY NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_GEOMETRY, &yymsp[-1].minor.yy0); } - break; - case 243: /* type_name ::= DECIMAL */ -{ yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_DECIMAL); } - break; - case 244: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy424 = createDataType(TSDB_DATA_TYPE_DECIMAL); } - break; - case 245: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ -{ yymsp[-5].minor.yy424 = createDataType(TSDB_DATA_TYPE_DECIMAL); } - break; - case 246: /* type_name_default_len ::= BINARY */ -{ yymsp[0].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, NULL); } - break; - case 247: /* type_name_default_len ::= NCHAR */ -{ yymsp[0].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, NULL); } - break; - case 248: /* type_name_default_len ::= VARCHAR */ -{ yymsp[0].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, NULL); } - break; - case 249: /* type_name_default_len ::= VARBINARY */ -{ yymsp[0].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, NULL); } - break; - case 252: /* tags_def ::= TAGS NK_LP tag_def_list NK_RP */ - case 425: /* tag_def_or_ref_opt ::= TAGS NK_LP column_stream_def_list NK_RP */ yytestcase(yyruleno==425); -{ yymsp[-3].minor.yy946 = yymsp[-1].minor.yy946; } - break; - case 253: /* table_options ::= */ -{ yymsp[1].minor.yy974 = createDefaultTableOptions(pCxt); } - break; - case 254: /* 
table_options ::= table_options COMMENT NK_STRING */ -{ yylhsminor.yy974 = setTableOption(pCxt, yymsp[-2].minor.yy974, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 255: /* table_options ::= table_options MAX_DELAY duration_list */ -{ yylhsminor.yy974 = setTableOption(pCxt, yymsp[-2].minor.yy974, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy946); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 256: /* table_options ::= table_options WATERMARK duration_list */ -{ yylhsminor.yy974 = setTableOption(pCxt, yymsp[-2].minor.yy974, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy946); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 257: /* table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */ -{ yylhsminor.yy974 = setTableOption(pCxt, yymsp[-4].minor.yy974, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy946); } - yymsp[-4].minor.yy974 = yylhsminor.yy974; - break; - case 258: /* table_options ::= table_options TTL NK_INTEGER */ -{ yylhsminor.yy974 = setTableOption(pCxt, yymsp[-2].minor.yy974, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 259: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */ -{ yylhsminor.yy974 = setTableOption(pCxt, yymsp[-4].minor.yy974, TABLE_OPTION_SMA, yymsp[-1].minor.yy946); } - yymsp[-4].minor.yy974 = yylhsminor.yy974; - break; - case 260: /* table_options ::= table_options DELETE_MARK duration_list */ -{ yylhsminor.yy974 = setTableOption(pCxt, yymsp[-2].minor.yy974, TABLE_OPTION_DELETE_MARK, yymsp[0].minor.yy946); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 261: /* alter_table_options ::= alter_table_option */ -{ yylhsminor.yy974 = createAlterTableOptions(pCxt); yylhsminor.yy974 = setTableOption(pCxt, yylhsminor.yy974, yymsp[0].minor.yy683.type, &yymsp[0].minor.yy683.val); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 262: /* alter_table_options ::= 
alter_table_options alter_table_option */ -{ yylhsminor.yy974 = setTableOption(pCxt, yymsp[-1].minor.yy974, yymsp[0].minor.yy683.type, &yymsp[0].minor.yy683.val); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 263: /* alter_table_option ::= COMMENT NK_STRING */ -{ yymsp[-1].minor.yy683.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 264: /* alter_table_option ::= TTL NK_INTEGER */ -{ yymsp[-1].minor.yy683.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } - break; - case 265: /* duration_list ::= duration_literal */ - case 554: /* expression_list ::= expr_or_subquery */ yytestcase(yyruleno==554); -{ yylhsminor.yy946 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } - yymsp[0].minor.yy946 = yylhsminor.yy946; - break; - case 266: /* duration_list ::= duration_list NK_COMMA duration_literal */ - case 555: /* expression_list ::= expression_list NK_COMMA expr_or_subquery */ yytestcase(yyruleno==555); -{ yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } - yymsp[-2].minor.yy946 = yylhsminor.yy946; - break; - case 269: /* rollup_func_name ::= function_name */ -{ yylhsminor.yy974 = createFunctionNode(pCxt, &yymsp[0].minor.yy557, NULL); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 270: /* rollup_func_name ::= FIRST */ - case 271: /* rollup_func_name ::= LAST */ yytestcase(yyruleno==271); - case 347: /* tag_item ::= QTAGS */ yytestcase(yyruleno==347); -{ yylhsminor.yy974 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 274: /* col_name ::= column_name */ - case 348: /* tag_item ::= column_name */ yytestcase(yyruleno==348); -{ yylhsminor.yy974 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy557); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 275: /* cmd ::= SHOW DNODES */ -{ pCxt->pRootNode = 
createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT); } - break; - case 276: /* cmd ::= SHOW USERS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USERS_STMT); } - break; - case 277: /* cmd ::= SHOW USERS FULL */ -{ pCxt->pRootNode = createShowStmtWithFull(pCxt, QUERY_NODE_SHOW_USERS_FULL_STMT); } - break; - case 278: /* cmd ::= SHOW USER PRIVILEGES */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USER_PRIVILEGES_STMT); } - break; - case 279: /* cmd ::= SHOW db_kind_opt DATABASES */ -{ - pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT); - (void)setShowKind(pCxt, pCxt->pRootNode, yymsp[-1].minor.yy741); - } - break; - case 280: /* cmd ::= SHOW table_kind_db_name_cond_opt TABLES like_pattern_opt */ -{ - pCxt->pRootNode = createShowTablesStmt(pCxt, yymsp[-2].minor.yy595, yymsp[0].minor.yy974, OP_TYPE_LIKE); - } - break; - case 281: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy974, yymsp[0].minor.yy974, OP_TYPE_LIKE); } - break; - case 282: /* cmd ::= SHOW db_name_cond_opt VGROUPS */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy974, NULL, OP_TYPE_LIKE); } - break; - case 283: /* cmd ::= SHOW MNODES */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT); } - break; - case 284: /* cmd ::= SHOW QNODES */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QNODES_STMT); } - break; - case 285: /* cmd ::= SHOW ANODES */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_ANODES_STMT); } - break; - case 286: /* cmd ::= SHOW ANODES FULL */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_ANODES_FULL_STMT); } - break; - case 287: /* cmd ::= SHOW ARBGROUPS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_ARBGROUPS_STMT); } - break; - case 288: /* cmd ::= SHOW FUNCTIONS */ -{ pCxt->pRootNode = createShowStmt(pCxt, 
QUERY_NODE_SHOW_FUNCTIONS_STMT); } - break; - case 289: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy974, yymsp[-1].minor.yy974, OP_TYPE_EQUAL); } - break; - case 290: /* cmd ::= SHOW INDEXES FROM db_name NK_DOT table_name */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, createIdentifierValueNode(pCxt, &yymsp[-2].minor.yy557), createIdentifierValueNode(pCxt, &yymsp[0].minor.yy557), OP_TYPE_EQUAL); } - break; - case 291: /* cmd ::= SHOW STREAMS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT); } - break; - case 292: /* cmd ::= SHOW ACCOUNTS */ -{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } - break; - case 293: /* cmd ::= SHOW APPS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_APPS_STMT); } - break; - case 294: /* cmd ::= SHOW CONNECTIONS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONNECTIONS_STMT); } - break; - case 295: /* cmd ::= SHOW LICENCES */ - case 296: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==296); -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCES_STMT); } - break; - case 297: /* cmd ::= SHOW GRANTS FULL */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_GRANTS_FULL_STMT); } - break; - case 298: /* cmd ::= SHOW GRANTS LOGS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_GRANTS_LOGS_STMT); } - break; - case 299: /* cmd ::= SHOW CLUSTER MACHINES */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CLUSTER_MACHINES_STMT); } - break; - case 300: /* cmd ::= SHOW CREATE DATABASE db_name */ -{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy557); } - break; - case 301: /* cmd ::= SHOW CREATE TABLE full_table_name */ -{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy974); } - break; - case 
302: /* cmd ::= SHOW CREATE STABLE full_table_name */ -{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, -yymsp[0].minor.yy974); } - break; - case 303: /* cmd ::= SHOW ENCRYPTIONS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_ENCRYPTIONS_STMT); } - break; - case 304: /* cmd ::= SHOW QUERIES */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT); } - break; - case 305: /* cmd ::= SHOW SCORES */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SCORES_STMT); } - break; - case 306: /* cmd ::= SHOW TOPICS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TOPICS_STMT); } - break; - case 307: /* cmd ::= SHOW VARIABLES */ - case 308: /* cmd ::= SHOW CLUSTER VARIABLES */ yytestcase(yyruleno==308); -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VARIABLES_STMT); } - break; - case 309: /* cmd ::= SHOW LOCAL VARIABLES */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT); } - break; - case 310: /* cmd ::= SHOW DNODE NK_INTEGER VARIABLES like_pattern_opt */ -{ pCxt->pRootNode = createShowDnodeVariablesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[-2].minor.yy0), yymsp[0].minor.yy974); } - break; - case 311: /* cmd ::= SHOW BNODES */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_BNODES_STMT); } - break; - case 312: /* cmd ::= SHOW SNODES */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SNODES_STMT); } - break; - case 313: /* cmd ::= SHOW CLUSTER */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CLUSTER_STMT); } - break; - case 314: /* cmd ::= SHOW TRANSACTIONS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT); } - break; - case 315: /* cmd ::= SHOW TABLE DISTRIBUTED full_table_name */ -{ pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy974); } - break; - case 316: /* cmd ::= SHOW CONSUMERS */ -{ pCxt->pRootNode = createShowStmt(pCxt, 
QUERY_NODE_SHOW_CONSUMERS_STMT); } - break; - case 317: /* cmd ::= SHOW SUBSCRIPTIONS */ -{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); } - break; - case 318: /* cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy974, yymsp[-1].minor.yy974, OP_TYPE_EQUAL); } - break; - case 319: /* cmd ::= SHOW TAGS FROM db_name NK_DOT table_name */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, createIdentifierValueNode(pCxt, &yymsp[-2].minor.yy557), createIdentifierValueNode(pCxt, &yymsp[0].minor.yy557), OP_TYPE_EQUAL); } - break; - case 320: /* cmd ::= SHOW TABLE TAGS tag_list_opt FROM table_name_cond from_db_opt */ -{ pCxt->pRootNode = createShowTableTagsStmt(pCxt, yymsp[-1].minor.yy974, yymsp[0].minor.yy974, yymsp[-3].minor.yy946); } - break; - case 321: /* cmd ::= SHOW TABLE TAGS tag_list_opt FROM db_name NK_DOT table_name */ -{ pCxt->pRootNode = createShowTableTagsStmt(pCxt, createIdentifierValueNode(pCxt, &yymsp[0].minor.yy557), createIdentifierValueNode(pCxt, &yymsp[-2].minor.yy557), yymsp[-4].minor.yy946); } - break; - case 322: /* cmd ::= SHOW VNODES ON DNODE NK_INTEGER */ -{ pCxt->pRootNode = createShowVnodesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0), NULL); } - break; - case 323: /* cmd ::= SHOW VNODES */ -{ pCxt->pRootNode = createShowVnodesStmt(pCxt, NULL, NULL); } - break; - case 324: /* cmd ::= SHOW db_name_cond_opt ALIVE */ -{ pCxt->pRootNode = createShowAliveStmt(pCxt, yymsp[-1].minor.yy974, QUERY_NODE_SHOW_DB_ALIVE_STMT); } - break; - case 325: /* cmd ::= SHOW CLUSTER ALIVE */ -{ pCxt->pRootNode = createShowAliveStmt(pCxt, NULL, QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT); } - break; - case 326: /* cmd ::= SHOW db_name_cond_opt VIEWS like_pattern_opt */ -{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VIEWS_STMT, yymsp[-2].minor.yy974, yymsp[0].minor.yy974, 
OP_TYPE_LIKE); } - break; - case 327: /* cmd ::= SHOW CREATE VIEW full_table_name */ -{ pCxt->pRootNode = createShowCreateViewStmt(pCxt, QUERY_NODE_SHOW_CREATE_VIEW_STMT, yymsp[0].minor.yy974); } - break; - case 328: /* cmd ::= SHOW COMPACTS */ -{ pCxt->pRootNode = createShowCompactsStmt(pCxt, QUERY_NODE_SHOW_COMPACTS_STMT); } - break; - case 329: /* cmd ::= SHOW COMPACT NK_INTEGER */ -{ pCxt->pRootNode = createShowCompactDetailsStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - break; - case 330: /* table_kind_db_name_cond_opt ::= */ -{ yymsp[1].minor.yy595.kind = SHOW_KIND_ALL; yymsp[1].minor.yy595.dbName = nil_token; } - break; - case 331: /* table_kind_db_name_cond_opt ::= table_kind */ -{ yylhsminor.yy595.kind = yymsp[0].minor.yy741; yylhsminor.yy595.dbName = nil_token; } - yymsp[0].minor.yy595 = yylhsminor.yy595; - break; - case 332: /* table_kind_db_name_cond_opt ::= db_name NK_DOT */ -{ yylhsminor.yy595.kind = SHOW_KIND_ALL; yylhsminor.yy595.dbName = yymsp[-1].minor.yy557; } - yymsp[-1].minor.yy595 = yylhsminor.yy595; - break; - case 333: /* table_kind_db_name_cond_opt ::= table_kind db_name NK_DOT */ -{ yylhsminor.yy595.kind = yymsp[-2].minor.yy741; yylhsminor.yy595.dbName = yymsp[-1].minor.yy557; } - yymsp[-2].minor.yy595 = yylhsminor.yy595; - break; - case 334: /* table_kind ::= NORMAL */ -{ yymsp[0].minor.yy741 = SHOW_KIND_TABLES_NORMAL; } - break; - case 335: /* table_kind ::= CHILD */ -{ yymsp[0].minor.yy741 = SHOW_KIND_TABLES_CHILD; } - break; - case 336: /* db_name_cond_opt ::= */ - case 341: /* from_db_opt ::= */ yytestcase(yyruleno==341); -{ yymsp[1].minor.yy974 = createDefaultDatabaseCondValue(pCxt); } - break; - case 337: /* db_name_cond_opt ::= db_name NK_DOT */ -{ yylhsminor.yy974 = createIdentifierValueNode(pCxt, &yymsp[-1].minor.yy557); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 339: /* like_pattern_opt ::= LIKE NK_STRING */ -{ yymsp[-1].minor.yy974 = createValueNode(pCxt, 
TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } - break; - case 340: /* table_name_cond ::= table_name */ -{ yylhsminor.yy974 = createIdentifierValueNode(pCxt, &yymsp[0].minor.yy557); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 342: /* from_db_opt ::= FROM db_name */ -{ yymsp[-1].minor.yy974 = createIdentifierValueNode(pCxt, &yymsp[0].minor.yy557); } - break; - case 346: /* tag_item ::= TBNAME */ -{ yylhsminor.yy974 = setProjectionAlias(pCxt, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL), &yymsp[0].minor.yy0); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 349: /* tag_item ::= column_name column_alias */ -{ yylhsminor.yy974 = setProjectionAlias(pCxt, createColumnNode(pCxt, NULL, &yymsp[-1].minor.yy557), &yymsp[0].minor.yy557); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 350: /* tag_item ::= column_name AS column_alias */ -{ yylhsminor.yy974 = setProjectionAlias(pCxt, createColumnNode(pCxt, NULL, &yymsp[-2].minor.yy557), &yymsp[0].minor.yy557); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 351: /* db_kind_opt ::= */ -{ yymsp[1].minor.yy741 = SHOW_KIND_ALL; } - break; - case 352: /* db_kind_opt ::= USER */ -{ yymsp[0].minor.yy741 = SHOW_KIND_DATABASES_USER; } - break; - case 353: /* db_kind_opt ::= SYSTEM */ -{ yymsp[0].minor.yy741 = SHOW_KIND_DATABASES_SYSTEM; } - break; - case 354: /* cmd ::= CREATE TSMA not_exists_opt tsma_name ON full_table_name tsma_func_list INTERVAL NK_LP duration_literal NK_RP */ -{ pCxt->pRootNode = createCreateTSMAStmt(pCxt, yymsp[-8].minor.yy569, &yymsp[-7].minor.yy557, yymsp[-4].minor.yy974, yymsp[-5].minor.yy974, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } - break; - case 355: /* cmd ::= CREATE RECURSIVE TSMA not_exists_opt tsma_name ON full_table_name INTERVAL NK_LP duration_literal NK_RP */ -{ pCxt->pRootNode = createCreateTSMAStmt(pCxt, yymsp[-7].minor.yy569, &yymsp[-6].minor.yy557, NULL, yymsp[-4].minor.yy974, releaseRawExprNode(pCxt, 
yymsp[-1].minor.yy974)); } - break; - case 356: /* cmd ::= DROP TSMA exists_opt full_tsma_name */ -{ pCxt->pRootNode = createDropTSMAStmt(pCxt, yymsp[-1].minor.yy569, yymsp[0].minor.yy974); } - break; - case 357: /* cmd ::= SHOW db_name_cond_opt TSMAS */ -{ pCxt->pRootNode = createShowTSMASStmt(pCxt, yymsp[-1].minor.yy974); } - break; - case 360: /* tsma_func_list ::= FUNCTION NK_LP func_list NK_RP */ -{ yymsp[-3].minor.yy974 = createTSMAOptions(pCxt, yymsp[-1].minor.yy946); } - break; - case 361: /* cmd ::= CREATE SMA INDEX not_exists_opt col_name ON full_table_name index_options */ -{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy569, yymsp[-3].minor.yy974, yymsp[-1].minor.yy974, NULL, yymsp[0].minor.yy974); } - break; - case 362: /* cmd ::= CREATE INDEX not_exists_opt col_name ON full_table_name NK_LP col_name_list NK_RP */ -{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_NORMAL, yymsp[-6].minor.yy569, yymsp[-5].minor.yy974, yymsp[-3].minor.yy974, yymsp[-1].minor.yy946, NULL); } - break; - case 363: /* cmd ::= DROP INDEX exists_opt full_index_name */ -{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy569, yymsp[0].minor.yy974); } - break; - case 364: /* full_index_name ::= index_name */ -{ yylhsminor.yy974 = createRealTableNodeForIndexName(pCxt, NULL, &yymsp[0].minor.yy557); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 365: /* full_index_name ::= db_name NK_DOT index_name */ -{ yylhsminor.yy974 = createRealTableNodeForIndexName(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 366: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */ -{ yymsp[-9].minor.yy974 = createIndexOption(pCxt, yymsp[-7].minor.yy946, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), NULL, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } - break; - case 367: /* index_options ::= FUNCTION 
NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */ -{ yymsp[-11].minor.yy974 = createIndexOption(pCxt, yymsp[-9].minor.yy946, releaseRawExprNode(pCxt, yymsp[-5].minor.yy974), releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } - break; - case 370: /* func ::= sma_func_name NK_LP expression_list NK_RP */ -{ yylhsminor.yy974 = createFunctionNode(pCxt, &yymsp[-3].minor.yy557, yymsp[-1].minor.yy946); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 371: /* sma_func_name ::= function_name */ - case 666: /* alias_opt ::= table_alias */ yytestcase(yyruleno==666); -{ yylhsminor.yy557 = yymsp[0].minor.yy557; } - yymsp[0].minor.yy557 = yylhsminor.yy557; - break; - case 376: /* sma_stream_opt ::= */ - case 426: /* stream_options ::= */ yytestcase(yyruleno==426); -{ yymsp[1].minor.yy974 = createStreamOptions(pCxt); } - break; - case 377: /* sma_stream_opt ::= sma_stream_opt WATERMARK duration_literal */ -{ ((SStreamOptions*)yymsp[-2].minor.yy974)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = yymsp[-2].minor.yy974; } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 378: /* sma_stream_opt ::= sma_stream_opt MAX_DELAY duration_literal */ -{ ((SStreamOptions*)yymsp[-2].minor.yy974)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = yymsp[-2].minor.yy974; } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 379: /* sma_stream_opt ::= sma_stream_opt DELETE_MARK duration_literal */ -{ ((SStreamOptions*)yymsp[-2].minor.yy974)->pDeleteMark = releaseRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = yymsp[-2].minor.yy974; } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 380: /* with_meta ::= AS */ -{ yymsp[0].minor.yy904 = 0; } - break; - case 381: /* with_meta ::= WITH META AS */ -{ yymsp[-2].minor.yy904 = 1; } - break; - case 382: /* with_meta ::= ONLY META AS */ 
-{ yymsp[-2].minor.yy904 = 2; } - break; - case 383: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery */ -{ pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy569, &yymsp[-2].minor.yy557, yymsp[0].minor.yy974); } - break; - case 384: /* cmd ::= CREATE TOPIC not_exists_opt topic_name with_meta DATABASE db_name */ -{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy569, &yymsp[-3].minor.yy557, &yymsp[0].minor.yy557, yymsp[-2].minor.yy904); } - break; - case 385: /* cmd ::= CREATE TOPIC not_exists_opt topic_name with_meta STABLE full_table_name where_clause_opt */ -{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-5].minor.yy569, &yymsp[-4].minor.yy557, yymsp[-1].minor.yy974, yymsp[-3].minor.yy904, yymsp[0].minor.yy974); } - break; - case 386: /* cmd ::= DROP TOPIC exists_opt topic_name */ -{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } - break; - case 387: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */ -{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy569, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557); } - break; - case 388: /* cmd ::= DESC full_table_name */ - case 389: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==389); -{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy974); } - break; - case 390: /* cmd ::= RESET QUERY CACHE */ -{ pCxt->pRootNode = createResetQueryCacheStmt(pCxt); } - break; - case 391: /* cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery */ - case 392: /* cmd ::= EXPLAIN analyze_opt explain_options insert_query */ yytestcase(yyruleno==392); -{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy569, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } - break; - case 395: /* explain_options ::= */ -{ yymsp[1].minor.yy974 = createDefaultExplainOptions(pCxt); } - break; - case 396: /* explain_options ::= explain_options VERBOSE NK_BOOL */ -{ 
yylhsminor.yy974 = setExplainVerbose(pCxt, yymsp[-2].minor.yy974, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 397: /* explain_options ::= explain_options RATIO NK_FLOAT */ -{ yylhsminor.yy974 = setExplainRatio(pCxt, yymsp[-2].minor.yy974, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 398: /* cmd ::= CREATE or_replace_opt agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt language_opt */ -{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-7].minor.yy569, yymsp[-9].minor.yy569, &yymsp[-6].minor.yy557, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy424, yymsp[-1].minor.yy904, &yymsp[0].minor.yy557, yymsp[-10].minor.yy569); } - break; - case 399: /* cmd ::= DROP FUNCTION exists_opt function_name */ -{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } - break; - case 404: /* language_opt ::= */ - case 449: /* on_vgroup_id ::= */ yytestcase(yyruleno==449); -{ yymsp[1].minor.yy557 = nil_token; } - break; - case 405: /* language_opt ::= LANGUAGE NK_STRING */ - case 450: /* on_vgroup_id ::= ON NK_INTEGER */ yytestcase(yyruleno==450); -{ yymsp[-1].minor.yy557 = yymsp[0].minor.yy0; } - break; - case 408: /* cmd ::= CREATE or_replace_opt VIEW full_view_name AS query_or_subquery */ -{ pCxt->pRootNode = createCreateViewStmt(pCxt, yymsp[-4].minor.yy569, yymsp[-2].minor.yy974, &yymsp[-1].minor.yy0, yymsp[0].minor.yy974); } - break; - case 409: /* cmd ::= DROP VIEW exists_opt full_view_name */ -{ pCxt->pRootNode = createDropViewStmt(pCxt, yymsp[-1].minor.yy569, yymsp[0].minor.yy974); } - break; - case 410: /* full_view_name ::= view_name */ -{ yylhsminor.yy974 = createViewNode(pCxt, NULL, &yymsp[0].minor.yy557); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 411: /* full_view_name ::= db_name NK_DOT view_name */ -{ yylhsminor.yy974 = createViewNode(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557); 
} - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 412: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name col_list_opt tag_def_or_ref_opt subtable_opt AS query_or_subquery */ -{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-9].minor.yy569, &yymsp[-8].minor.yy557, yymsp[-5].minor.yy974, yymsp[-7].minor.yy974, yymsp[-3].minor.yy946, yymsp[-2].minor.yy974, yymsp[0].minor.yy974, yymsp[-4].minor.yy946); } - break; - case 413: /* cmd ::= DROP STREAM exists_opt stream_name */ -{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } - break; - case 414: /* cmd ::= PAUSE STREAM exists_opt stream_name */ -{ pCxt->pRootNode = createPauseStreamStmt(pCxt, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } - break; - case 415: /* cmd ::= RESUME STREAM exists_opt ignore_opt stream_name */ -{ pCxt->pRootNode = createResumeStreamStmt(pCxt, yymsp[-2].minor.yy569, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } - break; - case 420: /* column_stream_def ::= column_name stream_col_options */ -{ yylhsminor.yy974 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy557, createDataType(TSDB_DATA_TYPE_NULL), yymsp[0].minor.yy974); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 421: /* stream_col_options ::= */ - case 781: /* column_options ::= */ yytestcase(yyruleno==781); -{ yymsp[1].minor.yy974 = createDefaultColumnOptions(pCxt); } - break; - case 422: /* stream_col_options ::= stream_col_options PRIMARY KEY */ - case 782: /* column_options ::= column_options PRIMARY KEY */ yytestcase(yyruleno==782); -{ yylhsminor.yy974 = setColumnOptionsPK(pCxt, yymsp[-2].minor.yy974); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 427: /* stream_options ::= stream_options TRIGGER AT_ONCE */ - case 428: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ yytestcase(yyruleno==428); -{ yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-2].minor.yy974, SOPT_TRIGGER_TYPE_SET, 
&yymsp[0].minor.yy0, NULL); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 429: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ -{ yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-3].minor.yy974, SOPT_TRIGGER_TYPE_SET, &yymsp[-1].minor.yy0, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 430: /* stream_options ::= stream_options WATERMARK duration_literal */ -{ yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-2].minor.yy974, SOPT_WATERMARK_SET, NULL, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 431: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ -{ yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-3].minor.yy974, SOPT_IGNORE_EXPIRED_SET, &yymsp[0].minor.yy0, NULL); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 432: /* stream_options ::= stream_options FILL_HISTORY NK_INTEGER */ -{ yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-2].minor.yy974, SOPT_FILL_HISTORY_SET, &yymsp[0].minor.yy0, NULL); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 433: /* stream_options ::= stream_options DELETE_MARK duration_literal */ -{ yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-2].minor.yy974, SOPT_DELETE_MARK_SET, NULL, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 434: /* stream_options ::= stream_options IGNORE UPDATE NK_INTEGER */ -{ yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-3].minor.yy974, SOPT_IGNORE_UPDATE_SET, &yymsp[0].minor.yy0, NULL); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 436: /* subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ - case 725: /* sliding_opt ::= SLIDING NK_LP interval_sliding_duration_literal NK_RP */ yytestcase(yyruleno==725); - case 749: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==749); -{ 
yymsp[-3].minor.yy974 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy974); } - break; - case 439: /* cmd ::= KILL CONNECTION NK_INTEGER */ -{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); } - break; - case 440: /* cmd ::= KILL QUERY NK_STRING */ -{ pCxt->pRootNode = createKillQueryStmt(pCxt, &yymsp[0].minor.yy0); } - break; - case 441: /* cmd ::= KILL TRANSACTION NK_INTEGER */ -{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); } - break; - case 442: /* cmd ::= KILL COMPACT NK_INTEGER */ -{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_COMPACT_STMT, &yymsp[0].minor.yy0); } - break; - case 443: /* cmd ::= BALANCE VGROUP */ -{ pCxt->pRootNode = createBalanceVgroupStmt(pCxt); } - break; - case 444: /* cmd ::= BALANCE VGROUP LEADER on_vgroup_id */ -{ pCxt->pRootNode = createBalanceVgroupLeaderStmt(pCxt, &yymsp[0].minor.yy557); } - break; - case 445: /* cmd ::= BALANCE VGROUP LEADER DATABASE db_name */ -{ pCxt->pRootNode = createBalanceVgroupLeaderDBNameStmt(pCxt, &yymsp[0].minor.yy557); } - break; - case 446: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ -{ pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } - break; - case 447: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ -{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy946); } - break; - case 448: /* cmd ::= SPLIT VGROUP NK_INTEGER */ -{ pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); } - break; - case 451: /* dnode_list ::= DNODE NK_INTEGER */ -{ yymsp[-1].minor.yy946 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } - break; - case 453: /* cmd ::= DELETE FROM full_table_name where_clause_opt */ -{ pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } - break; - case 456: /* insert_query ::= INSERT INTO full_table_name 
NK_LP col_name_list NK_RP query_or_subquery */ -{ yymsp[-6].minor.yy974 = createInsertStmt(pCxt, yymsp[-4].minor.yy974, yymsp[-2].minor.yy946, yymsp[0].minor.yy974); } - break; - case 457: /* insert_query ::= INSERT INTO full_table_name query_or_subquery */ -{ yymsp[-3].minor.yy974 = createInsertStmt(pCxt, yymsp[-1].minor.yy974, NULL, yymsp[0].minor.yy974); } - break; - case 458: /* tags_literal ::= NK_INTEGER */ - case 470: /* tags_literal ::= NK_BIN */ yytestcase(yyruleno==470); - case 479: /* tags_literal ::= NK_HEX */ yytestcase(yyruleno==479); -{ yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0, NULL); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 459: /* tags_literal ::= NK_INTEGER NK_PLUS duration_literal */ - case 460: /* tags_literal ::= NK_INTEGER NK_MINUS duration_literal */ yytestcase(yyruleno==460); - case 471: /* tags_literal ::= NK_BIN NK_PLUS duration_literal */ yytestcase(yyruleno==471); - case 472: /* tags_literal ::= NK_BIN NK_MINUS duration_literal */ yytestcase(yyruleno==472); - case 480: /* tags_literal ::= NK_HEX NK_PLUS duration_literal */ yytestcase(yyruleno==480); - case 481: /* tags_literal ::= NK_HEX NK_MINUS duration_literal */ yytestcase(yyruleno==481); - case 489: /* tags_literal ::= NK_STRING NK_PLUS duration_literal */ yytestcase(yyruleno==489); - case 490: /* tags_literal ::= NK_STRING NK_MINUS duration_literal */ yytestcase(yyruleno==490); -{ - SToken l = yymsp[-2].minor.yy0; - SToken r = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - l.n = (r.z + r.n) - l.z; - yylhsminor.yy974 = createRawValueNodeExt(pCxt, TSDB_DATA_TYPE_BINARY, &l, NULL, yymsp[0].minor.yy974); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 461: /* tags_literal ::= NK_PLUS NK_INTEGER */ - case 464: /* tags_literal ::= NK_MINUS NK_INTEGER */ yytestcase(yyruleno==464); - case 473: /* tags_literal ::= NK_PLUS NK_BIN */ yytestcase(yyruleno==473); - case 476: /* tags_literal ::= NK_MINUS 
NK_BIN */ yytestcase(yyruleno==476); - case 482: /* tags_literal ::= NK_PLUS NK_HEX */ yytestcase(yyruleno==482); - case 485: /* tags_literal ::= NK_MINUS NK_HEX */ yytestcase(yyruleno==485); -{ - SToken t = yymsp[-1].minor.yy0; - t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &t, NULL); - } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 462: /* tags_literal ::= NK_PLUS NK_INTEGER NK_PLUS duration_literal */ - case 463: /* tags_literal ::= NK_PLUS NK_INTEGER NK_MINUS duration_literal */ yytestcase(yyruleno==463); - case 465: /* tags_literal ::= NK_MINUS NK_INTEGER NK_PLUS duration_literal */ yytestcase(yyruleno==465); - case 466: /* tags_literal ::= NK_MINUS NK_INTEGER NK_MINUS duration_literal */ yytestcase(yyruleno==466); - case 474: /* tags_literal ::= NK_PLUS NK_BIN NK_PLUS duration_literal */ yytestcase(yyruleno==474); - case 475: /* tags_literal ::= NK_PLUS NK_BIN NK_MINUS duration_literal */ yytestcase(yyruleno==475); - case 477: /* tags_literal ::= NK_MINUS NK_BIN NK_PLUS duration_literal */ yytestcase(yyruleno==477); - case 478: /* tags_literal ::= NK_MINUS NK_BIN NK_MINUS duration_literal */ yytestcase(yyruleno==478); - case 483: /* tags_literal ::= NK_PLUS NK_HEX NK_PLUS duration_literal */ yytestcase(yyruleno==483); - case 484: /* tags_literal ::= NK_PLUS NK_HEX NK_MINUS duration_literal */ yytestcase(yyruleno==484); - case 486: /* tags_literal ::= NK_MINUS NK_HEX NK_PLUS duration_literal */ yytestcase(yyruleno==486); - case 487: /* tags_literal ::= NK_MINUS NK_HEX NK_MINUS duration_literal */ yytestcase(yyruleno==487); -{ - SToken l = yymsp[-3].minor.yy0; - SToken r = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - l.n = (r.z + r.n) - l.z; - yylhsminor.yy974 = createRawValueNodeExt(pCxt, TSDB_DATA_TYPE_BINARY, &l, NULL, yymsp[0].minor.yy974); - } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 467: /* tags_literal ::= 
NK_FLOAT */ -{ yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0, NULL); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 468: /* tags_literal ::= NK_PLUS NK_FLOAT */ - case 469: /* tags_literal ::= NK_MINUS NK_FLOAT */ yytestcase(yyruleno==469); -{ - SToken t = yymsp[-1].minor.yy0; - t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t, NULL); - } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 488: /* tags_literal ::= NK_STRING */ -{ yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0, NULL); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 491: /* tags_literal ::= NK_BOOL */ -{ yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0, NULL); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 492: /* tags_literal ::= NULL */ -{ yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0, NULL); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 493: /* tags_literal ::= literal_func */ -{ yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_BINARY, NULL, yymsp[0].minor.yy974); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 494: /* tags_literal ::= literal_func NK_PLUS duration_literal */ - case 495: /* tags_literal ::= literal_func NK_MINUS duration_literal */ yytestcase(yyruleno==495); -{ - SToken l = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken r = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - l.n = (r.z + r.n) - l.z; - yylhsminor.yy974 = createRawValueNodeExt(pCxt, TSDB_DATA_TYPE_BINARY, &l, yymsp[-2].minor.yy974, yymsp[0].minor.yy974); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 498: /* literal ::= NK_INTEGER */ -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, 
&yymsp[0].minor.yy0)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 499: /* literal ::= NK_FLOAT */ -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 500: /* literal ::= NK_STRING */ -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 501: /* literal ::= NK_BOOL */ -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 502: /* literal ::= TIMESTAMP NK_STRING */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 503: /* literal ::= duration_literal */ - case 513: /* signed_literal ::= signed */ yytestcase(yyruleno==513); - case 537: /* expr_or_subquery ::= expression */ yytestcase(yyruleno==537); - case 538: /* expression ::= literal */ yytestcase(yyruleno==538); - case 540: /* expression ::= column_reference */ yytestcase(yyruleno==540); - case 541: /* expression ::= function_expression */ yytestcase(yyruleno==541); - case 542: /* expression ::= case_when_expression */ yytestcase(yyruleno==542); - case 588: /* function_expression ::= literal_func */ yytestcase(yyruleno==588); - case 589: /* function_expression ::= rand_func */ yytestcase(yyruleno==589); - case 647: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==647); - case 651: /* boolean_primary ::= predicate */ yytestcase(yyruleno==651); - case 653: /* common_expression ::= expr_or_subquery */ yytestcase(yyruleno==653); - case 654: /* common_expression ::= boolean_value_expression 
*/ yytestcase(yyruleno==654); - case 657: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==657); - case 659: /* table_reference ::= table_primary */ yytestcase(yyruleno==659); - case 660: /* table_reference ::= joined_table */ yytestcase(yyruleno==660); - case 664: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==664); - case 751: /* query_simple ::= query_specification */ yytestcase(yyruleno==751); - case 752: /* query_simple ::= union_query_expression */ yytestcase(yyruleno==752); - case 755: /* query_simple_or_subquery ::= query_simple */ yytestcase(yyruleno==755); - case 757: /* query_or_subquery ::= query_expression */ yytestcase(yyruleno==757); -{ yylhsminor.yy974 = yymsp[0].minor.yy974; } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 504: /* literal ::= NULL */ -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 505: /* literal ::= NK_QUESTION */ -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 506: /* duration_literal ::= NK_VARIABLE */ - case 726: /* interval_sliding_duration_literal ::= NK_VARIABLE */ yytestcase(yyruleno==726); - case 727: /* interval_sliding_duration_literal ::= NK_STRING */ yytestcase(yyruleno==727); - case 728: /* interval_sliding_duration_literal ::= NK_INTEGER */ yytestcase(yyruleno==728); -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 507: /* signed ::= NK_INTEGER */ -{ yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 508: /* signed ::= NK_PLUS NK_INTEGER */ -{ 
yymsp[-1].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } - break; - case 509: /* signed ::= NK_MINUS NK_INTEGER */ -{ - SToken t = yymsp[-1].minor.yy0; - t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t); - } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 510: /* signed ::= NK_FLOAT */ -{ yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 511: /* signed ::= NK_PLUS NK_FLOAT */ -{ yymsp[-1].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } - break; - case 512: /* signed ::= NK_MINUS NK_FLOAT */ -{ - SToken t = yymsp[-1].minor.yy0; - t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t); - } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 514: /* signed_literal ::= NK_STRING */ -{ yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 515: /* signed_literal ::= NK_BOOL */ -{ yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 516: /* signed_literal ::= TIMESTAMP NK_STRING */ -{ yymsp[-1].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } - break; - case 517: /* signed_literal ::= duration_literal */ - case 519: /* signed_literal ::= literal_func */ yytestcase(yyruleno==519); - case 618: /* star_func_para ::= expr_or_subquery */ yytestcase(yyruleno==618); - case 701: /* select_item ::= common_expression */ yytestcase(yyruleno==701); - case 711: /* partition_item ::= expr_or_subquery */ yytestcase(yyruleno==711); - case 756: /* query_simple_or_subquery ::= subquery */ 
yytestcase(yyruleno==756); - case 758: /* query_or_subquery ::= subquery */ yytestcase(yyruleno==758); - case 771: /* search_condition ::= common_expression */ yytestcase(yyruleno==771); -{ yylhsminor.yy974 = releaseRawExprNode(pCxt, yymsp[0].minor.yy974); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 518: /* signed_literal ::= NULL */ -{ yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 520: /* signed_literal ::= NK_QUESTION */ -{ yylhsminor.yy974 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 539: /* expression ::= pseudo_column */ -{ yylhsminor.yy974 = yymsp[0].minor.yy974; (void)setRawExprNodeIsPseudoColumn(pCxt, yylhsminor.yy974, true); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 543: /* expression ::= NK_LP expression NK_RP */ - case 652: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==652); - case 770: /* subquery ::= NK_LP subquery NK_RP */ yytestcase(yyruleno==770); -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 544: /* expression ::= NK_PLUS expr_or_subquery */ -{ - SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); - } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 545: /* expression ::= NK_MINUS expr_or_subquery */ -{ - SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy974), NULL)); - } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 546: /* 
expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 547: /* expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 548: /* expression ::= expr_or_subquery NK_STAR expr_or_subquery */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 549: /* expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 550: /* expression ::= expr_or_subquery NK_REM expr_or_subquery */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken e = 
getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 551: /* expression ::= column_reference NK_ARROW NK_STRING */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 552: /* expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 553: /* expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 556: /* column_reference ::= column_name */ -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy557, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy557)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 557: /* column_reference ::= table_name NK_DOT column_name */ -{ yylhsminor.yy974 = 
createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557, createColumnNode(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557)); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 558: /* column_reference ::= NK_ALIAS */ -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 559: /* column_reference ::= table_name NK_DOT NK_ALIAS */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0, createColumnNode(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0)); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 560: /* pseudo_column ::= ROWTS */ - case 561: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==561); - case 563: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==563); - case 564: /* pseudo_column ::= QEND */ yytestcase(yyruleno==564); - case 565: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==565); - case 566: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==566); - case 567: /* pseudo_column ::= WEND */ yytestcase(yyruleno==567); - case 568: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==568); - case 569: /* pseudo_column ::= IROWTS */ yytestcase(yyruleno==569); - case 570: /* pseudo_column ::= ISFILLED */ yytestcase(yyruleno==570); - case 571: /* pseudo_column ::= QTAGS */ yytestcase(yyruleno==571); - case 572: /* pseudo_column ::= FLOW */ yytestcase(yyruleno==572); - case 573: /* pseudo_column ::= FHIGH */ yytestcase(yyruleno==573); - case 574: /* pseudo_column ::= FROWTS */ yytestcase(yyruleno==574); - case 591: /* literal_func ::= NOW */ yytestcase(yyruleno==591); - case 592: /* literal_func ::= TODAY */ yytestcase(yyruleno==592); -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 562: /* pseudo_column 
::= table_name NK_DOT TBNAME */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy557)))); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 575: /* function_expression ::= function_name NK_LP expression_list NK_RP */ - case 576: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==576); - case 584: /* function_expression ::= substr_func NK_LP expression_list NK_RP */ yytestcase(yyruleno==584); -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy557, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy557, yymsp[-1].minor.yy946)); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 577: /* function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ - case 578: /* function_expression ::= CAST NK_LP expr_or_subquery AS type_name_default_len NK_RP */ yytestcase(yyruleno==578); -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), yymsp[-1].minor.yy424)); } - yymsp[-5].minor.yy974 = yylhsminor.yy974; - break; - case 579: /* function_expression ::= POSITION NK_LP expr_or_subquery IN expr_or_subquery NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createPositionFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974))); } - yymsp[-5].minor.yy974 = yylhsminor.yy974; - break; - case 580: /* function_expression ::= TRIM NK_LP expr_or_subquery NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), TRIM_TYPE_BOTH)); } - yymsp[-3].minor.yy974 = 
yylhsminor.yy974; - break; - case 581: /* function_expression ::= TRIM NK_LP trim_specification_type FROM expr_or_subquery NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), yymsp[-3].minor.yy300)); } - yymsp[-5].minor.yy974 = yylhsminor.yy974; - break; - case 582: /* function_expression ::= TRIM NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), TRIM_TYPE_BOTH)); } - yymsp[-5].minor.yy974 = yylhsminor.yy974; - break; - case 583: /* function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-6].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), yymsp[-4].minor.yy300)); } - yymsp[-6].minor.yy974 = yylhsminor.yy974; - break; - case 585: /* function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy557, &yymsp[0].minor.yy0, createSubstrFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974))); } - yymsp[-5].minor.yy974 = yylhsminor.yy974; - break; - case 586: /* function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-7].minor.yy557, &yymsp[0].minor.yy0, createSubstrFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy974), releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974))); } - 
yymsp[-7].minor.yy974 = yylhsminor.yy974; - break; - case 587: /* function_expression ::= REPLACE NK_LP expression_list NK_RP */ - case 594: /* rand_func ::= RAND NK_LP expression_list NK_RP */ yytestcase(yyruleno==594); -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy946)); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 590: /* literal_func ::= noarg_func NK_LP NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy557, NULL)); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 593: /* rand_func ::= RAND NK_LP NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy0, NULL)); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 597: /* trim_specification_type ::= BOTH */ -{ yymsp[0].minor.yy300 = TRIM_TYPE_BOTH; } - break; - case 598: /* trim_specification_type ::= TRAILING */ -{ yymsp[0].minor.yy300 = TRIM_TYPE_TRAILING; } - break; - case 599: /* trim_specification_type ::= LEADING */ -{ yymsp[0].minor.yy300 = TRIM_TYPE_LEADING; } - break; - case 614: /* star_func_para_list ::= NK_STAR */ -{ yylhsminor.yy946 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy946 = yylhsminor.yy946; - break; - case 619: /* star_func_para ::= table_name NK_DOT NK_STAR */ - case 704: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==704); -{ yylhsminor.yy974 = createColumnNode(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 620: /* case_when_expression ::= CASE when_then_list case_when_else_opt END */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, NULL, 
yymsp[-2].minor.yy946, yymsp[-1].minor.yy974)); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 621: /* case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-4].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), yymsp[-2].minor.yy946, yymsp[-1].minor.yy974)); } - yymsp[-4].minor.yy974 = yylhsminor.yy974; - break; - case 624: /* when_then_expr ::= WHEN common_expression THEN common_expression */ -{ yymsp[-3].minor.yy974 = createWhenThenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } - break; - case 626: /* case_when_else_opt ::= ELSE common_expression */ -{ yymsp[-1].minor.yy974 = releaseRawExprNode(pCxt, yymsp[0].minor.yy974); } - break; - case 627: /* predicate ::= expr_or_subquery compare_op expr_or_subquery */ - case 632: /* predicate ::= expr_or_subquery in_op in_predicate_value */ yytestcase(yyruleno==632); -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy140, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 628: /* predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy974), releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-4].minor.yy974 = yylhsminor.yy974; - break; - case 629: /* predicate ::= 
expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy974), releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-5].minor.yy974 = yylhsminor.yy974; - break; - case 630: /* predicate ::= expr_or_subquery IS NULL */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), NULL)); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 631: /* predicate ::= expr_or_subquery IS NOT NULL */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), NULL)); - } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 633: /* compare_op ::= NK_LT */ -{ yymsp[0].minor.yy140 = OP_TYPE_LOWER_THAN; } - break; - case 634: /* compare_op ::= NK_GT */ -{ yymsp[0].minor.yy140 = OP_TYPE_GREATER_THAN; } - break; - case 635: /* compare_op ::= NK_LE */ -{ yymsp[0].minor.yy140 = OP_TYPE_LOWER_EQUAL; } - break; - case 636: /* compare_op ::= NK_GE */ -{ yymsp[0].minor.yy140 = OP_TYPE_GREATER_EQUAL; } - break; - case 637: /* compare_op ::= NK_NE */ -{ yymsp[0].minor.yy140 = OP_TYPE_NOT_EQUAL; } - break; - case 638: /* compare_op ::= NK_EQ */ -{ yymsp[0].minor.yy140 = OP_TYPE_EQUAL; } - break; - case 639: /* compare_op ::= LIKE */ -{ yymsp[0].minor.yy140 = OP_TYPE_LIKE; } - break; - case 640: /* compare_op ::= NOT LIKE */ -{ yymsp[-1].minor.yy140 = OP_TYPE_NOT_LIKE; } - break; - case 641: /* 
compare_op ::= MATCH */ -{ yymsp[0].minor.yy140 = OP_TYPE_MATCH; } - break; - case 642: /* compare_op ::= NMATCH */ -{ yymsp[0].minor.yy140 = OP_TYPE_NMATCH; } - break; - case 643: /* compare_op ::= CONTAINS */ -{ yymsp[0].minor.yy140 = OP_TYPE_JSON_CONTAINS; } - break; - case 644: /* in_op ::= IN */ -{ yymsp[0].minor.yy140 = OP_TYPE_IN; } - break; - case 645: /* in_op ::= NOT IN */ -{ yymsp[-1].minor.yy140 = OP_TYPE_NOT_IN; } - break; - case 646: /* in_predicate_value ::= NK_LP literal_list NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy946)); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 648: /* boolean_value_expression ::= NOT boolean_primary */ -{ - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy974), NULL)); - } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 649: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 650: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ -{ - SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); - SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); - yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, 
yymsp[0].minor.yy974))); - } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 658: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ -{ yylhsminor.yy974 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, JOIN_STYPE_NONE, yymsp[-2].minor.yy974, yymsp[0].minor.yy974, NULL); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 661: /* table_primary ::= table_name alias_opt */ -{ yylhsminor.yy974 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy557, &yymsp[0].minor.yy557); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 662: /* table_primary ::= db_name NK_DOT table_name alias_opt */ -{ yylhsminor.yy974 = createRealTableNode(pCxt, &yymsp[-3].minor.yy557, &yymsp[-1].minor.yy557, &yymsp[0].minor.yy557); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 663: /* table_primary ::= subquery alias_opt */ -{ yylhsminor.yy974 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), &yymsp[0].minor.yy557); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 665: /* alias_opt ::= */ -{ yymsp[1].minor.yy557 = nil_token; } - break; - case 667: /* alias_opt ::= AS table_alias */ -{ yymsp[-1].minor.yy557 = yymsp[0].minor.yy557; } - break; - case 668: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - case 669: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==669); -{ yymsp[-2].minor.yy974 = yymsp[-1].minor.yy974; } - break; - case 670: /* joined_table ::= table_reference join_type join_subtype JOIN table_reference join_on_clause_opt window_offset_clause_opt jlimit_clause_opt */ -{ - yylhsminor.yy974 = createJoinTableNode(pCxt, yymsp[-6].minor.yy792, yymsp[-5].minor.yy744, yymsp[-7].minor.yy974, yymsp[-3].minor.yy974, yymsp[-2].minor.yy974); - yylhsminor.yy974 = addWindowOffsetClause(pCxt, yylhsminor.yy974, yymsp[-1].minor.yy974); - yylhsminor.yy974 = addJLimitClause(pCxt, yylhsminor.yy974, yymsp[0].minor.yy974); - } - 
yymsp[-7].minor.yy974 = yylhsminor.yy974; - break; - case 671: /* join_type ::= */ -{ yymsp[1].minor.yy792 = JOIN_TYPE_INNER; } - break; - case 672: /* join_type ::= INNER */ -{ yymsp[0].minor.yy792 = JOIN_TYPE_INNER; } - break; - case 673: /* join_type ::= LEFT */ -{ yymsp[0].minor.yy792 = JOIN_TYPE_LEFT; } - break; - case 674: /* join_type ::= RIGHT */ -{ yymsp[0].minor.yy792 = JOIN_TYPE_RIGHT; } - break; - case 675: /* join_type ::= FULL */ -{ yymsp[0].minor.yy792 = JOIN_TYPE_FULL; } - break; - case 676: /* join_subtype ::= */ -{ yymsp[1].minor.yy744 = JOIN_STYPE_NONE; } - break; - case 677: /* join_subtype ::= OUTER */ -{ yymsp[0].minor.yy744 = JOIN_STYPE_OUTER; } - break; - case 678: /* join_subtype ::= SEMI */ -{ yymsp[0].minor.yy744 = JOIN_STYPE_SEMI; } - break; - case 679: /* join_subtype ::= ANTI */ -{ yymsp[0].minor.yy744 = JOIN_STYPE_ANTI; } - break; - case 680: /* join_subtype ::= ASOF */ -{ yymsp[0].minor.yy744 = JOIN_STYPE_ASOF; } - break; - case 681: /* join_subtype ::= WINDOW */ -{ yymsp[0].minor.yy744 = JOIN_STYPE_WIN; } - break; - case 685: /* window_offset_clause_opt ::= WINDOW_OFFSET NK_LP window_offset_literal NK_COMMA window_offset_literal NK_RP */ -{ yymsp[-5].minor.yy974 = createWindowOffsetNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } - break; - case 686: /* window_offset_literal ::= NK_VARIABLE */ -{ yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createTimeOffsetValueNode(pCxt, &yymsp[0].minor.yy0)); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 687: /* window_offset_literal ::= NK_MINUS NK_VARIABLE */ -{ - SToken t = yymsp[-1].minor.yy0; - t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; - yylhsminor.yy974 = createRawExprNode(pCxt, &t, createTimeOffsetValueNode(pCxt, &t)); - } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 689: /* jlimit_clause_opt ::= JLIMIT NK_INTEGER */ - case 762: /* 
slimit_clause_opt ::= SLIMIT NK_INTEGER */ yytestcase(yyruleno==762); - case 766: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==766); -{ yymsp[-1].minor.yy974 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } - break; - case 690: /* query_specification ::= SELECT hint_list set_quantifier_opt tag_mode_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ -{ - yymsp[-13].minor.yy974 = createSelectStmt(pCxt, yymsp[-11].minor.yy569, yymsp[-9].minor.yy946, yymsp[-8].minor.yy974, yymsp[-12].minor.yy946); - yymsp[-13].minor.yy974 = setSelectStmtTagMode(pCxt, yymsp[-13].minor.yy974, yymsp[-10].minor.yy569); - yymsp[-13].minor.yy974 = addWhereClause(pCxt, yymsp[-13].minor.yy974, yymsp[-7].minor.yy974); - yymsp[-13].minor.yy974 = addPartitionByClause(pCxt, yymsp[-13].minor.yy974, yymsp[-6].minor.yy946); - yymsp[-13].minor.yy974 = addWindowClauseClause(pCxt, yymsp[-13].minor.yy974, yymsp[-2].minor.yy974); - yymsp[-13].minor.yy974 = addGroupByClause(pCxt, yymsp[-13].minor.yy974, yymsp[-1].minor.yy946); - yymsp[-13].minor.yy974 = addHavingClause(pCxt, yymsp[-13].minor.yy974, yymsp[0].minor.yy974); - yymsp[-13].minor.yy974 = addRangeClause(pCxt, yymsp[-13].minor.yy974, yymsp[-5].minor.yy974); - yymsp[-13].minor.yy974 = addEveryClause(pCxt, yymsp[-13].minor.yy974, yymsp[-4].minor.yy974); - yymsp[-13].minor.yy974 = addFillClause(pCxt, yymsp[-13].minor.yy974, yymsp[-3].minor.yy974); - } - break; - case 691: /* hint_list ::= */ -{ yymsp[1].minor.yy946 = createHintNodeList(pCxt, NULL); } - break; - case 692: /* hint_list ::= NK_HINT */ -{ yylhsminor.yy946 = createHintNodeList(pCxt, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy946 = yylhsminor.yy946; - break; - case 697: /* set_quantifier_opt ::= ALL */ -{ yymsp[0].minor.yy569 = false; } - break; - case 700: /* select_item ::= NK_STAR */ -{ yylhsminor.yy974 = createColumnNode(pCxt, NULL, 
&yymsp[0].minor.yy0); } - yymsp[0].minor.yy974 = yylhsminor.yy974; - break; - case 702: /* select_item ::= common_expression column_alias */ - case 712: /* partition_item ::= expr_or_subquery column_alias */ yytestcase(yyruleno==712); -{ yylhsminor.yy974 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), &yymsp[0].minor.yy557); } - yymsp[-1].minor.yy974 = yylhsminor.yy974; - break; - case 703: /* select_item ::= common_expression AS column_alias */ - case 713: /* partition_item ::= expr_or_subquery AS column_alias */ yytestcase(yyruleno==713); -{ yylhsminor.yy974 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), &yymsp[0].minor.yy557); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 708: /* partition_by_clause_opt ::= PARTITION BY partition_list */ - case 740: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==740); - case 760: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==760); -{ yymsp[-2].minor.yy946 = yymsp[0].minor.yy946; } - break; - case 715: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA interval_sliding_duration_literal NK_RP */ -{ yymsp[-5].minor.yy974 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } - break; - case 716: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ -{ yymsp[-3].minor.yy974 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } - break; - case 717: /* twindow_clause_opt ::= INTERVAL NK_LP interval_sliding_duration_literal NK_RP sliding_opt fill_opt */ -{ yymsp[-5].minor.yy974 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), NULL, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } - break; - case 718: /* twindow_clause_opt ::= INTERVAL NK_LP interval_sliding_duration_literal NK_COMMA interval_sliding_duration_literal NK_RP sliding_opt 
fill_opt */ -{ yymsp[-7].minor.yy974 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy974), releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } - break; - case 719: /* twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition */ -{ yymsp[-6].minor.yy974 = createEventWindowNode(pCxt, yymsp[-3].minor.yy974, yymsp[0].minor.yy974); } - break; - case 720: /* twindow_clause_opt ::= COUNT_WINDOW NK_LP NK_INTEGER NK_RP */ -{ yymsp[-3].minor.yy974 = createCountWindowNode(pCxt, &yymsp[-1].minor.yy0, &yymsp[-1].minor.yy0); } - break; - case 721: /* twindow_clause_opt ::= COUNT_WINDOW NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ -{ yymsp[-5].minor.yy974 = createCountWindowNode(pCxt, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0); } - break; - case 722: /* twindow_clause_opt ::= ANOMALY_WINDOW NK_LP expr_or_subquery NK_RP */ -{ yymsp[-3].minor.yy974 = createAnomalyWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), NULL); } - break; - case 723: /* twindow_clause_opt ::= ANOMALY_WINDOW NK_LP expr_or_subquery NK_COMMA NK_STRING NK_RP */ -{ yymsp[-5].minor.yy974 = createAnomalyWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), &yymsp[-1].minor.yy0); } - break; - case 730: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ -{ yymsp[-3].minor.yy974 = createFillNode(pCxt, yymsp[-1].minor.yy102, NULL); } - break; - case 731: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA expression_list NK_RP */ -{ yymsp[-5].minor.yy974 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy946)); } - break; - case 732: /* fill_opt ::= FILL NK_LP VALUE_F NK_COMMA expression_list NK_RP */ -{ yymsp[-5].minor.yy974 = createFillNode(pCxt, FILL_MODE_VALUE_F, createNodeListNode(pCxt, yymsp[-1].minor.yy946)); } - break; - case 733: /* fill_mode ::= NONE */ -{ yymsp[0].minor.yy102 = FILL_MODE_NONE; } - break; - case 734: /* fill_mode ::= PREV */ -{ 
yymsp[0].minor.yy102 = FILL_MODE_PREV; } - break; - case 735: /* fill_mode ::= NULL */ -{ yymsp[0].minor.yy102 = FILL_MODE_NULL; } - break; - case 736: /* fill_mode ::= NULL_F */ -{ yymsp[0].minor.yy102 = FILL_MODE_NULL_F; } - break; - case 737: /* fill_mode ::= LINEAR */ -{ yymsp[0].minor.yy102 = FILL_MODE_LINEAR; } - break; - case 738: /* fill_mode ::= NEXT */ -{ yymsp[0].minor.yy102 = FILL_MODE_NEXT; } - break; - case 741: /* group_by_list ::= expr_or_subquery */ -{ yylhsminor.yy946 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } - yymsp[0].minor.yy946 = yylhsminor.yy946; - break; - case 742: /* group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ -{ yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } - yymsp[-2].minor.yy946 = yylhsminor.yy946; - break; - case 746: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ -{ yymsp[-5].minor.yy974 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } - break; - case 747: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_RP */ -{ yymsp[-3].minor.yy974 = createInterpTimePoint(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } - break; - case 750: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ -{ - yylhsminor.yy974 = addOrderByClause(pCxt, yymsp[-3].minor.yy974, yymsp[-2].minor.yy946); - yylhsminor.yy974 = addSlimitClause(pCxt, yylhsminor.yy974, yymsp[-1].minor.yy974); - yylhsminor.yy974 = addLimitClause(pCxt, yylhsminor.yy974, yymsp[0].minor.yy974); - } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 753: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ -{ yylhsminor.yy974 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy974, 
yymsp[0].minor.yy974); } - yymsp[-3].minor.yy974 = yylhsminor.yy974; - break; - case 754: /* union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ -{ yylhsminor.yy974 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy974, yymsp[0].minor.yy974); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 763: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - case 767: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==767); -{ yymsp[-3].minor.yy974 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } - break; - case 764: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - case 768: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==768); -{ yymsp[-3].minor.yy974 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } - break; - case 769: /* subquery ::= NK_LP query_expression NK_RP */ -{ yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy974); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 774: /* sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ -{ yylhsminor.yy974 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), yymsp[-1].minor.yy410, yymsp[0].minor.yy307); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - case 775: /* ordering_specification_opt ::= */ -{ yymsp[1].minor.yy410 = ORDER_ASC; } - break; - case 776: /* ordering_specification_opt ::= ASC */ -{ yymsp[0].minor.yy410 = ORDER_ASC; } - break; - case 777: /* ordering_specification_opt ::= DESC */ -{ yymsp[0].minor.yy410 = ORDER_DESC; } - break; - case 778: /* null_ordering_opt ::= */ -{ yymsp[1].minor.yy307 = NULL_ORDER_DEFAULT; } - break; - case 779: /* null_ordering_opt ::= NULLS FIRST */ -{ yymsp[-1].minor.yy307 = NULL_ORDER_FIRST; } - break; - case 780: /* null_ordering_opt ::= NULLS LAST 
*/ -{ yymsp[-1].minor.yy307 = NULL_ORDER_LAST; } - break; - case 783: /* column_options ::= column_options NK_ID NK_STRING */ -{ yylhsminor.yy974 = setColumnOptions(pCxt, yymsp[-2].minor.yy974, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy974 = yylhsminor.yy974; - break; - default: - break; -/********** End reduce actions ************************************************/ - }; - assert( yyrulenoYY_MAX_SHIFT && yyact<=YY_MAX_SHIFTREDUCE) ); - - /* It is not possible for a REDUCE to be followed by an error */ - assert( yyact!=YY_ERROR_ACTION ); - - yymsp += yysize+1; - yypParser->yytos = yymsp; - yymsp->stateno = (YYACTIONTYPE)yyact; - yymsp->major = (YYCODETYPE)yygoto; - yyTraceShift(yypParser, yyact, "... then shift"); - return yyact; -} - -/* -** The following code executes when the parse fails -*/ -#ifndef YYNOERRORRECOVERY -static void yy_parse_failed( - yyParser *yypParser /* The parser */ -){ - ParseARG_FETCH - ParseCTX_FETCH -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); - } -#endif - while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser); - /* Here code is inserted which will be executed whenever the - ** parser fails */ -/************ Begin %parse_failure code ***************************************/ -/************ End %parse_failure code *****************************************/ - ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ - ParseCTX_STORE -} -#endif /* YYNOERRORRECOVERY */ - -/* -** The following code executes when a syntax error first occurs. 
-*/ -static void yy_syntax_error( - yyParser *yypParser, /* The parser */ - int yymajor, /* The major type of the error token */ - ParseTOKENTYPE yyminor /* The minor type of the error token */ -){ - ParseARG_FETCH - ParseCTX_FETCH -#define TOKEN yyminor -/************ Begin %syntax_error code ****************************************/ - - if (TSDB_CODE_SUCCESS == pCxt->errCode) { - if(TOKEN.z) { - pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, TOKEN.z); - } else { - pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INCOMPLETE_SQL); - } - } else if (TSDB_CODE_PAR_DB_NOT_SPECIFIED == pCxt->errCode && TK_NK_FLOAT == TOKEN.type) { - pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, TOKEN.z); - } -/************ End %syntax_error code ******************************************/ - ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ - ParseCTX_STORE -} - -/* -** The following is executed when the parser accepts -*/ -static void yy_accept( - yyParser *yypParser /* The parser */ -){ - ParseARG_FETCH - ParseCTX_FETCH -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); - } -#endif -#ifndef YYNOERRORRECOVERY - yypParser->yyerrcnt = -1; -#endif - assert( yypParser->yytos==yypParser->yystack ); - /* Here code is inserted which will be executed whenever the - ** parser accepts */ -/*********** Begin %parse_accept code *****************************************/ -/*********** End %parse_accept code *******************************************/ - ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ - ParseCTX_STORE -} - -/* The main parser program. -** The first argument is a pointer to a structure obtained from -** "ParseAlloc" which describes the current state of the parser. -** The second argument is the major token number. The third is -** the minor token. 
The fourth optional argument is whatever the -** user wants (and specified in the grammar) and is available for -** use by the action routines. -** -** Inputs: -**
    -**
  • A pointer to the parser (an opaque structure.) -**
  • The major token number. -**
  • The minor token number. -**
  • An option argument of a grammar-specified type. -**
-** -** Outputs: -** None. -*/ -void Parse( - void *yyp, /* The parser */ - int yymajor, /* The major token code number */ - ParseTOKENTYPE yyminor /* The value for the token */ - ParseARG_PDECL /* Optional %extra_argument parameter */ -){ - YYMINORTYPE yyminorunion; - YYACTIONTYPE yyact; /* The parser action. */ -#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) - int yyendofinput; /* True if we are at the end of input */ -#endif -#ifdef YYERRORSYMBOL - int yyerrorhit = 0; /* True if yymajor has invoked an error */ -#endif - yyParser *yypParser = (yyParser*)yyp; /* The parser */ - ParseCTX_FETCH - ParseARG_STORE - - assert( yypParser->yytos!=0 ); -#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) - yyendofinput = (yymajor==0); -#endif - - yyact = yypParser->yytos->stateno; -#ifndef NDEBUG - if( yyTraceFILE ){ - if( yyact < YY_MIN_REDUCE ){ - fprintf(yyTraceFILE,"%sInput '%s' in state %d\n", - yyTracePrompt,yyTokenName[yymajor],yyact); - }else{ - fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n", - yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE); - } - } -#endif - - do{ - assert( yyact==yypParser->yytos->stateno ); - yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact); - if( yyact >= YY_MIN_REDUCE ){ - yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor, - yyminor ParseCTX_PARAM); - }else if( yyact <= YY_MAX_SHIFTREDUCE ){ - yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor); -#ifndef YYNOERRORRECOVERY - yypParser->yyerrcnt--; -#endif - break; - }else if( yyact==YY_ACCEPT_ACTION ){ - yypParser->yytos--; - yy_accept(yypParser); - return; - }else{ - assert( yyact == YY_ERROR_ACTION ); - yyminorunion.yy0 = yyminor; -#ifdef YYERRORSYMBOL - int yymx; -#endif -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt); - } -#endif -#ifdef YYERRORSYMBOL - /* A syntax error has occurred. - ** The response to an error depends upon whether or not the - ** grammar defines an error token "ERROR". 
- ** - ** This is what we do if the grammar does define ERROR: - ** - ** * Call the %syntax_error function. - ** - ** * Begin popping the stack until we enter a state where - ** it is legal to shift the error symbol, then shift - ** the error symbol. - ** - ** * Set the error count to three. - ** - ** * Begin accepting and shifting new tokens. No new error - ** processing will occur until three tokens have been - ** shifted successfully. - ** - */ - if( yypParser->yyerrcnt<0 ){ - yy_syntax_error(yypParser,yymajor,yyminor); - } - yymx = yypParser->yytos->major; - if( yymx==YYERRORSYMBOL || yyerrorhit ){ -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sDiscard input token %s\n", - yyTracePrompt,yyTokenName[yymajor]); - } -#endif - yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion); - yymajor = YYNOCODE; - }else{ - while( yypParser->yytos >= yypParser->yystack - && (yyact = yy_find_reduce_action( - yypParser->yytos->stateno, - YYERRORSYMBOL)) > YY_MAX_SHIFTREDUCE - ){ - yy_pop_parser_stack(yypParser); - } - if( yypParser->yytos < yypParser->yystack || yymajor==0 ){ - yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); - yy_parse_failed(yypParser); -#ifndef YYNOERRORRECOVERY - yypParser->yyerrcnt = -1; -#endif - yymajor = YYNOCODE; - }else if( yymx!=YYERRORSYMBOL ){ - yy_shift(yypParser,yyact,YYERRORSYMBOL,yyminor); - } - } - yypParser->yyerrcnt = 3; - yyerrorhit = 1; - if( yymajor==YYNOCODE ) break; - yyact = yypParser->yytos->stateno; -#elif defined(YYNOERRORRECOVERY) - /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to - ** do any kind of error recovery. Instead, simply invoke the syntax - ** error routine and continue going as if nothing had happened. - ** - ** Applications can set this macro (for example inside %include) if - ** they intend to abandon the parse upon the first syntax error seen. 
- */ - yy_syntax_error(yypParser,yymajor, yyminor); - yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); - break; -#else /* YYERRORSYMBOL is not defined */ - /* This is what we do if the grammar does not define ERROR: - ** - ** * Report an error message, and throw away the input token. - ** - ** * If the input token is $, then fail the parse. - ** - ** As before, subsequent error messages are suppressed until - ** three input tokens have been successfully shifted. - */ - if( yypParser->yyerrcnt<=0 ){ - yy_syntax_error(yypParser,yymajor, yyminor); - } - yypParser->yyerrcnt = 3; - yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); - if( yyendofinput ){ - yy_parse_failed(yypParser); -#ifndef YYNOERRORRECOVERY - yypParser->yyerrcnt = -1; -#endif - } - break; -#endif - } - }while( yypParser->yytos>yypParser->yystack ); -#ifndef NDEBUG - if( yyTraceFILE ){ - yyStackEntry *i; - char cDiv = '['; - fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt); - for(i=&yypParser->yystack[1]; i<=yypParser->yytos; i++){ - fprintf(yyTraceFILE,"%c%s", cDiv, yyTokenName[i->major]); - cDiv = ' '; - } - fprintf(yyTraceFILE,"]\n"); - } -#endif - return; -} - -/* -** Return the fallback token corresponding to canonical token iToken, or -** 0 if iToken has no fallback. 
-*/ -int ParseFallback(int iToken){ -#ifdef YYFALLBACK - assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) ); - return yyFallback[iToken]; -#else - (void)iToken; - return 0; -#endif -} diff --git a/source/libs/planner/inc/planInt.h b/source/libs/planner/inc/planInt.h index beb277493cb..59e771454c6 100644 --- a/source/libs/planner/inc/planInt.h +++ b/source/libs/planner/inc/planInt.h @@ -24,6 +24,18 @@ extern "C" { #include "tsimplehash.h" #include "taoserror.h" + +typedef struct SPhysiPlanContext { + SPlanContext* pPlanCxt; + int32_t errCode; + int16_t nextDataBlockId; + SArray* pLocationHelper; + SArray* pProjIdxLocHelper; + bool hasScan; + bool hasSysScan; +} SPhysiPlanContext; + + #define planFatal(param, ...) qFatal("PLAN: " param, ##__VA_ARGS__) #define planError(param, ...) qError("PLAN: " param, ##__VA_ARGS__) #define planWarn(param, ...) qWarn("PLAN: " param, ##__VA_ARGS__) diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 40cd415cb94..34c83acee8a 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -923,6 +923,15 @@ static bool isInterpFunc(int32_t funcId) { return fmIsInterpFunc(funcId) || fmIsInterpPseudoColumnFunc(funcId) || fmIsGroupKeyFunc(funcId) || fmisSelectGroupConstValueFunc(funcId); } +static void initStreamOption(SLogicPlanContext* pCxt, SStreamNodeOption* pOption) { + pOption->triggerType = pCxt->pPlanCxt->triggerType; + pOption->watermark = pCxt->pPlanCxt->watermark; + pOption->deleteMark = pCxt->pPlanCxt->deleteMark; + pOption->igExpired = pCxt->pPlanCxt->igExpired; + pOption->igCheckUpdate = pCxt->pPlanCxt->igCheckUpdate; + pOption->destHasPrimaryKey = pCxt->pPlanCxt->destHasPrimaryKey; +} + static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SLogicNode** pLogicNode) { if (!pSelect->hasInterpFunc) { return TSDB_CODE_SUCCESS; @@ -957,6 +966,8 @@ static int32_t 
createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* p if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pEvery) { pInterpFunc->interval = ((SValueNode*)pSelect->pEvery)->datum.i; + pInterpFunc->intervalUnit = ((SValueNode*)pSelect->pEvery)->unit; + pInterpFunc->precision = pSelect->precision; } // set the output @@ -964,6 +975,10 @@ static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* p code = createColumnByRewriteExprs(pInterpFunc->pFuncs, &pInterpFunc->node.pTargets); } + if (TSDB_CODE_SUCCESS == code) { + initStreamOption(pCxt, &pInterpFunc->streamNodeOption); + } + if (TSDB_CODE_SUCCESS == code) { *pLogicNode = (SLogicNode*)pInterpFunc; } else { @@ -1279,71 +1294,139 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele return TSDB_CODE_FAILED; } +typedef struct SCollectFillExprsCtx { + SHashObj* pPseudoCols; + SNodeList* pFillExprs; + SNodeList* pNotFillExprs; + bool collectAggFuncs; + SNodeList* pAggFuncCols; +} SCollectFillExprsCtx; + +typedef struct SWalkFillSubExprCtx { + bool hasFillCol; + bool hasPseudoWinCol; + bool hasGroupKeyCol; + SCollectFillExprsCtx* pCollectFillCtx; + int32_t code; +} SWalkFillSubExprCtx; + +static bool nodeAlreadyContained(SNodeList* pList, SNode* pNode) { + SNode* pExpr = NULL; + FOREACH(pExpr, pList) { + if (nodesEqualNode(pExpr, pNode)) { + return true; + } + } + return false; +} + static EDealRes needFillValueImpl(SNode* pNode, void* pContext) { + SWalkFillSubExprCtx *pCtx = pContext; + EDealRes res = DEAL_RES_CONTINUE; if (QUERY_NODE_COLUMN == nodeType(pNode)) { SColumnNode* pCol = (SColumnNode*)pNode; - if (COLUMN_TYPE_WINDOW_START != pCol->colType && COLUMN_TYPE_WINDOW_END != pCol->colType && - COLUMN_TYPE_WINDOW_DURATION != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) { - *(bool*)pContext = true; - return DEAL_RES_END; + if (COLUMN_TYPE_WINDOW_START == pCol->colType || COLUMN_TYPE_WINDOW_END == pCol->colType || + COLUMN_TYPE_WINDOW_DURATION == 
pCol->colType) { + pCtx->hasPseudoWinCol = true; + pCtx->code = + taosHashPut(pCtx->pCollectFillCtx->pPseudoCols, pCol->colName, TSDB_COL_NAME_LEN, &pNode, POINTER_BYTES); + } else if (COLUMN_TYPE_GROUP_KEY == pCol->colType || COLUMN_TYPE_TBNAME == pCol->colType || + COLUMN_TYPE_TAG == pCol->colType) { + pCtx->hasGroupKeyCol = true; + pCtx->code = + taosHashPut(pCtx->pCollectFillCtx->pPseudoCols, pCol->colName, TSDB_COL_NAME_LEN, &pNode, POINTER_BYTES); + } else { + pCtx->hasFillCol = true; + if (pCtx->pCollectFillCtx->collectAggFuncs) { + // Agg funcs has already been rewriten to columns by Interval + // Here, we return DEAL_RES_CONTINUE cause we need to collect all agg funcs + if (!nodeAlreadyContained(pCtx->pCollectFillCtx->pFillExprs, pNode) && + !nodeAlreadyContained(pCtx->pCollectFillCtx->pAggFuncCols, pNode)) + pCtx->code = nodesListMakeStrictAppend(&pCtx->pCollectFillCtx->pAggFuncCols, pNode); + } else { + res = DEAL_RES_END; + } } } - return DEAL_RES_CONTINUE; + if (pCtx->code != TSDB_CODE_SUCCESS) res = DEAL_RES_ERROR; + return res; } -static bool needFillValue(SNode* pNode) { - bool hasFillCol = false; - nodesWalkExpr(pNode, needFillValueImpl, &hasFillCol); - return hasFillCol; +static void needFillValue(SNode* pNode, SWalkFillSubExprCtx* pCtx) { + nodesWalkExpr(pNode, needFillValueImpl, pCtx); } -static int32_t partFillExprs(SSelectStmt* pSelect, SNodeList** pFillExprs, SNodeList** pNotFillExprs) { - int32_t code = TSDB_CODE_SUCCESS; - SNode* pProject = NULL; - FOREACH(pProject, pSelect->pProjectionList) { - if (needFillValue(pProject)) { - SNode* pNew = NULL; - code = nodesCloneNode(pProject, &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListMakeStrictAppend(pFillExprs, pNew); - } - } else if (QUERY_NODE_VALUE != nodeType(pProject)) { - SNode* pNew = NULL; - code = nodesCloneNode(pProject, &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListMakeStrictAppend(pNotFillExprs, pNew); - } +static int32_t collectFillExpr(SNode* pNode, 
SCollectFillExprsCtx* pCollectFillCtx) { + SNode* pNew = NULL; + SWalkFillSubExprCtx collectFillSubExprCtx = { + .hasFillCol = false, .hasPseudoWinCol = false, .hasGroupKeyCol = false, .pCollectFillCtx = pCollectFillCtx}; + needFillValue(pNode, &collectFillSubExprCtx); + if (collectFillSubExprCtx.code != TSDB_CODE_SUCCESS) { + return collectFillSubExprCtx.code; + } + + if (collectFillSubExprCtx.hasFillCol && !pCollectFillCtx->collectAggFuncs) { + if (nodeType(pNode) == QUERY_NODE_ORDER_BY_EXPR) { + collectFillSubExprCtx.code = nodesCloneNode(((SOrderByExprNode*)pNode)->pExpr, &pNew); + } else { + collectFillSubExprCtx.code = nodesCloneNode(pNode, &pNew); } - if (TSDB_CODE_SUCCESS != code) { - NODES_DESTORY_LIST(*pFillExprs); - NODES_DESTORY_LIST(*pNotFillExprs); - break; + if (collectFillSubExprCtx.code == TSDB_CODE_SUCCESS) { + collectFillSubExprCtx.code = nodesListMakeStrictAppend(&pCollectFillCtx->pFillExprs, pNew); } } - if (!pSelect->isDistinct) { - SNode* pOrderExpr = NULL; - FOREACH(pOrderExpr, pSelect->pOrderByList) { - SNode* pExpr = ((SOrderByExprNode*)pOrderExpr)->pExpr; - if (needFillValue(pExpr)) { - SNode* pNew = NULL; - code = nodesCloneNode(pExpr, &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListMakeStrictAppend(pFillExprs, pNew); - } - } else if (QUERY_NODE_VALUE != nodeType(pExpr)) { - SNode* pNew = NULL; - code = nodesCloneNode(pExpr, &pNew); - if (TSDB_CODE_SUCCESS == code) { - code = nodesListMakeStrictAppend(pNotFillExprs, pNew); - } + return collectFillSubExprCtx.code; +} + +static int32_t collectFillExprs(SSelectStmt* pSelect, SNodeList** pFillExprs, SNodeList** pNotFillExprs, + SNodeList** pPossibleFillNullCols) { + int32_t code = TSDB_CODE_SUCCESS; + SCollectFillExprsCtx collectFillCtx = {0}; + SNode* pNode = NULL; + collectFillCtx.pPseudoCols = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + if (!collectFillCtx.pPseudoCols) return terrno; + + FOREACH(pNode, pSelect->pProjectionList) { 
+ code = collectFillExpr(pNode, &collectFillCtx); + if (code != TSDB_CODE_SUCCESS) break; + } + collectFillCtx.collectAggFuncs = true; + if (code == TSDB_CODE_SUCCESS) { + code = collectFillExpr(pSelect->pHaving, &collectFillCtx); + } + if (code == TSDB_CODE_SUCCESS) { + FOREACH(pNode, pSelect->pOrderByList) { + code = collectFillExpr(pNode, &collectFillCtx); + if (code != TSDB_CODE_SUCCESS) break; + } + } + if (code == TSDB_CODE_SUCCESS) { + void* pIter = taosHashIterate(collectFillCtx.pPseudoCols, 0); + while (pIter) { + SNode* pNode = *(SNode**)pIter, *pNew = NULL; + code = nodesCloneNode(pNode, &pNew); + if (code == TSDB_CODE_SUCCESS) { + code = nodesListMakeStrictAppend(&collectFillCtx.pNotFillExprs, pNew); } - if (TSDB_CODE_SUCCESS != code) { - NODES_DESTORY_LIST(*pFillExprs); - NODES_DESTORY_LIST(*pNotFillExprs); + if (code == TSDB_CODE_SUCCESS) { + pIter = taosHashIterate(collectFillCtx.pPseudoCols, pIter); + } else { + taosHashCancelIterate(collectFillCtx.pPseudoCols, pIter); break; } } + if (code == TSDB_CODE_SUCCESS) { + TSWAP(*pFillExprs, collectFillCtx.pFillExprs); + TSWAP(*pNotFillExprs, collectFillCtx.pNotFillExprs); + TSWAP(*pPossibleFillNullCols, collectFillCtx.pAggFuncCols); + } } + if (code != TSDB_CODE_SUCCESS) { + if (collectFillCtx.pFillExprs) nodesDestroyList(collectFillCtx.pFillExprs); + if (collectFillCtx.pNotFillExprs) nodesDestroyList(collectFillCtx.pNotFillExprs); + if (collectFillCtx.pAggFuncCols) nodesDestroyList(collectFillCtx.pAggFuncCols); + } + taosHashCleanup(collectFillCtx.pPseudoCols); return code; } @@ -1369,13 +1452,16 @@ static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect pFill->node.resultDataOrder = pFill->node.requireDataOrder; pFill->node.inputTsOrder = TSDB_ORDER_ASC; - code = partFillExprs(pSelect, &pFill->pFillExprs, &pFill->pNotFillExprs); + code = collectFillExprs(pSelect, &pFill->pFillExprs, &pFill->pNotFillExprs, &pFill->pFillNullExprs); if (TSDB_CODE_SUCCESS == code) { code = 
rewriteExprsForSelect(pFill->pFillExprs, pSelect, SQL_CLAUSE_FILL, NULL); } if (TSDB_CODE_SUCCESS == code) { code = rewriteExprsForSelect(pFill->pNotFillExprs, pSelect, SQL_CLAUSE_FILL, NULL); } + if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pFill->pFillNullExprs) > 0) { + code = createColumnByRewriteExprs(pFill->pFillNullExprs, &pFill->node.pTargets); + } if (TSDB_CODE_SUCCESS == code) { code = createColumnByRewriteExprs(pFill->pFillExprs, &pFill->node.pTargets); } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 738ccf3224b..347aeba95e3 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -30,15 +30,6 @@ typedef struct SSlotIndex { SArray* pSlotIdsInfo; // duplicate name slot } SSlotIndex; -typedef struct SPhysiPlanContext { - SPlanContext* pPlanCxt; - int32_t errCode; - int16_t nextDataBlockId; - SArray* pLocationHelper; - SArray* pProjIdxLocHelper; - bool hasScan; - bool hasSysScan; -} SPhysiPlanContext; static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char** ppKey, int32_t *pLen, uint16_t extraBufLen) { int32_t code = 0; @@ -341,7 +332,7 @@ static int32_t addDataBlockSlotsImpl(SPhysiPlanContext* pCxt, SNodeList* pList, } static int32_t addDataBlockSlots(SPhysiPlanContext* pCxt, SNodeList* pList, SDataBlockDescNode* pDataBlockDesc) { - return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, NULL, false, false); + return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, NULL, false, true); } static int32_t addDataBlockSlot(SPhysiPlanContext* pCxt, SNode** pNode, SDataBlockDescNode* pDataBlockDesc) { @@ -363,7 +354,7 @@ static int32_t addDataBlockSlot(SPhysiPlanContext* pCxt, SNode** pNode, SDataBlo static int32_t addDataBlockSlotsForProject(SPhysiPlanContext* pCxt, const char* pStmtName, SNodeList* pList, SDataBlockDescNode* pDataBlockDesc) { - return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, pStmtName, false, false); + 
return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, pStmtName, false, true); } static int32_t pushdownDataBlockSlots(SPhysiPlanContext* pCxt, SNodeList* pList, SDataBlockDescNode* pDataBlockDesc) { @@ -1933,8 +1924,9 @@ static int32_t createIndefRowsFuncPhysiNode(SPhysiPlanContext* pCxt, SNodeList* static int32_t createInterpFuncPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SInterpFuncLogicNode* pFuncLogicNode, SPhysiNode** pPhyNode) { - SInterpFuncPhysiNode* pInterpFunc = - (SInterpFuncPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pFuncLogicNode, QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC); + SInterpFuncPhysiNode* pInterpFunc = (SInterpFuncPhysiNode*)makePhysiNode( + pCxt, (SLogicNode*)pFuncLogicNode, + pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC : QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC); if (NULL == pInterpFunc) { return terrno; } @@ -1963,6 +1955,8 @@ static int32_t createInterpFuncPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pCh pInterpFunc->timeRange = pFuncLogicNode->timeRange; pInterpFunc->interval = pFuncLogicNode->interval; pInterpFunc->fillMode = pFuncLogicNode->fillMode; + pInterpFunc->intervalUnit = pFuncLogicNode->intervalUnit; + pInterpFunc->precision = pFuncLogicNode->node.precision; pInterpFunc->pFillValues = NULL; code = nodesCloneNode(pFuncLogicNode->pFillValues, &pInterpFunc->pFillValues); if (TSDB_CODE_SUCCESS != code) { @@ -1978,6 +1972,10 @@ static int32_t createInterpFuncPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pCh code = setConditionsSlotId(pCxt, (const SLogicNode*)pFuncLogicNode, (SPhysiNode*)pInterpFunc); } + if (pCxt->pPlanCxt->streamQuery) { + pInterpFunc->streamNodeOption = pFuncLogicNode->streamNodeOption; + } + if (TSDB_CODE_SUCCESS == code) { *pPhyNode = (SPhysiNode*)pInterpFunc; } else { @@ -2158,7 +2156,7 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* pWindow->deleteMark = pWindowLogicNode->deleteMark; pWindow->igExpired = pWindowLogicNode->igExpired; 
if (pCxt->pPlanCxt->streamQuery) { - pWindow->destHasPrimayKey = pCxt->pPlanCxt->destHasPrimaryKey; + pWindow->destHasPrimaryKey = pCxt->pPlanCxt->destHasPrimaryKey; } pWindow->mergeDataBlock = (GROUP_ACTION_KEEP == pWindowLogicNode->node.groupAction ? false : true); pWindow->node.inputTsOrder = pWindowLogicNode->node.inputTsOrder; @@ -2605,6 +2603,12 @@ static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren if (TSDB_CODE_SUCCESS == code) { code = addDataBlockSlots(pCxt, pFill->pNotFillExprs, pFill->node.pOutputDataBlockDesc); } + if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pFillNode->pFillNullExprs) > 0) { + code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->pFillNullExprs, &pFill->pFillNullExprs); + if (TSDB_CODE_SUCCESS == code ) { + code = addDataBlockSlots(pCxt, pFill->pFillNullExprs, pFill->node.pOutputDataBlockDesc); + } + } if (TSDB_CODE_SUCCESS == code) { code = setNodeSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->pWStartTs, &pFill->pWStartTs); diff --git a/source/libs/planner/src/planValidator.c b/source/libs/planner/src/planValidator.c index 4fcd064e56f..6b7b46cfa76 100755 --- a/source/libs/planner/src/planValidator.c +++ b/source/libs/planner/src/planValidator.c @@ -118,6 +118,7 @@ int32_t doValidatePhysiNode(SValidatePlanContext* pCxt, SNode* pNode) { case QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN: case QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE: case QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC: break; case QUERY_NODE_PHYSICAL_SUBPLAN: return validateSubplanNode(pCxt, (SSubplan*)pNode); diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c index 69014d5b1c5..20b81bfc14d 100644 --- a/source/libs/qworker/src/qwMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -429,7 +429,7 @@ int32_t qWorkerPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg, bool chkGran tFreeSSubQueryMsg(&msg); - return TSDB_CODE_SUCCESS; + return code; } int32_t 
qWorkerAbortPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg) { diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index ecb0d88ae53..436d42c0925 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -541,7 +541,7 @@ int32_t qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) { while (true) { tbGet = false; - code = qGetQueryTableSchemaVersion(pTaskInfo, dbFName, tbName, &tbInfo.sversion, &tbInfo.tversion, i, &tbGet); + code = qGetQueryTableSchemaVersion(pTaskInfo, dbFName, TSDB_DB_FNAME_LEN, tbName, TSDB_TABLE_NAME_LEN, &tbInfo.sversion, &tbInfo.tversion, i, &tbGet); if (TSDB_CODE_SUCCESS != code || !tbGet) { break; } diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index f7efea79953..74f965d5b01 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -782,7 +782,7 @@ int32_t qwPreprocessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { qwReleaseTaskCtx(mgmt, ctx); } - return TSDB_CODE_SUCCESS; + return code; } int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) { diff --git a/source/libs/qworker/test/CMakeLists.txt b/source/libs/qworker/test/CMakeLists.txt index 22870ea94dc..e87456cf72f 100644 --- a/source/libs/qworker/test/CMakeLists.txt +++ b/source/libs/qworker/test/CMakeLists.txt @@ -1,5 +1,6 @@ MESSAGE(STATUS "build qworker unit test") + IF(NOT TD_DARWIN) # GoogleTest requires at least C++11 SET(CMAKE_CXX_STANDARD 11) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 03bc2b544b8..d8622d93eec 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -4133,7 +4133,7 @@ int32_t fltSclBuildDatumFromValueNode(SFltSclDatum *datum, SValueNode *valNode) } case TSDB_DATA_TYPE_BOOL: { datum->kind = FLT_SCL_DATUM_KIND_INT64; - datum->i = (valNode->datum.b) ? 0 : 1; + datum->i = (valNode->datum.b) ? 
1 : 0; break; } case TSDB_DATA_TYPE_TINYINT: @@ -4541,6 +4541,7 @@ int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) { if (info->scalarMode) { SArray *colRanges = info->sclCtx.fltSclRange; + SOperatorNode *optNode = (SOperatorNode *) pNode; if (taosArrayGetSize(colRanges) == 1) { SFltSclColumnRange *colRange = taosArrayGet(colRanges, 0); if (NULL == colRange) { @@ -4560,7 +4561,8 @@ int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) { FLT_ERR_JRET(fltSclGetTimeStampDatum(endPt, &end)); win->skey = start.i; win->ekey = end.i; - *isStrict = true; + if(optNode->opType == OP_TYPE_IN) *isStrict = false; + else *isStrict = true; goto _return; } else if (taosArrayGetSize(points) == 0) { *win = TSWINDOW_DESC_INITIALIZER; @@ -5023,6 +5025,34 @@ int32_t fltSclBuildRangePoints(SFltSclOperator *oper, SArray *points) { } break; } + case OP_TYPE_IN: { + SNodeListNode *listNode = (SNodeListNode *)oper->valNode; + SListCell *cell = listNode->pNodeList->pHead; + SFltSclDatum minDatum = {.kind = FLT_SCL_DATUM_KIND_INT64, .i = INT64_MAX, .type = oper->colNode->node.resType}; + SFltSclDatum maxDatum = {.kind = FLT_SCL_DATUM_KIND_INT64, .i = INT64_MIN, .type = oper->colNode->node.resType}; + for (int32_t i = 0; i < listNode->pNodeList->length; ++i) { + SValueNode *valueNode = (SValueNode *)cell->pNode; + SFltSclDatum valDatum; + FLT_ERR_RET(fltSclBuildDatumFromValueNode(&valDatum, valueNode)); + if(valueNode->node.resType.type == TSDB_DATA_TYPE_FLOAT || valueNode->node.resType.type == TSDB_DATA_TYPE_DOUBLE) { + minDatum.i = TMIN(minDatum.i, valDatum.d); + maxDatum.i = TMAX(maxDatum.i, valDatum.d); + } else { + minDatum.i = TMIN(minDatum.i, valDatum.i); + maxDatum.i = TMAX(maxDatum.i, valDatum.i); + } + cell = cell->pNext; + } + SFltSclPoint startPt = {.start = true, .excl = false, .val = minDatum}; + SFltSclPoint endPt = {.start = false, .excl = false, .val = maxDatum}; + if (NULL == taosArrayPush(points, &startPt)) { + 
FLT_ERR_RET(terrno); + } + if (NULL == taosArrayPush(points, &endPt)) { + FLT_ERR_RET(terrno); + } + break; + } default: { qError("not supported operator type : %d when build range points", oper->type); break; @@ -5075,11 +5105,13 @@ static bool fltSclIsCollectableNode(SNode *pNode) { if (!(pOper->opType == OP_TYPE_GREATER_THAN || pOper->opType == OP_TYPE_GREATER_EQUAL || pOper->opType == OP_TYPE_LOWER_THAN || pOper->opType == OP_TYPE_LOWER_EQUAL || - pOper->opType == OP_TYPE_NOT_EQUAL || pOper->opType == OP_TYPE_EQUAL)) { + pOper->opType == OP_TYPE_NOT_EQUAL || pOper->opType == OP_TYPE_EQUAL || + pOper->opType == OP_TYPE_IN)) { return false; } - if (!(nodeType(pOper->pLeft) == QUERY_NODE_COLUMN && nodeType(pOper->pRight) == QUERY_NODE_VALUE)) { + if (!((nodeType(pOper->pLeft) == QUERY_NODE_COLUMN && nodeType(pOper->pRight) == QUERY_NODE_VALUE) || + (nodeType(pOper->pLeft) == QUERY_NODE_COLUMN && nodeType(pOper->pRight) == QUERY_NODE_NODE_LIST))) { return false; } return true; diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 6f6362a8f79..788ac38d8c5 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -2085,7 +2085,8 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp (void)memcpy(varDataVal(output), convBuf, len); varDataSetLen(output, len); } else { - NUM_TO_STRING(inputType, input, bufSize, buf); + int32_t outputSize = (outputLen - VARSTR_HEADER_SIZE) < bufSize ? (outputLen - VARSTR_HEADER_SIZE + 1): bufSize; + NUM_TO_STRING(inputType, input, outputSize, buf); int32_t len = (int32_t)strlen(buf); len = (outputLen - VARSTR_HEADER_SIZE) > len ? len : (outputLen - VARSTR_HEADER_SIZE); (void)memcpy(varDataVal(output), buf, len); @@ -2413,7 +2414,7 @@ int32_t toCharFunction(SScalarParam* pInput, int32_t inputNum, SScalarParam* pOu char *ts = colDataGetData(pInput[0].columnData, i); char *formatData = colDataGetData(pInput[1].columnData, pInput[1].numOfRows > 1 ? 
i : 0); - len = TMIN(TS_FORMAT_MAX_LEN - 1, varDataLen(formatData)); + len = TMIN(TS_FORMAT_MAX_LEN - VARSTR_HEADER_SIZE, varDataLen(formatData)); if (pInput[1].numOfRows > 1 || i == 0) { (void)strncpy(format, varDataVal(formatData), len); format[len] = '\0'; @@ -2662,6 +2663,10 @@ int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOut return TSDB_CODE_SUCCESS; } +int32_t timeZoneStrLen() { + return sizeof(VarDataLenT) + strlen(tsTimezoneStr); +} + int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { char output[TD_TIMEZONE_LEN + VARSTR_HEADER_SIZE] = {0}; (void)memcpy(varDataVal(output), tsTimezoneStr, TD_TIMEZONE_LEN); diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index 54794b9044f..8db0562c631 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -1031,23 +1031,23 @@ int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = { /*GEOM*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0}; int8_t gDisplyTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = { - /*NULL BOOL TINY SMAL INT BIGI FLOA DOUB VARC TIME NCHA UTINY USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/ - /*NULL*/ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, -1, -1, -1, 8, - /*BOOL*/ 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 10, 11, 12, 13, 14, 8, -1, -1, -1, -1, 8, - /*TINY*/ 0, 0, 2, 3, 4, 5, 8, 8, 8, 5, 10, 3, 4, 5, 8, 8, -1, -1, -1, -1, 8, - /*SMAL*/ 0, 0, 0, 3, 4, 5, 8, 8, 8, 5, 10, 3, 4, 5, 8, 8, -1, -1, -1, -1, 8, - /*INT */ 0, 0, 0, 0, 4, 5, 8, 8, 8, 5, 10, 4, 4, 5, 8, 8, -1, -1, -1, -1, 8, - /*BIGI*/ 0, 0, 0, 0, 0, 5, 8, 8, 8, 5, 10, 5, 5, 5, 8, 8, -1, -1, -1, -1, 8, - /*FLOA*/ 0, 0, 0, 0, 0, 0, 6, 7, 8, 8, 10, 8, 8, 8, 8, 8, -1, -1, -1, -1, 8, - /*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 7, 8, 8, 10, 8, 8, 8, 8, 8, -1, -1, -1, -1, 8, - /*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 10, 8, 8, 8, 8, 8, -1, -1, -1, -1, 8, - /*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 10, 
5, 5, 5, 8, 8, -1, -1, -1, -1, 8, - /*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, -1, -1, -1, -1, 10, - /*UTINY*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 13, 14, 8, -1, -1, -1, -1, 8, - /*USMA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, 8, -1, -1, -1, -1, 8, - /*UINT*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 8, -1, -1, -1, -1, 8, - /*UBIG*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 8, -1, -1, -1, -1, 8, - /*JSON*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, -1, -1, -1, -1, 8, + /*NULL BOOL TINY SMAL INT BIGI FLOA DOUB VARC TIM NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/ + /*NULL*/ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, -1, -1, -1, 20, + /*BOOL*/ 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 10, 11, 12, 13, 14, -1, -1, -1, -1, -1, -1, + /*TINY*/ 0, 0, 2, 3, 4, 5, 8, 8, 8, 5, 10, 3, 4, 5, 8, -1, -1, -1, -1, -1, -1, + /*SMAL*/ 0, 0, 0, 3, 4, 5, 8, 8, 8, 5, 10, 3, 4, 5, 8, -1, -1, -1, -1, -1, -1, + /*INT */ 0, 0, 0, 0, 4, 5, 8, 8, 8, 5, 10, 4, 4, 5, 8, -1, -1, -1, -1, -1, -1, + /*BIGI*/ 0, 0, 0, 0, 0, 5, 8, 8, 8, 5, 10, 5, 5, 5, 8, -1, -1, -1, -1, -1, -1, + /*FLOA*/ 0, 0, 0, 0, 0, 0, 6, 7, 8, 8, 10, 8, 8, 8, 8, -1, -1, -1, -1, -1, -1, + /*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 7, 8, 8, 10, 8, 8, 8, 8, -1, -1, -1, -1, -1, -1, + /*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 10, 8, 8, 8, 8, -1, 16, -1, -1, -1, -1, + /*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 10, 5, 5, 5, 8, -1, -1, -1, -1, -1, -1, + /*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, -1, -1, -1, -1, -1, -1, + /*UTINY*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, 13, 14, -1, -1, -1, -1, -1, -1, + /*USMA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, -1, -1, -1, -1, -1, -1, + /*UINT*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, -1, -1, -1, -1, -1, -1, + /*UBIG*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, -1, -1, -1, -1, -1, -1, + /*JSON*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, -1, -1, -1, -1, -1, /*VARB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 16, -1, -1, -1, -1, /*DECI*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, /*BLOB*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 8a156e8a06d..96b9d2da8d3 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -62,6 +62,7 @@ typedef enum { #define SCH_DEFAULT_MAX_RETRY_NUM 6 #define SCH_MIN_AYSNC_EXEC_NUM 3 #define SCH_DEFAULT_RETRY_TOTAL_ROUND 3 +#define SCH_DEFAULT_TASK_CAPACITY_NUM 1000 typedef struct SSchDebug { bool lockEnable; @@ -318,6 +319,8 @@ typedef struct SSchTaskCtx { extern SSchedulerMgmt schMgmt; +#define SCH_GET_TASK_CAPACITY(_n) ((_n) > SCH_DEFAULT_TASK_CAPACITY_NUM ? SCH_DEFAULT_TASK_CAPACITY_NUM : (_n)) + #define SCH_TASK_TIMEOUT(_task) \ ((taosGetTimestampUs() - *(int64_t *)taosArrayGet((_task)->profile.execTime, (_task)->execId)) > (_task)->timeoutUsec) @@ -330,8 +333,8 @@ extern SSchedulerMgmt schMgmt; #define SCH_TASK_EID(_task) ((_task) ? 
(_task)->execId : -1) #define SCH_IS_DATA_BIND_QRY_TASK(task) ((task)->plan->subplanType == SUBPLAN_TYPE_SCAN) -#define SCH_IS_DATA_BIND_TASK(task) \ - (((task)->plan->subplanType == SUBPLAN_TYPE_SCAN) || ((task)->plan->subplanType == SUBPLAN_TYPE_MODIFY)) +#define SCH_IS_DATA_BIND_PLAN(_plan) (((_plan)->subplanType == SUBPLAN_TYPE_SCAN) || ((_plan)->subplanType == SUBPLAN_TYPE_MODIFY)) +#define SCH_IS_DATA_BIND_TASK(task) SCH_IS_DATA_BIND_PLAN((task)->plan) #define SCH_IS_LEAF_TASK(_job, _task) (((_task)->level->level + 1) == (_job)->levelNum) #define SCH_IS_DATA_MERGE_TASK(task) (!SCH_IS_DATA_BIND_TASK(task)) #define SCH_IS_LOCAL_EXEC_TASK(_job, _task) \ @@ -641,6 +644,7 @@ void schDropTaskInHashList(SSchJob *pJob, SHashObj *list); int32_t schNotifyTaskInHashList(SSchJob *pJob, SHashObj *list, ETaskNotifyType type, SSchTask *pTask); int32_t schLaunchLevelTasks(SSchJob *pJob, SSchLevel *level); void schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask); +int32_t schValidateSubplan(SSchJob *pJob, SSubplan* pSubplan, int32_t level, int32_t idx, int32_t taskNum); int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel); int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask); void schDirectPostJobRes(SSchedulerReq *pReq, int32_t errCode); diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c index 03145da9398..375a3161852 100644 --- a/source/libs/scheduler/src/schJob.c +++ b/source/libs/scheduler/src/schJob.c @@ -200,7 +200,12 @@ int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) { SSubplan *child = (SSubplan *)nodesListGetNode(pPlan->pChildren, n); if (NULL == child) { SCH_JOB_ELOG("fail to get the %dth child subplan, childNum: %d", n, childNum); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + if (QUERY_NODE_PHYSICAL_SUBPLAN != nodeType(child)) { + SCH_JOB_ELOG("invalid subplan type for the %dth child, 
level:%d, subplanNodeType:%d", n, i, nodeType(child)); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } SSchTask **childTask = taosHashGet(planToTask, &child, POINTER_BYTES); @@ -242,6 +247,11 @@ int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) { SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); } + if (QUERY_NODE_PHYSICAL_SUBPLAN != nodeType(parent)) { + SCH_JOB_ELOG("invalid subplan type for the %dth parent, level:%d, subplanNodeType:%d", n, i, nodeType(parent)); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + SSchTask **parentTask = taosHashGet(planToTask, &parent, POINTER_BYTES); if (NULL == parentTask || NULL == *parentTask) { SCH_TASK_ELOG("subplan parent relationship error, level:%d, taskIdx:%d, childIdx:%d", i, m, n); @@ -307,7 +317,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); } - pJob->dataSrcTasks = taosArrayInit(pDag->numOfSubplans, POINTER_BYTES); + pJob->dataSrcTasks = taosArrayInit(SCH_GET_TASK_CAPACITY(pDag->numOfSubplans), POINTER_BYTES); if (NULL == pJob->dataSrcTasks) { SCH_ERR_RET(terrno); } @@ -319,12 +329,12 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { } SHashObj *planToTask = taosHashInit( - pDag->numOfSubplans, + SCH_GET_TASK_CAPACITY(pDag->numOfSubplans), taosGetDefaultHashFunction(POINTER_BYTES == sizeof(int64_t) ? 
TSDB_DATA_TYPE_BIGINT : TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); if (NULL == planToTask) { SCH_JOB_ELOG("taosHashInit %d failed", SCHEDULE_DEFAULT_MAX_TASK_NUM); - SCH_ERR_RET(terrno); + SCH_ERR_JRET(terrno); } pJob->levels = taosArrayInit(levelNum, sizeof(SSchLevel)); @@ -339,6 +349,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { SSchLevel level = {0}; SNodeListNode *plans = NULL; int32_t taskNum = 0; + int32_t totalTaskNum = 0; SSchLevel *pLevel = NULL; level.status = JOB_TASK_STATUS_INIT; @@ -352,7 +363,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { pLevel = taosArrayGet(pJob->levels, i); if (NULL == pLevel) { SCH_JOB_ELOG("fail to get the %dth level, levelNum: %d", i, levelNum); - SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); } pLevel->level = i; @@ -363,12 +374,23 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); } + if (QUERY_NODE_NODE_LIST != nodeType(plans)) { + SCH_JOB_ELOG("invalid level plan, level:%d, planNodeType:%d", i, nodeType(plans)); + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + taskNum = (int32_t)LIST_LENGTH(plans->pNodeList); if (taskNum <= 0) { SCH_JOB_ELOG("invalid level plan number:%d, level:%d", taskNum, i); SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); } + totalTaskNum += taskNum; + if (totalTaskNum > pDag->numOfSubplans) { + SCH_JOB_ELOG("current totalTaskNum %d is bigger than numOfSubplans %d, level:%d", totalTaskNum, pDag->numOfSubplans, i); + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + pLevel->taskNum = taskNum; pLevel->subTasks = taosArrayInit(taskNum, sizeof(SSchTask)); @@ -379,11 +401,9 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { for (int32_t n = 0; n < taskNum; ++n) { SSubplan *plan = (SSubplan *)nodesListGetNode(plans->pNodeList, n); - if (NULL == plan) { - SCH_JOB_ELOG("fail to get the %dth subplan, taskNum: %d", n, taskNum); - 
SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); - } + SCH_ERR_JRET(schValidateSubplan(pJob, plan, pLevel->level, n, taskNum)); + SCH_SET_JOB_TYPE(pJob, plan->subplanType); SSchTask task = {0}; @@ -397,14 +417,16 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { SCH_ERR_JRET(schAppendJobDataSrc(pJob, pTask)); - if (0 != taosHashPut(planToTask, &plan, POINTER_BYTES, &pTask, POINTER_BYTES)) { - SCH_TASK_ELOG("taosHashPut to planToTaks failed, taskIdx:%d", n); - SCH_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); + code = taosHashPut(planToTask, &plan, POINTER_BYTES, &pTask, POINTER_BYTES); + if (0 != code) { + SCH_TASK_ELOG("taosHashPut to planToTaks failed, taskIdx:%d, error:%s", n, tstrerror(code)); + SCH_ERR_JRET(code); } - if (0 != taosHashPut(pJob->taskList, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES)) { - SCH_TASK_ELOG("taosHashPut to taskList failed, taskIdx:%d", n); - SCH_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); + code = taosHashPut(pJob->taskList, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES); + if (0 != code) { + SCH_TASK_ELOG("taosHashPut to taskList failed, taskIdx:%d, error:%s", n, tstrerror(code)); + SCH_ERR_JRET(code); } ++pJob->taskNum; @@ -413,6 +435,11 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { SCH_JOB_DLOG("level %d initialized, taskNum:%d", i, taskNum); } + if (totalTaskNum != pDag->numOfSubplans) { + SCH_JOB_ELOG("totalTaskNum %d mis-match with numOfSubplans %d", totalTaskNum, pDag->numOfSubplans); + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); + } + SCH_ERR_JRET(schBuildTaskRalation(pJob, planToTask)); _return: @@ -781,9 +808,11 @@ void schFreeJobImpl(void *job) { } taosMemoryFree(pJob); - int32_t jobNum = atomic_sub_fetch_32(&schMgmt.jobNum, 1); - if (jobNum == 0) { - schCloseJobRef(); + if (refId > 0) { + int32_t jobNum = atomic_sub_fetch_32(&schMgmt.jobNum, 1); + if (jobNum == 0) { + schCloseJobRef(); + } } qDebug("QID:0x%" PRIx64 " sch job freed, refId:0x%" PRIx64 ", pointer:%p", queryId, 
refId, pJob); @@ -861,10 +890,10 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) { } } - pJob->taskList = taosHashInit(pReq->pDag->numOfSubplans, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, + pJob->taskList = taosHashInit(SCH_GET_TASK_CAPACITY(pReq->pDag->numOfSubplans), taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_ENTRY_LOCK); if (NULL == pJob->taskList) { - SCH_JOB_ELOG("taosHashInit %d taskList failed", pReq->pDag->numOfSubplans); + SCH_JOB_ELOG("taosHashInit %d taskList failed", SCH_GET_TASK_CAPACITY(pReq->pDag->numOfSubplans)); SCH_ERR_JRET(terrno); } @@ -904,7 +933,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) { if (NULL == pJob) { qDestroyQueryPlan(pReq->pDag); - } else if (pJob->refId < 0) { + } else if (pJob->refId <= 0) { schFreeJobImpl(pJob); } else { code = taosRemoveRef(schMgmt.jobRef, pJob->refId); diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index e6b68051f93..fe24633c122 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -831,7 +831,7 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { if (SCH_IS_DATA_BIND_TASK(pTask)) { SCH_TASK_ELOG("no execNode specifed for data src task, numOfEps:%d", pTask->plan->execNode.epSet.numOfEps); - SCH_ERR_RET(TSDB_CODE_MND_INVALID_SCHEMA_VER); + SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); } SCH_ERR_RET(schSetAddrsFromNodeList(pJob, pTask)); diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c index b68f665200f..4697de6f285 100644 --- a/source/libs/scheduler/src/schUtil.c +++ b/source/libs/scheduler/src/schUtil.c @@ -360,3 +360,50 @@ void schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask) *pTask = *task; } + +int32_t schValidateSubplan(SSchJob *pJob, SSubplan* pSubplan, int32_t level, int32_t idx, int32_t taskNum) { + if (NULL == pSubplan) { + SCH_JOB_ELOG("fail to get the %dth subplan, taskNum: %d, 
level: %d", idx, taskNum, level); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + if (QUERY_NODE_PHYSICAL_SUBPLAN != nodeType(pSubplan)) { + SCH_JOB_ELOG("invalid subplan type, level:%d, subplanNodeType:%d", level, nodeType(pSubplan)); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + if (pSubplan->subplanType < SUBPLAN_TYPE_MERGE || pSubplan->subplanType > SUBPLAN_TYPE_COMPUTE) { + SCH_JOB_ELOG("invalid subplanType %d, level:%d, subplan idx:%d", pSubplan->subplanType, level, idx); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + if (pSubplan->level != level) { + SCH_JOB_ELOG("plan level %d mis-match with current level %d", pSubplan->level, level); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + if (SCH_IS_DATA_BIND_PLAN(pSubplan)) { + if (pSubplan->execNode.epSet.numOfEps <= 0) { + SCH_JOB_ELOG("no execNode specifed for data src plan %d, numOfEps:%d", pSubplan->subplanType, pSubplan->execNode.epSet.numOfEps); + SCH_ERR_RET(TSDB_CODE_SCH_DATA_SRC_EP_MISS); + } + if (pSubplan->execNode.epSet.inUse >= pSubplan->execNode.epSet.numOfEps) { + SCH_JOB_ELOG("invalid epset inUse %d for data src plan %d, numOfEps:%d", pSubplan->execNode.epSet.inUse, pSubplan->subplanType, pSubplan->execNode.epSet.numOfEps); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + } + + if (NULL == pSubplan->pNode && pSubplan->subplanType != SUBPLAN_TYPE_MODIFY) { + SCH_JOB_ELOG("empty plan root node, level:%d, subplan idx:%d, subplanType:%d", level, idx, pSubplan->subplanType); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + if (NULL == pSubplan->pDataSink) { + SCH_JOB_ELOG("empty plan dataSink, level:%d, subplan idx:%d", level, idx); + SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT); + } + + return TSDB_CODE_SUCCESS; +} + + diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c index 091de5c0482..db9ecd60250 100644 --- a/source/libs/scheduler/src/scheduler.c +++ b/source/libs/scheduler/src/scheduler.c @@ -224,3 +224,33 @@ void schedulerDestroy(void) { 
qWorkerDestroy(&schMgmt.queryMgmt); schMgmt.queryMgmt = NULL; } + +int32_t schedulerValidatePlan(SQueryPlan* pPlan) { + int32_t code = TSDB_CODE_SUCCESS; + SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob)); + if (NULL == pJob) { + qError("QID:0x%" PRIx64 " calloc %d failed", pPlan->queryId, (int32_t)sizeof(SSchJob)); + SCH_ERR_RET(terrno); + } + + pJob->taskList = taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, + HASH_ENTRY_LOCK); + if (NULL == pJob->taskList) { + SCH_JOB_ELOG("taosHashInit %d taskList failed", 100); + SCH_ERR_JRET(terrno); + } + + SCH_ERR_JRET(schValidateAndBuildJob(pPlan, pJob)); + + if (SCH_IS_EXPLAIN_JOB(pJob)) { + SCH_ERR_JRET(qExecExplainBegin(pPlan, &pJob->explainCtx, 0)); + } + +_return: + + schFreeJobImpl(pJob); + + return code; +} + + diff --git a/source/libs/scheduler/test/schedulerTests.cpp b/source/libs/scheduler/test/schedulerTests.cpp index 44d32b9480e..6e13e37e88e 100644 --- a/source/libs/scheduler/test/schedulerTests.cpp +++ b/source/libs/scheduler/test/schedulerTests.cpp @@ -203,6 +203,10 @@ void schtBuildQueryDag(SQueryPlan *dag) { return; } scanPlan->msgType = TDMT_SCH_QUERY; + code = nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_DISPATCH, (SNode**)&scanPlan->pDataSink); + if (NULL == scanPlan->pDataSink) { + return; + } mergePlan->id.queryId = qId; mergePlan->id.groupId = schtMergeTemplateId; @@ -223,6 +227,10 @@ void schtBuildQueryDag(SQueryPlan *dag) { return; } mergePlan->msgType = TDMT_SCH_QUERY; + code = nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_DISPATCH, (SNode**)&mergePlan->pDataSink); + if (NULL == mergePlan->pDataSink) { + return; + } merge->pNodeList = NULL; code = nodesMakeList(&merge->pNodeList); @@ -235,6 +243,7 @@ void schtBuildQueryDag(SQueryPlan *dag) { return; } + (void)nodesListAppend(merge->pNodeList, (SNode *)mergePlan); (void)nodesListAppend(scan->pNodeList, (SNode *)scanPlan); @@ -250,7 +259,7 @@ void schtBuildQueryFlowCtrlDag(SQueryPlan *dag) { int32_t scanPlanNum = 20; dag->queryId 
= qId; - dag->numOfSubplans = 2; + dag->numOfSubplans = scanPlanNum + 1; dag->pSubplans = NULL; int32_t code = nodesMakeList(&dag->pSubplans); if (NULL == dag->pSubplans) { @@ -289,6 +298,10 @@ void schtBuildQueryFlowCtrlDag(SQueryPlan *dag) { if (NULL == mergePlan->pChildren) { return; } + code = nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_DISPATCH, (SNode**)&mergePlan->pDataSink); + if (NULL == mergePlan->pDataSink) { + return; + } for (int32_t i = 0; i < scanPlanNum; ++i) { SSubplan *scanPlan = NULL; @@ -322,6 +335,10 @@ void schtBuildQueryFlowCtrlDag(SQueryPlan *dag) { return; } scanPlan->msgType = TDMT_SCH_QUERY; + code = nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_DISPATCH, (SNode**)&scanPlan->pDataSink); + if (NULL == scanPlan->pDataSink) { + return; + } (void)nodesListAppend(scanPlan->pParents, (SNode *)mergePlan); (void)nodesListAppend(mergePlan->pChildren, (SNode *)scanPlan); diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h index 1e0801fb6b0..d313acc61d7 100644 --- a/source/libs/stream/inc/streamBackendRocksdb.h +++ b/source/libs/stream/inc/streamBackendRocksdb.h @@ -166,12 +166,13 @@ int32_t streamStateDel_rocksdb(SStreamState* pState, const SWinKey* key); int32_t streamStateClear_rocksdb(SStreamState* pState); void streamStateCurNext_rocksdb(SStreamStateCur* pCur); int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key); -int32_t streamStateGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); +int32_t streamStateGetGroupKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t streamStateAddIfNotExist_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); void streamStateCurPrev_rocksdb(SStreamStateCur* pCur); int32_t streamStateGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); SStreamStateCur* 
streamStateGetAndCheckCur_rocksdb(SStreamState* pState, SWinKey* key); SStreamStateCur* streamStateSeekKeyNext_rocksdb(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateSeekKeyPrev_rocksdb(SStreamState* pState, const SWinKey* key); SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState); SStreamStateCur* streamStateGetCur_rocksdb(SStreamState* pState, const SWinKey* key); @@ -210,10 +211,14 @@ SStreamStateCur* streamStateFillGetCur_rocksdb(SStreamState* pState, const SWinK int32_t streamStateFillGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); SStreamStateCur* streamStateFillSeekKeyPrev_rocksdb(SStreamState* pState, const SWinKey* key); SStreamStateCur* streamStateFillSeekKeyNext_rocksdb(SStreamState* pState, const SWinKey* key); +SStreamStateCur* streamStateFillSeekToLast_rocksdb(SStreamState* pState); +int32_t streamStateFillGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); // partag cf int32_t streamStatePutParTag_rocksdb(SStreamState* pState, int64_t groupId, const void* tag, int32_t tagLen); int32_t streamStateGetParTag_rocksdb(SStreamState* pState, int64_t groupId, void** tagVal, int32_t* tagLen); +void streamStateParTagSeekKeyNext_rocksdb(SStreamState* pState, const int64_t groupId, SStreamStateCur* pCur); +int32_t streamStateParTagGetKVByCur_rocksdb(SStreamStateCur* pCur, int64_t* pGroupId, const void** pVal, int32_t* pVLen); // parname cf int32_t streamStatePutParName_rocksdb(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]); diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index 94c196d280f..863bc76c79e 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -21,6 +21,7 @@ #include "streamBackendRocksdb.h" #include "trpc.h" #include "tstream.h" +#include "tref.h" #ifdef __cplusplus extern "C" { @@ -70,7 +71,7 @@ struct 
SActiveCheckpointInfo { SStreamTmrInfo chkptReadyMsgTmr; }; -int32_t streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, SStreamTask* pTask); +void streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, void* param); typedef struct { int8_t type; @@ -225,6 +226,8 @@ void destroyMetaHbInfo(SMetaHbInfo* pInfo); void streamMetaWaitForHbTmrQuit(SStreamMeta* pMeta); void streamMetaGetHbSendInfo(SMetaHbInfo* pInfo, int64_t* pStartTs, int32_t* pSendCount); int32_t streamMetaSendHbHelper(SStreamMeta* pMeta); +int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid); +void metaRefMgtRemove(int64_t* pRefId); ECHECKPOINT_BACKUP_TYPE streamGetCheckpointBackupType(); @@ -238,7 +241,9 @@ int32_t initCheckpointReadyMsg(SStreamTask* pTask, int32_t upstreamNodeId, int32 int64_t checkpointId, SRpcMsg* pMsg); int32_t flushStateDataInExecutor(SStreamTask* pTask, SStreamQueueItem* pCheckpointBlock); - +int32_t streamCreateSinkResTrigger(SStreamTrigger** pTrigger); +int32_t streamCreateForcewindowTrigger(SStreamTrigger** pTrigger, int32_t trigger, SInterval* pInterval, + STimeWindow* pLatestWindow, const char* id); #ifdef __cplusplus } diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index e86deb1d922..18bb31e94cc 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -3364,7 +3364,7 @@ int32_t streamStateClear_rocksdb(SStreamState* pState) { return 0; } void streamStateCurNext_rocksdb(SStreamStateCur* pCur) { - if (pCur) { + if (pCur && pCur->iter && rocksdb_iter_valid(pCur->iter)) { rocksdb_iter_next(pCur->iter); } } @@ -3386,7 +3386,7 @@ int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key) { return streamStateDel_rocksdb(pState, &tmp); } -int32_t streamStateGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { +int32_t streamStateFillGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* 
pVLen) { if (!pCur) { return -1; } @@ -3478,7 +3478,7 @@ int32_t streamStateGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCu SStreamStateCur* streamStateGetAndCheckCur_rocksdb(SStreamState* pState, SWinKey* key) { SStreamStateCur* pCur = streamStateFillGetCur_rocksdb(pState, key); if (pCur) { - int32_t code = streamStateGetGroupKVByCur_rocksdb(pCur, key, NULL, 0); + int32_t code = streamStateFillGetGroupKVByCur_rocksdb(pCur, key, NULL, 0); if (code == 0) return pCur; streamStateFreeCur(pCur); } @@ -3562,6 +3562,7 @@ SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState) { STREAM_STATE_DEL_ROCKSDB(pState, "state", &maxStateKey); return pCur; } + SStreamStateCur* streamStateGetCur_rocksdb(SStreamState* pState, const SWinKey* key) { stDebug("streamStateGetCur_rocksdb"); STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; @@ -4126,6 +4127,12 @@ SStreamStateCur* streamStateFillSeekKeyPrev_rocksdb(SStreamState* pState, const streamStateFreeCur(pCur); return NULL; } + +SStreamStateCur* streamStateFillSeekToLast_rocksdb(SStreamState* pState) { + SWinKey key = {.groupId = UINT64_MAX, .ts = INT64_MAX}; + return streamStateFillSeekKeyNext_rocksdb(pState, &key); +} + #ifdef BUILD_NO_CALL int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSessionKey* key, SSessionKey* curKey) { stDebug("streamStateSessionGetKeyByRange_rocksdb"); @@ -4316,25 +4323,87 @@ int32_t streamStateStateAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* return res; } -#ifdef BUILD_NO_CALL // partag cf int32_t streamStatePutParTag_rocksdb(SStreamState* pState, int64_t groupId, const void* tag, int32_t tagLen) { int code = 0; char* dst = NULL; size_t size = 0; - if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) { + if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL || tag == NULL) { STREAM_STATE_PUT_ROCKSDB(pState, "partag", &groupId, tag, tagLen); return code; } - code = 
(pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size); + code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, tag, tagLen, &dst, &size); if (code != 0) { return code; } - STREAM_STATE_PUT_ROCKSDB(pState, "partag", &groupId, dst, size); + STREAM_STATE_PUT_ROCKSDB(pState, "partag", &groupId, dst, (int32_t)size); taosMemoryFree(dst); return code; } +void streamStateParTagSeekKeyNext_rocksdb(SStreamState* pState, const int64_t groupId, SStreamStateCur* pCur) { + if (pCur == NULL) { + return ; + } + STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; + pCur->number = pState->number; + pCur->db = wrapper->db; + pCur->iter = streamStateIterCreate(pState, "partag", (rocksdb_snapshot_t**)&pCur->snapshot, + (rocksdb_readoptions_t**)&pCur->readOpt); + int i = streamStateGetCfIdx(pState, "partag"); + if (i < 0) { + stError("streamState failed to put to cf name:%s", "partag"); + return ; + } + + char buf[128] = {0}; + int32_t klen = ginitDict[i].enFunc((void*)&groupId, buf); + if (!streamStateIterSeekAndValid(pCur->iter, buf, klen)) { + return ; + } + // skip ttl expired data + while (rocksdb_iter_valid(pCur->iter) && iterValueIsStale(pCur->iter)) { + rocksdb_iter_next(pCur->iter); + } + + if (rocksdb_iter_valid(pCur->iter)) { + int64_t curGroupId; + size_t kLen = 0; + char* keyStr = (char*)rocksdb_iter_key(pCur->iter, &kLen); + TAOS_UNUSED(parKeyDecode((void*)&curGroupId, keyStr)); + if (curGroupId > groupId) return ; + + rocksdb_iter_next(pCur->iter); + } +} + +int32_t streamStateParTagGetKVByCur_rocksdb(SStreamStateCur* pCur, int64_t* pGroupId, const void** pVal, int32_t* pVLen) { + stDebug("streamStateFillGetKVByCur_rocksdb"); + if (!pCur) { + return -1; + } + SWinKey winKey; + if (!rocksdb_iter_valid(pCur->iter) || iterValueIsStale(pCur->iter)) { + return -1; + } + + size_t klen, vlen; + char* keyStr = (char*)rocksdb_iter_key(pCur->iter, &klen); + (void)parKeyDecode(pGroupId, keyStr); + + if (pVal) { + const char* valStr 
= rocksdb_iter_value(pCur->iter, &vlen); + int32_t len = valueDecode((void*)valStr, vlen, NULL, (char**)pVal); + if (len < 0) { + return -1; + } + if (pVLen != NULL) *pVLen = len; + } + + return 0; +} + +#ifdef BUILD_NO_CALL int32_t streamStateGetParTag_rocksdb(SStreamState* pState, int64_t groupId, void** tagVal, int32_t* tagLen) { int code = 0; char* tVal; @@ -4538,7 +4607,9 @@ int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb rocksdb_column_family_handle_t* pCf = wrapper->pCf[ginitDict[cfIdx].idx]; rocksdb_writebatch_put_cf((rocksdb_writebatch_t*)pBatch, pCf, buf, (size_t)klen, ttlV, (size_t)ttlVLen); - taosMemoryFree(dst); + if (pState->pResultRowStore.resultRowPut != NULL && pState->pExprSupp != NULL) { + taosMemoryFree(dst); + } if (tmpBuf == NULL) { taosMemoryFree(ttlV); @@ -5065,13 +5136,13 @@ int32_t dbChkpDumpTo(SDbChkp* p, char* dname, SArray* list) { goto _ERROR; } memset(dstBuf, 0, cap); - nBytes = snprintf(dstDir, cap, "%s%s%s", dstDir, TD_DIRSEP, chkpMeta); + nBytes = snprintf(dstBuf, cap, "%s%s%s", dstDir, TD_DIRSEP, chkpMeta); if (nBytes <= 0 || nBytes >= cap) { code = TSDB_CODE_OUT_OF_RANGE; goto _ERROR; } - TdFilePtr pFile = taosOpenFile(dstDir, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + TdFilePtr pFile = taosOpenFile(dstBuf, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); if (pFile == NULL) { code = terrno; stError("chkp failed to create meta file: %s, reason:%s", dstDir, tstrerror(code)); @@ -5240,3 +5311,61 @@ int32_t bkdMgtDumpTo(SBkdMgt* bm, char* taskId, char* dname) { return code; } #endif + +SStreamStateCur* streamStateSeekKeyPrev_rocksdb(SStreamState* pState, const SWinKey* key) { + stDebug("streamStateSeekKeyPrev_rocksdb"); + STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend; + SStreamStateCur* pCur = createStreamStateCursor(); + if (pCur == NULL) { + return NULL; + } + + pCur->db = wrapper->db; + pCur->iter = streamStateIterCreate(pState, "state", 
(rocksdb_snapshot_t**)&pCur->snapshot, + (rocksdb_readoptions_t**)&pCur->readOpt); + pCur->number = pState->number; + + char buf[128] = {0}; + int len = winKeyEncode((void*)key, buf); + if (!streamStateIterSeekAndValid(pCur->iter, buf, len)) { + streamStateFreeCur(pCur); + return NULL; + } + while (rocksdb_iter_valid(pCur->iter) && iterValueIsStale(pCur->iter)) { + rocksdb_iter_prev(pCur->iter); + } + + if (rocksdb_iter_valid(pCur->iter)) { + SWinKey curKey; + size_t kLen = 0; + char* keyStr = (char*)rocksdb_iter_key(pCur->iter, &kLen); + TAOS_UNUSED(winKeyDecode((void*)&curKey, keyStr)); + if (winKeyCmpr(key, sizeof(*key), &curKey, sizeof(curKey)) > 0) { + return pCur; + } + rocksdb_iter_prev(pCur->iter); + return pCur; + } + + streamStateFreeCur(pCur); + return NULL; +} + +int32_t streamStateGetGroupKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { + if (!pCur) { + return -1; + } + uint64_t groupId = pKey->groupId; + + int32_t code = streamStateGetKVByCur_rocksdb(pState, pCur, pKey, pVal, pVLen); + if (code == 0) { + if (pKey->groupId == groupId) { + return 0; + } + if (pVal != NULL) { + taosMemoryFree((void*)*pVal); + *pVal = NULL; + } + } + return -1; +} diff --git a/source/libs/stream/src/streamCheckStatus.c b/source/libs/stream/src/streamCheckStatus.c index c1c54b3c0bf..64b19e4ed9e 100644 --- a/source/libs/stream/src/streamCheckStatus.c +++ b/source/libs/stream/src/streamCheckStatus.c @@ -299,13 +299,14 @@ void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) { return; } - int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); // add task ref here streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs()); - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s start check-rsp monitor, ref:%d ", pTask->id.idStr, ref); - streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTask, streamTimer, &pInfo->checkRspTmr, vgId, - 
"check-status-monitor"); + int64_t* pTaskRefId = NULL; + code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTaskRefId, streamTimer, &pInfo->checkRspTmr, vgId, + "check-status-monitor"); + } streamMutexUnlock(&pInfo->checkInfoLock); } @@ -721,21 +722,45 @@ int32_t addDownstreamFailedStatusResultAsync(SMsgCb* pMsgCb, int32_t vgId, int64 return streamTaskSchedTask(pMsgCb, vgId, streamId, taskId, STREAM_EXEC_T_ADD_FAILED_TASK); } +static void doCleanup(SStreamTask* pTask, SArray* pNotReadyList, SArray* pTimeoutList, void* param) { + streamMetaReleaseTask(pTask->pMeta, pTask); + + taosArrayDestroy(pNotReadyList); + taosArrayDestroy(pTimeoutList); + streamTaskFreeRefId(param); +} + // this function is executed in timer thread void rspMonitorFn(void* param, void* tmrId) { - SStreamTask* pTask = param; - SStreamMeta* pMeta = pTask->pMeta; - STaskCheckInfo* pInfo = &pTask->taskCheckInfo; - int32_t vgId = pTask->pMeta->vgId; - int64_t now = taosGetTimestampMs(); - int64_t timeoutDuration = now - pInfo->timeoutStartTs; - const char* id = pTask->id.idStr; int32_t numOfReady = 0; int32_t numOfFault = 0; int32_t numOfNotRsp = 0; int32_t numOfNotReady = 0; int32_t numOfTimeout = 0; - int32_t total = taosArrayGetSize(pInfo->pList); + int64_t taskRefId = *(int64_t*)param; + int64_t now = taosGetTimestampMs(); + SArray* pNotReadyList = NULL; + SArray* pTimeoutList = NULL; + SStreamMeta* pMeta = NULL; + STaskCheckInfo* pInfo = NULL; + int32_t vgId = -1; + int64_t timeoutDuration = 0; + const char* id = NULL; + int32_t total = 0; + + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); + streamTaskFreeRefId(param); + return; + } + + pMeta = pTask->pMeta; + pInfo = &pTask->taskCheckInfo; + vgId = pTask->pMeta->vgId; + timeoutDuration = now - pInfo->timeoutStartTs; + id = 
pTask->id.idStr; + total = (int32_t) taosArrayGetSize(pInfo->pList); stDebug("s-task:%s start to do check-downstream-rsp check in tmr", id); @@ -744,12 +769,10 @@ void rspMonitorFn(void* param, void* tmrId) { streamMutexUnlock(&pTask->lock); if (state.state == TASK_STATUS__STOP) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr, ref:%d", id, state.name, vgId, ref); - + stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr", id, state.name, vgId); streamTaskCompleteCheckRsp(pInfo, true, id); - // not record the failed of the current task if try to close current vnode + // not record the failure of the current task if try to close current vnode // otherwise, the put of message operation may incur invalid read of message queue. if (!pMeta->closeFlag) { int32_t code = addDownstreamFailedStatusResultAsync(pTask->pMsgCb, vgId, pTask->id.streamId, pTask->id.taskId); @@ -758,33 +781,30 @@ void rspMonitorFn(void* param, void* tmrId) { } } - streamMetaReleaseTask(pMeta, pTask); + doCleanup(pTask, pNotReadyList, pTimeoutList, param); return; } if (state.state == TASK_STATUS__DROPPING || state.state == TASK_STATUS__READY) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr, ref:%d", id, state.name, vgId, ref); + stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr", id, state.name, vgId); streamTaskCompleteCheckRsp(pInfo, true, id); - streamMetaReleaseTask(pMeta, pTask); + doCleanup(pTask, pNotReadyList, pTimeoutList, param); return; } streamMutexLock(&pInfo->checkInfoLock); if (pInfo->notReadyTasks == 0) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s vgId:%d all downstream ready, quit from monitor rsp tmr, ref:%d", id, state.name, vgId, - ref); + stDebug("s-task:%s status:%s vgId:%d all downstream ready, quit from 
monitor rsp tmr", id, state.name, vgId); streamTaskCompleteCheckRsp(pInfo, false, id); streamMutexUnlock(&pInfo->checkInfoLock); - streamMetaReleaseTask(pMeta, pTask); + doCleanup(pTask, pNotReadyList, pTimeoutList, param); return; } - SArray* pNotReadyList = taosArrayInit(4, sizeof(int64_t)); - SArray* pTimeoutList = taosArrayInit(4, sizeof(int64_t)); + pNotReadyList = taosArrayInit(4, sizeof(int64_t)); + pTimeoutList = taosArrayInit(4, sizeof(int64_t)); if (state.state == TASK_STATUS__UNINIT) { getCheckRspStatus(pInfo, timeoutDuration, &numOfReady, &numOfFault, &numOfNotRsp, pTimeoutList, pNotReadyList, id); @@ -795,31 +815,25 @@ void rspMonitorFn(void* param, void* tmrId) { // fault tasks detected, not try anymore bool jumpOut = false; if ((numOfReady + numOfFault + numOfNotReady + numOfTimeout + numOfNotRsp) != total) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stError( "s-task:%s vgId:%d internal error in handling the check downstream procedure, rsp number is inconsistent, " - "stop rspMonitor tmr, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d ref:%d", - id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady, ref); + "stop rspMonitor tmr, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d", + id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady); jumpOut = true; } if (numOfFault > 0) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); stDebug( "s-task:%s status:%s vgId:%d all rsp. 
quit from monitor rsp tmr, since vnode-transfer/leader-change/restart " - "detected, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d ref:%d", - id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady, ref); + "detected, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d", + id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady); jumpOut = true; } if (jumpOut) { streamTaskCompleteCheckRsp(pInfo, false, id); streamMutexUnlock(&pInfo->checkInfoLock); - streamMetaReleaseTask(pMeta, pTask); - - taosArrayDestroy(pNotReadyList); - taosArrayDestroy(pTimeoutList); + doCleanup(pTask, pNotReadyList, pTimeoutList, param); return; } } else { // unexpected status @@ -828,11 +842,10 @@ void rspMonitorFn(void* param, void* tmrId) { // checking of downstream tasks has been stopped by other threads if (pInfo->stopCheckProcess == 1) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); stDebug( "s-task:%s status:%s vgId:%d stopped by other threads to check downstream process, total:%d, notRsp:%d, " - "notReady:%d, fault:%d, timeout:%d, ready:%d ref:%d", - id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady, ref); + "notReady:%d, fault:%d, timeout:%d, ready:%d", + id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady); streamTaskCompleteCheckRsp(pInfo, false, id); streamMutexUnlock(&pInfo->checkInfoLock); @@ -842,10 +855,7 @@ void rspMonitorFn(void* param, void* tmrId) { stError("s-task:%s failed to create async record start failed task, code:%s", id, tstrerror(code)); } - streamMetaReleaseTask(pMeta, pTask); - - taosArrayDestroy(pNotReadyList); - taosArrayDestroy(pTimeoutList); + doCleanup(pTask, pNotReadyList, pTimeoutList, param); return; } @@ -857,7 +867,7 @@ void rspMonitorFn(void* param, void* tmrId) { handleTimeoutDownstreamTasks(pTask, pTimeoutList); } - 
streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTask, streamTimer, &pInfo->checkRspTmr, vgId, + streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, param, streamTimer, &pInfo->checkRspTmr, vgId, "check-status-monitor"); streamMutexUnlock(&pInfo->checkInfoLock); @@ -865,7 +875,5 @@ void rspMonitorFn(void* param, void* tmrId) { "s-task:%s vgId:%d continue checking rsp in 300ms, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, " "ready:%d", id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady); - - taosArrayDestroy(pNotReadyList); - taosArrayDestroy(pTimeoutList); + doCleanup(pTask, pNotReadyList, pTimeoutList, NULL); } diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 73fe39c2034..f024507f8e9 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -345,13 +345,15 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1); if (old == 0) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref); - - int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); - streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, - "trigger-recv-monitor"); - pTmrInfo->launchChkptId = pActiveInfo->activeId; + stDebug("s-task:%s start checkpoint-trigger monitor in 10s", pTask->id.idStr); + + int64_t* pTaskRefId = NULL; + code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(checkpointTriggerMonitorFn, 200, pTaskRefId, streamTimer, &pTmrInfo->tmrHandle, vgId, + "trigger-recv-monitor"); + pTmrInfo->launchChkptId = pActiveInfo->activeId; + } } else { // already launched, do nothing stError("s-task:%s previous 
checkpoint-trigger monitor tmr is set, not start new one", pTask->id.idStr); } @@ -890,7 +892,7 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) { return code; } -static int32_t doChkptStatusCheck(SStreamTask* pTask) { +static int32_t doChkptStatusCheck(SStreamTask* pTask, void* param) { const char* id = pTask->id.idStr; int32_t vgId = pTask->pMeta->vgId; SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; @@ -898,25 +900,24 @@ static int32_t doChkptStatusCheck(SStreamTask* pTask) { // checkpoint-trigger recv flag is set, quit if (pActiveInfo->allUpstreamTriggerRecv) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s vgId:%d all checkpoint-trigger recv, quit from monitor checkpoint-trigger, ref:%d", id, vgId, - ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s vgId:%d all checkpoint-trigger recv, quit from monitor checkpoint-trigger", id, vgId); return -1; } if (pTmrInfo->launchChkptId != pActiveInfo->activeId) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); stWarn("s-task:%s vgId:%d checkpoint-trigger retrieve by previous checkpoint procedure, checkpointId:%" PRId64 - ", quit, ref:%d", - id, vgId, pTmrInfo->launchChkptId, ref); + ", quit", + id, vgId, pTmrInfo->launchChkptId); return -1; } // active checkpoint info is cleared for now if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (pTask->chkInfo.startTs == 0)) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from retrieve checkpoint-trigger send tmr, ref:%d", - id, vgId, ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from retrieve checkpoint-trigger send tmr", id, + vgId); return -1; } @@ -964,22 +965,22 @@ static int32_t doFindNotSendUpstream(SStreamTask* pTask, SArray* pList, SArray** return 0; } -static int32_t 
chkptTriggerRecvMonitorHelper(SStreamTask* pTask, SArray* pNotSendList) { +static int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, void* param, SArray* pNotSendList) { const char* id = pTask->id.idStr; SArray* pList = pTask->upstreamInfo.pList; // send msg to retrieve checkpoint trigger msg SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; int32_t vgId = pTask->pMeta->vgId; - int32_t code = doChkptStatusCheck(pTask); + int32_t code = doChkptStatusCheck(pTask, param); if (code) { return code; } code = doFindNotSendUpstream(pTask, pList, &pNotSendList); if (code) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s failed to find not send upstream, code:%s, out of tmr, ref:%d", id, tstrerror(code), ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s failed to find not send upstream, code:%s, out of tmr", id, tstrerror(code)); return code; } @@ -993,37 +994,50 @@ static int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, SArray* pNotSen return code; } +static void doCleanup(SStreamTask* pTask, SArray* pList) { + streamMetaReleaseTask(pTask->pMeta, pTask); + taosArrayDestroy(pList); +} + void checkpointTriggerMonitorFn(void* param, void* tmrId) { - SStreamTask* pTask = param; - int32_t vgId = pTask->pMeta->vgId; - int64_t now = taosGetTimestampMs(); - const char* id = pTask->id.idStr; - SArray* pNotSendList = NULL; - SArray* pList = pTask->upstreamInfo.pList; // send msg to retrieve checkpoint trigger msg int32_t code = 0; int32_t numOfNotSend = 0; + SArray* pNotSendList = NULL; + int64_t taskRefId = *(int64_t*)param; + int64_t now = taosGetTimestampMs(); + + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); + streamTaskFreeRefId(param); + return; + } + int32_t vgId = pTask->pMeta->vgId; + 
const char* id = pTask->id.idStr; + SArray* pList = pTask->upstreamInfo.pList; // send msg to retrieve checkpoint trigger msg SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stError("s-task:%s source task should not start the checkpoint-trigger monitor fn, ref:%d quit", id, ref); - streamMetaReleaseTask(pTask->pMeta, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stError("s-task:%s source task should not start the checkpoint-trigger monitor fn, quit", id); + doCleanup(pTask, pNotSendList); return; } // check the status every 100ms if (streamTaskShouldStop(pTask)) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s vgId:%d quit from monitor checkpoint-trigger, ref:%d", id, vgId, ref); - streamMetaReleaseTask(pTask->pMeta, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s vgId:%d quit from monitor checkpoint-trigger", id, vgId); + doCleanup(pTask, pNotSendList); return; } if (++pTmrInfo->activeCounter < 50) { - streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + streamTmrStart(checkpointTriggerMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); + doCleanup(pTask, pNotSendList); return; } @@ -1035,20 +1049,19 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { streamMutexUnlock(&pTask->lock); if (state.state != TASK_STATUS__CK) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s vgId:%d status:%s not in checkpoint status, quit from monitor checkpoint-trigger, ref:%d", id, - vgId, state.name, ref); - streamMetaReleaseTask(pTask->pMeta, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s vgId:%d status:%s not in checkpoint status, quit from monitor checkpoint-trigger", id, + 
vgId, state.name); + doCleanup(pTask, pNotSendList); return; } streamMutexLock(&pActiveInfo->lock); - code = chkptTriggerRecvMonitorHelper(pTask, pNotSendList); + code = chkptTriggerRecvMonitorHelper(pTask, param, pNotSendList); streamMutexUnlock(&pActiveInfo->lock); if (code != TSDB_CODE_SUCCESS) { - streamMetaReleaseTask(pTask->pMeta, pTask); - taosArrayDestroy(pNotSendList); + doCleanup(pTask, pNotSendList); return; } @@ -1056,15 +1069,14 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { numOfNotSend = taosArrayGetSize(pNotSendList); if (numOfNotSend > 0) { stDebug("s-task:%s start to monitor checkpoint-trigger in 10s", id); - streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + streamTmrStart(checkpointTriggerMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); } else { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s all checkpoint-trigger recved, quit from monitor checkpoint-trigger tmr, ref:%d", id, ref); - streamMetaReleaseTask(pTask->pMeta, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s all checkpoint-trigger recved, quit from monitor checkpoint-trigger tmr", id); } - taosArrayDestroy(pNotSendList); + doCleanup(pTask, pNotSendList); } int32_t doSendRetrieveTriggerMsg(SStreamTask* pTask, SArray* pNotSendList) { diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index 58826b2e999..306d6a02390 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -15,6 +15,42 @@ #include "streamInt.h" +static int32_t streamMergedSubmitNew(SStreamMergedSubmit** pSubmit) { + *pSubmit = NULL; + + int32_t code = taosAllocateQitem(sizeof(SStreamMergedSubmit), DEF_QITEM, 0, (void**)pSubmit); + if (code) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + (*pSubmit)->submits = taosArrayInit(0, sizeof(SPackedData)); + if ((*pSubmit)->submits == NULL) { + 
taosFreeQitem(*pSubmit); + *pSubmit = NULL; + return terrno; + } + + (*pSubmit)->type = STREAM_INPUT__MERGED_SUBMIT; + return TSDB_CODE_SUCCESS; +} + +static int32_t streamMergeSubmit(SStreamMergedSubmit* pMerged, SStreamDataSubmit* pSubmit) { + void* p = taosArrayPush(pMerged->submits, &pSubmit->submit); + if (p == NULL) { + return terrno; + } + + if (pSubmit->ver > pMerged->ver) { + pMerged->ver = pSubmit->ver; + } + return 0; +} + +static void freeItems(void* param) { + SSDataBlock* pBlock = param; + taosArrayDestroy(pBlock->pDataBlock); +} + int32_t createStreamBlockFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg, SStreamDataBlock** pRes) { SStreamDataBlock* pData = NULL; int32_t code = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, pReq->totalLen, (void**)&pData); @@ -179,37 +215,6 @@ void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit) { } } -int32_t streamMergedSubmitNew(SStreamMergedSubmit** pSubmit) { - *pSubmit = NULL; - - int32_t code = taosAllocateQitem(sizeof(SStreamMergedSubmit), DEF_QITEM, 0, (void**)pSubmit); - if (code) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - (*pSubmit)->submits = taosArrayInit(0, sizeof(SPackedData)); - if ((*pSubmit)->submits == NULL) { - taosFreeQitem(*pSubmit); - *pSubmit = NULL; - return terrno; - } - - (*pSubmit)->type = STREAM_INPUT__MERGED_SUBMIT; - return TSDB_CODE_SUCCESS; -} - -int32_t streamMergeSubmit(SStreamMergedSubmit* pMerged, SStreamDataSubmit* pSubmit) { - void* p = taosArrayPush(pMerged->submits, &pSubmit->submit); - if (p == NULL) { - return terrno; - } - - if (pSubmit->ver > pMerged->ver) { - pMerged->ver = pSubmit->ver; - } - return 0; -} - // todo handle memory error int32_t streamQueueMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem, SStreamQueueItem** pRes) { *pRes = NULL; @@ -267,11 +272,6 @@ int32_t streamQueueMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem } } -static void freeItems(void* param) { - SSDataBlock* pBlock = 
param; - taosArrayDestroy(pBlock->pDataBlock); -} - void streamFreeQitem(SStreamQueueItem* data) { int8_t type = data->type; if (type == STREAM_INPUT__GET_RES) { @@ -306,3 +306,68 @@ void streamFreeQitem(SStreamQueueItem* data) { taosFreeQitem(pBlock); } } + +int32_t streamCreateForcewindowTrigger(SStreamTrigger** pTrigger, int32_t trigger, SInterval* pInterval, STimeWindow* pLatestWindow, const char* id) { + QRY_PARAM_CHECK(pTrigger); + int64_t ts = INT64_MIN; + SStreamTrigger* p = NULL; + + int32_t code = taosAllocateQitem(sizeof(SStreamTrigger), DEF_QITEM, 0, (void**)&p); + if (code) { + return code; + } + + p->type = STREAM_INPUT__GET_RES; + p->pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + if (p->pBlock == NULL) { + taosFreeQitem(p); + return terrno; + } + + // let's calculate the previous time window + SInterval interval = {.interval = trigger, + .sliding = trigger, + .intervalUnit = pInterval->intervalUnit, + .slidingUnit = pInterval->slidingUnit}; + + ts = taosGetTimestampMs(); + + if (pLatestWindow->skey == INT64_MIN) { + STimeWindow window = getAlignQueryTimeWindow(&interval, ts - trigger); + + p->pBlock->info.window.skey = window.skey; + p->pBlock->info.window.ekey = TMAX(ts, window.ekey); + } else { + int64_t skey = pLatestWindow->skey + trigger; + p->pBlock->info.window.skey = skey; + p->pBlock->info.window.ekey = TMAX(ts, skey + trigger); + } + + p->pBlock->info.type = STREAM_GET_RESULT; + stDebug("s-task:%s force_window_close trigger block generated, window range:%" PRId64 "-%" PRId64, id, + p->pBlock->info.window.skey, p->pBlock->info.window.ekey); + + *pTrigger = p; + return code; +} + +int32_t streamCreateSinkResTrigger(SStreamTrigger** pTrigger) { + QRY_PARAM_CHECK(pTrigger); + SStreamTrigger* p = NULL; + + int32_t code = taosAllocateQitem(sizeof(SStreamTrigger), DEF_QITEM, 0, (void**)&p); + if (code) { + return code; + } + + p->type = STREAM_INPUT__GET_RES; + p->pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + if (p->pBlock == NULL) { 
+ taosFreeQitem(p); + return terrno; + } + + p->pBlock->info.type = STREAM_GET_ALL; + *pTrigger = p; + return code; +} \ No newline at end of file diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 7e08c348506..820b1045ac7 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -145,7 +145,8 @@ int32_t streamTaskBroadcastRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* r static int32_t buildStreamRetrieveReq(SStreamTask* pTask, const SSDataBlock* pBlock, SStreamRetrieveReq* req) { SRetrieveTableRsp* pRetrieve = NULL; - int32_t len = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + int32_t len = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; pRetrieve = taosMemoryCalloc(1, len); if (pRetrieve == NULL) return terrno; @@ -162,7 +163,7 @@ static int32_t buildStreamRetrieveReq(SStreamTask* pTask, const SSDataBlock* pBl pRetrieve->ekey = htobe64(pBlock->info.window.ekey); pRetrieve->version = htobe64(pBlock->info.version); - int32_t actualLen = blockEncode(pBlock, pRetrieve->data + PAYLOAD_PREFIX_LEN, numOfCols); + int32_t actualLen = blockEncode(pBlock, pRetrieve->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, numOfCols); if (actualLen < 0) { taosMemoryFree(pRetrieve); return terrno; @@ -518,45 +519,66 @@ static void doSendFailedDispatch(SStreamTask* pTask, SDispatchEntry* pEntry, int } } +static void cleanupInMonitor(int32_t taskId, int64_t taskRefId, void* param) { + int32_t ret = taosReleaseRef(streamTaskRefPool, taskRefId); + if (ret) { + stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, taskRefId); + } + streamTaskFreeRefId(param); +} + static void doMonitorDispatchData(void* param, void* tmrId) { - SStreamTask* pTask = param; - const char* id = pTask->id.idStr; - int32_t vgId = pTask->pMeta->vgId; - SDispatchMsgInfo* pMsgInfo = &pTask->msgInfo; - 
int32_t msgId = pMsgInfo->msgId; int32_t code = 0; int64_t now = taosGetTimestampMs(); bool inDispatch = true; + SStreamTask* pTask = NULL; + int64_t taskRefId = *(int64_t*)param; + const char* id = NULL; + int32_t vgId = -1; + SDispatchMsgInfo* pMsgInfo = NULL; + int32_t msgId = -1; + + pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); + streamTaskFreeRefId(param); + return; + } - stDebug("s-task:%s start monitor dispatch data", id); + id = pTask->id.idStr; + vgId = pTask->pMeta->vgId; + pMsgInfo = &pTask->msgInfo; + msgId = pMsgInfo->msgId; + + stDebug("s-task:%s start to monitor dispatch data", id); if (streamTaskShouldStop(pTask)) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref); + stDebug("s-task:%s should stop, abort from timer", pTask->id.idStr); setNotInDispatchMonitor(pMsgInfo); + cleanupInMonitor(pTask->id.taskId, taskRefId, param); return; } // slave task not handle the dispatch, downstream not ready will break the monitor timer // follower not handle the dispatch rsp if ((pTask->pMeta->role == NODE_ROLE_FOLLOWER) || (pTask->status.downstreamReady != 1)) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stError("s-task:%s vgId:%d follower or downstream not ready, jump out of monitor tmr, ref:%d", id, vgId, ref); + stError("s-task:%s vgId:%d follower or downstream not ready, jump out of monitor tmr", id, vgId); setNotInDispatchMonitor(pMsgInfo); + cleanupInMonitor(pTask->id.taskId, taskRefId, param); return; } streamMutexLock(&pMsgInfo->lock); if (pTask->outputq.status == TASK_OUTPUT_STATUS__NORMAL) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s not in dispatch procedure, abort from timer, ref:%d", pTask->id.idStr, ref); - + stDebug("s-task:%s not in dispatch 
procedure, abort from timer", pTask->id.idStr); pMsgInfo->inMonitor = 0; inDispatch = false; } + streamMutexUnlock(&pMsgInfo->lock); if (!inDispatch) { + cleanupInMonitor(pTask->id.taskId, taskRefId, param); return; } @@ -564,6 +586,7 @@ static void doMonitorDispatchData(void* param, void* tmrId) { if (numOfFailed == 0) { stDebug("s-task:%s no error occurs, check again in %dms", id, DISPATCH_RETRY_INTERVAL_MS); streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS); + cleanupInMonitor(pTask->id.taskId, taskRefId, param); return; } @@ -628,18 +651,23 @@ static void doMonitorDispatchData(void* param, void* tmrId) { } if (streamTaskShouldStop(pTask)) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref); + stDebug("s-task:%s should stop, abort from timer", pTask->id.idStr); setNotInDispatchMonitor(pMsgInfo); } else { streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS); } + + cleanupInMonitor(pTask->id.taskId, taskRefId, param); } void streamStartMonitorDispatchData(SStreamTask* pTask, int64_t waitDuration) { - int32_t vgId = pTask->pMeta->vgId; - streamTmrStart(doMonitorDispatchData, waitDuration, pTask, streamTimer, &pTask->msgInfo.pRetryTmr, vgId, - "dispatch-monitor"); + int32_t vgId = pTask->pMeta->vgId; + int64_t* pTaskRefId = NULL; + int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(doMonitorDispatchData, waitDuration, pTaskRefId, streamTimer, &pTask->msgInfo.pRetryTmr, vgId, + "dispatch-monitor"); + } } static int32_t doAddDispatchBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, @@ -854,9 +882,9 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) { } else { streamMutexLock(&pTask->msgInfo.lock); if (pTask->msgInfo.inMonitor == 0) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s start dispatch monitor tmr in %dms, ref:%d, 
dispatch code:%s", id, DISPATCH_RETRY_INTERVAL_MS, - ref, tstrerror(code)); +// int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); + stDebug("s-task:%s start dispatch monitor tmr in %dms, dispatch code:%s", id, DISPATCH_RETRY_INTERVAL_MS, + tstrerror(code)); streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS); pTask->msgInfo.inMonitor = 1; } else { @@ -911,31 +939,31 @@ int32_t initCheckpointReadyMsg(SStreamTask* pTask, int32_t upstreamNodeId, int32 return TSDB_CODE_SUCCESS; } -static int32_t doTaskChkptStatusCheck(SStreamTask* pTask, int32_t num) { +static int32_t doTaskChkptStatusCheck(SStreamTask* pTask, void* param, int32_t num) { SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr; const char* id = pTask->id.idStr; int32_t vgId = pTask->pMeta->vgId; if (pTmrInfo->launchChkptId != pActiveInfo->activeId) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); stWarn("s-task:%s vgId:%d ready-msg send tmr launched by previous checkpoint procedure, checkpointId:%" PRId64 - ", quit, ref:%d", - id, vgId, pTmrInfo->launchChkptId, ref); + ", quit", + id, vgId, pTmrInfo->launchChkptId); return -1; } // active checkpoint info is cleared for now if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (num == 0) || (pTask->chkInfo.startTs == 0)) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from readyMsg send tmr, ref:%d", id, vgId, ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from readyMsg send tmr", id, vgId); return -1; } if (taosArrayGetSize(pTask->upstreamInfo.pList) != num) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stWarn("s-task:%s vgId:%d upstream number:%d not equals sent readyMsg:%d, quit from readyMsg send tmr, ref:%d", id, - vgId, 
(int32_t)taosArrayGetSize(pTask->upstreamInfo.pList), num, ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stWarn("s-task:%s vgId:%d upstream number:%d not equals sent readyMsg:%d, quit from readyMsg send tmr", id, + vgId, (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList), num); return -1; } @@ -1011,7 +1039,7 @@ static void doSendChkptReadyMsg(SStreamTask* pTask, SArray* pNotRspList, int64_t } } -static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, SArray* pNotRspList) { +static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, void* param, SArray* pNotRspList) { SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr; SArray* pList = pActiveInfo->pReadyMsgList; @@ -1021,16 +1049,15 @@ static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, SArray* pNotRspList) const char* id = pTask->id.idStr; int32_t notRsp = 0; - int32_t code = doTaskChkptStatusCheck(pTask, num); + int32_t code = doTaskChkptStatusCheck(pTask, param, num); if (code) { return code; } code = doFindNotConfirmUpstream(&pNotRspList, pList, num, vgId, pTask->info.taskLevel, id); if (code) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stError("s-task:%s failed to find not rsp checkpoint-ready downstream, code:%s, out of tmr, ref:%d", id, - tstrerror(code), ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stError("s-task:%s failed to find not rsp checkpoint-ready downstream, code:%s, out of tmr", id, tstrerror(code)); return code; } @@ -1045,26 +1072,41 @@ static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, SArray* pNotRspList) } static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) { - SStreamTask* pTask = param; - int32_t vgId = pTask->pMeta->vgId; - const char* id = pTask->id.idStr; - SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; - SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr; SArray* pNotRspList = NULL; int32_t code = 0; int32_t notRsp = 0; + 
int64_t taskRefId = *(int64_t*)param; + int32_t vgId = -1; + const char* id = NULL; + SActiveCheckpointInfo* pActiveInfo = NULL; + SStreamTmrInfo* pTmrInfo = NULL; + + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); + streamTaskFreeRefId(param); + return; + } + + vgId = pTask->pMeta->vgId; + id = pTask->id.idStr; + pActiveInfo = pTask->chkInfo.pActiveInfo; + pTmrInfo = &pActiveInfo->chkptReadyMsgTmr; // check the status every 100ms if (streamTaskShouldStop(pTask)) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s vgId:%d status:stop, quit from monitor checkpoint-trigger, ref:%d", id, vgId, ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s vgId:%d status:stop, quit from monitor checkpoint-trigger", id, vgId); streamMetaReleaseTask(pTask->pMeta, pTask); + taosArrayDestroy(pNotRspList); return; } if (++pTmrInfo->activeCounter < 50) { - streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + streamTmrStart(chkptReadyMsgSendMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor"); + streamMetaReleaseTask(pTask->pMeta, pTask); + taosArrayDestroy(pNotRspList); return; } @@ -1078,15 +1120,16 @@ static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) { // 1. 
check status in the first place if (state.state != TASK_STATUS__CK) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s vgId:%d status:%s not in checkpoint, quit from monitor checkpoint-ready, ref:%d", id, vgId, - state.name, ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s vgId:%d status:%s not in checkpoint, quit from monitor checkpoint-ready", id, vgId, + state.name); streamMetaReleaseTask(pTask->pMeta, pTask); + taosArrayDestroy(pNotRspList); return; } streamMutexLock(&pActiveInfo->lock); - code = chkptReadyMsgSendHelper(pTask, pNotRspList); + code = chkptReadyMsgSendHelper(pTask, param, pNotRspList); streamMutexUnlock(&pActiveInfo->lock); if (code != TSDB_CODE_SUCCESS) { @@ -1098,18 +1141,18 @@ static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) { notRsp = taosArrayGetSize(pNotRspList); if (notRsp > 0) { // send checkpoint-ready msg again stDebug("s-task:%s start to monitor checkpoint-ready msg recv status in 10s", id); - streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + streamTmrStart(chkptReadyMsgSendMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor"); } else { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); stDebug( "s-task:%s vgId:%d checkpoint-ready msg confirmed by all upstream task(s), clear checkpoint-ready msg and quit " - "from timer, ref:%d", - id, vgId, ref); - // release should be the last execution, since pTask may be destroy after it immidiately. - streamMetaReleaseTask(pTask->pMeta, pTask); + "from timer", + id, vgId); } + // release should be the last execution, since pTask may be destroyed after it immediately. 
+ streamMetaReleaseTask(pTask->pMeta, pTask); taosArrayDestroy(pNotRspList); } @@ -1160,15 +1203,17 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) { int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1); if (old == 0) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref); - int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); + stDebug("s-task:%s start checkpoint-ready monitor in 10s", pTask->id.idStr); - streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, - "chkpt-ready-monitor"); + int64_t* pTaskRefId = NULL; + int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTaskRefId, streamTimer, &pTmrInfo->tmrHandle, vgId, + "chkpt-ready-monitor"); - // mark the timer monitor checkpointId - pTmrInfo->launchChkptId = pActiveInfo->activeId; + // mark the timer monitor checkpointId + pTmrInfo->launchChkptId = pActiveInfo->activeId; + } } else { stError("s-task:%s previous checkpoint-ready monitor tmr is set, not start new one", pTask->id.idStr); } @@ -1203,7 +1248,8 @@ int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask) { } int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq) { - int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + int32_t dataStrLen = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; void* buf = taosMemoryCalloc(1, dataStrLen); if (buf == NULL) { return terrno; @@ -1225,7 +1271,7 @@ int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatch int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock); pRetrieve->numOfCols = htonl(numOfCols); - int32_t actualLen = blockEncode(pBlock, pRetrieve->data + 
PAYLOAD_PREFIX_LEN, numOfCols); + int32_t actualLen = blockEncode(pBlock, pRetrieve->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, numOfCols); if (actualLen < 0) { taosMemoryFree(buf); return terrno; diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 2e068130712..318720b5b0e 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -154,7 +154,7 @@ int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, int64_t* if ((code = qExecTask(pExecutor, &output, &ts)) < 0) { if (code == TSDB_CODE_QRY_IN_EXEC) { - resetTaskInfo(pExecutor); + qResetTaskInfoCode(pExecutor); } if (code == TSDB_CODE_OUT_OF_MEMORY || code == TSDB_CODE_INVALID_PARA || code == TSDB_CODE_FILE_CORRUPTED) { @@ -188,15 +188,13 @@ int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, int64_t* continue; // checkpoint block not dispatch to downstream tasks } - SSDataBlock block = {0}; + SSDataBlock block = {.info.childId = pTask->info.selfChildId}; code = assignOneDataBlock(&block, output); if (code) { stError("s-task:%s failed to build result block due to out of memory", pTask->id.idStr); continue; } - block.info.childId = pTask->info.selfChildId; - size += blockDataGetSize(output) + sizeof(SSDataBlock) + sizeof(SColumnInfoData) * blockDataGetNumOfCols(&block); numOfBlocks += 1; diff --git a/source/libs/stream/src/streamHb.c b/source/libs/stream/src/streamHb.c index 19391bf7a05..25cb28f77c5 100644 --- a/source/libs/stream/src/streamHb.c +++ b/source/libs/stream/src/streamHb.c @@ -21,7 +21,7 @@ #include "ttimer.h" #include "wal.h" -int32_t streamMetaId = 0; +int32_t streamMetaRefPool = 0; struct SMetaHbInfo { tmr_h hbTmr; @@ -123,17 +123,21 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) { for(int32_t i = 0; i < numOfTasks; ++i) { SStreamTaskId* pId = taosArrayGet(pMeta->pTaskList, i); - STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; - SStreamTask** pTask = 
taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (pTask == NULL) { + STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; + SStreamTask* pTask = NULL; + + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code != 0) { continue; } - if ((*pTask)->info.fillHistory == 1) { + if (pTask->info.fillHistory == 1) { + streamMetaReleaseTask(pMeta, pTask); continue; } - epsetAssign(&epset, &(*pTask)->info.mnodeEpset); + epsetAssign(&epset, &pTask->info.mnodeEpset); + streamMetaReleaseTask(pMeta, pTask); break; } @@ -159,28 +163,30 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) { for (int32_t i = 0; i < numOfTasks; ++i) { SStreamTaskId* pId = taosArrayGet(pMeta->pTaskList, i); - STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; - SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (pTask == NULL) { + STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; + SStreamTask* pTask = NULL; + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code != 0) { continue; } // not report the status of fill-history task - if ((*pTask)->info.fillHistory == 1) { + if (pTask->info.fillHistory == 1) { + streamMetaReleaseTask(pMeta, pTask); continue; } - streamMutexLock(&(*pTask)->lock); - STaskStatusEntry entry = streamTaskGetStatusEntry(*pTask); - streamMutexUnlock(&(*pTask)->lock); + streamMutexLock(&pTask->lock); + STaskStatusEntry entry = streamTaskGetStatusEntry(pTask); + streamMutexUnlock(&pTask->lock); entry.inputRate = entry.inputQUsed * 100.0 / (2 * STREAM_TASK_QUEUE_CAPACITY_IN_SIZE); - if ((*pTask)->info.taskLevel == TASK_LEVEL__SINK) { - entry.sinkQuota = (*pTask)->outputInfo.pTokenBucket->quotaRate; - entry.sinkDataSize = SIZE_IN_MiB((*pTask)->execInfo.sink.dataSize); + if (pTask->info.taskLevel == TASK_LEVEL__SINK) { + entry.sinkQuota = pTask->outputInfo.pTokenBucket->quotaRate; + entry.sinkDataSize = SIZE_IN_MiB(pTask->execInfo.sink.dataSize); } - SActiveCheckpointInfo* p = 
(*pTask)->chkInfo.pActiveInfo; + SActiveCheckpointInfo* p = pTask->chkInfo.pActiveInfo; if (p->activeId != 0) { entry.checkpointInfo.failed = (p->failedId >= p->activeId) ? 1 : 0; entry.checkpointInfo.activeId = p->activeId; @@ -188,40 +194,42 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) { if (entry.checkpointInfo.failed) { stInfo("s-task:%s set kill checkpoint trans in hbMsg, transId:%d, clear the active checkpointInfo", - (*pTask)->id.idStr, p->transId); + pTask->id.idStr, p->transId); - streamMutexLock(&(*pTask)->lock); - streamTaskClearCheckInfo((*pTask), true); - streamMutexUnlock(&(*pTask)->lock); + streamMutexLock(&pTask->lock); + streamTaskClearCheckInfo(pTask, true); + streamMutexUnlock(&pTask->lock); } } - streamMutexLock(&(*pTask)->lock); - entry.checkpointInfo.consensusChkptId = streamTaskCheckIfReqConsenChkptId(*pTask, pMsg->ts); + streamMutexLock(&pTask->lock); + entry.checkpointInfo.consensusChkptId = streamTaskCheckIfReqConsenChkptId(pTask, pMsg->ts); if (entry.checkpointInfo.consensusChkptId) { entry.checkpointInfo.consensusTs = pMsg->ts; } - streamMutexUnlock(&(*pTask)->lock); + streamMutexUnlock(&pTask->lock); - if ((*pTask)->exec.pWalReader != NULL) { - entry.processedVer = walReaderGetCurrentVer((*pTask)->exec.pWalReader) - 1; + if (pTask->exec.pWalReader != NULL) { + entry.processedVer = walReaderGetCurrentVer(pTask->exec.pWalReader) - 1; if (entry.processedVer < 0) { - entry.processedVer = (*pTask)->chkInfo.processedVer; + entry.processedVer = pTask->chkInfo.processedVer; } - walReaderValidVersionRange((*pTask)->exec.pWalReader, &entry.verRange.minVer, &entry.verRange.maxVer); + walReaderValidVersionRange(pTask->exec.pWalReader, &entry.verRange.minVer, &entry.verRange.maxVer); } - addUpdateNodeIntoHbMsg(*pTask, pMsg); + addUpdateNodeIntoHbMsg(pTask, pMsg); p = taosArrayPush(pMsg->pTaskStatus, &entry); if (p == NULL) { - stError("failed to add taskInfo:0x%x in hbMsg, vgId:%d", (*pTask)->id.taskId, pMeta->vgId); + stError("failed to 
add taskInfo:0x%x in hbMsg, vgId:%d", pTask->id.taskId, pMeta->vgId); } if (!hasMnodeEpset) { - epsetAssign(&epset, &(*pTask)->info.mnodeEpset); + epsetAssign(&epset, &pTask->info.mnodeEpset); hasMnodeEpset = true; } + + streamMetaReleaseTask(pMeta, pTask); } pMsg->numOfTasks = taosArrayGetSize(pMsg->pTaskStatus); @@ -244,9 +252,10 @@ void streamMetaHbToMnode(void* param, void* tmrId) { int32_t vgId = 0; int32_t role = 0; - SStreamMeta* pMeta = taosAcquireRef(streamMetaId, rid); + SStreamMeta* pMeta = taosAcquireRef(streamMetaRefPool, rid); if (pMeta == NULL) { - stError("invalid rid:%" PRId64 " failed to acquired stream-meta", rid); + stError("invalid meta rid:%" PRId64 " failed to acquired stream-meta", rid); +// taosMemoryFree(param); return; } @@ -256,24 +265,26 @@ void streamMetaHbToMnode(void* param, void* tmrId) { // need to stop, stop now if (pMeta->closeFlag) { pMeta->pHbInfo->hbStart = 0; - code = taosReleaseRef(streamMetaId, rid); + code = taosReleaseRef(streamMetaRefPool, rid); if (code == TSDB_CODE_SUCCESS) { stDebug("vgId:%d jump out of meta timer", vgId); } else { stError("vgId:%d jump out of meta timer, failed to release the meta rid:%" PRId64, vgId, rid); } +// taosMemoryFree(param); return; } // not leader not send msg if (pMeta->role != NODE_ROLE_LEADER) { pMeta->pHbInfo->hbStart = 0; - code = taosReleaseRef(streamMetaId, rid); + code = taosReleaseRef(streamMetaRefPool, rid); if (code == TSDB_CODE_SUCCESS) { stInfo("vgId:%d role:%d not leader not send hb to mnode", vgId, role); } else { stError("vgId:%d role:%d not leader not send hb to mnodefailed to release the meta rid:%" PRId64, vgId, role, rid); } +// taosMemoryFree(param); return; } @@ -281,7 +292,7 @@ void streamMetaHbToMnode(void* param, void* tmrId) { streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr, vgId, "meta-hb-tmr"); - code = taosReleaseRef(streamMetaId, rid); + code = taosReleaseRef(streamMetaRefPool, rid); if (code) { 
stError("vgId:%d in meta timer, failed to release the meta rid:%" PRId64, vgId, rid); } @@ -298,12 +309,13 @@ void streamMetaHbToMnode(void* param, void* tmrId) { if (code) { stError("vgId:%d failed to send hmMsg to mnode, try again in 5s, code:%s", pMeta->vgId, tstrerror(code)); } + streamMetaRUnLock(pMeta); streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr, pMeta->vgId, "meta-hb-tmr"); - code = taosReleaseRef(streamMetaId, rid); + code = taosReleaseRef(streamMetaRefPool, rid); if (code) { stError("vgId:%d in meta timer, failed to release the meta rid:%" PRId64, vgId, rid); } @@ -316,12 +328,13 @@ int32_t createMetaHbInfo(int64_t* pRid, SMetaHbInfo** pRes) { return terrno; } - streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, pRid, streamTimer, &pInfo->hbTmr, 0, "stream-hb"); pInfo->tickCounter = 0; pInfo->msgSendTs = -1; pInfo->hbCount = 0; *pRes = pInfo; + + streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, pRid, streamTimer, &pInfo->hbTmr, 0, "stream-hb"); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 7e9b60b61a8..86f305df60e 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -13,7 +13,6 @@ * along with this program. If not, see . 
*/ -#include "executor.h" #include "streamBackendRocksdb.h" #include "streamInt.h" #include "tmisce.h" @@ -28,6 +27,7 @@ static TdThreadOnce streamMetaModuleInit = PTHREAD_ONCE_INIT; int32_t streamBackendId = 0; int32_t streamBackendCfWrapperId = 0; int32_t taskDbWrapperId = 0; +int32_t streamTaskRefPool = 0; static int32_t streamMetaBegin(SStreamMeta* pMeta); static void streamMetaCloseImpl(void* arg); @@ -41,14 +41,14 @@ SMetaRefMgt gMetaRefMgt; int32_t metaRefMgtInit(); void metaRefMgtCleanup(); -int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid); static void streamMetaEnvInit() { streamBackendId = taosOpenRef(64, streamBackendCleanup); streamBackendCfWrapperId = taosOpenRef(64, streamBackendHandleCleanup); taskDbWrapperId = taosOpenRef(64, taskDbDestroy2); - streamMetaId = taosOpenRef(64, streamMetaCloseImpl); + streamMetaRefPool = taosOpenRef(64, streamMetaCloseImpl); + streamTaskRefPool = taosOpenRef(64, tFreeStreamTask); int32_t code = metaRefMgtInit(); if (code) { @@ -72,7 +72,8 @@ void streamMetaInit() { void streamMetaCleanup() { taosCloseRef(streamBackendId); taosCloseRef(streamBackendCfWrapperId); - taosCloseRef(streamMetaId); + taosCloseRef(streamMetaRefPool); + taosCloseRef(streamTaskRefPool); metaRefMgtCleanup(); streamTimerCleanUp(); @@ -98,16 +99,12 @@ int32_t metaRefMgtInit() { void metaRefMgtCleanup() { void* pIter = taosHashIterate(gMetaRefMgt.pTable, NULL); while (pIter) { - SArray* list = *(SArray**)pIter; - for (int i = 0; i < taosArrayGetSize(list); i++) { - void* rid = taosArrayGetP(list, i); - taosMemoryFree(rid); - } - taosArrayDestroy(list); + int64_t* p = *(int64_t**) pIter; + taosMemoryFree(p); pIter = taosHashIterate(gMetaRefMgt.pTable, pIter); } - taosHashCleanup(gMetaRefMgt.pTable); + taosHashCleanup(gMetaRefMgt.pTable); streamMutexDestroy(&gMetaRefMgt.mutex); } @@ -117,35 +114,32 @@ int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid) { streamMutexLock(&gMetaRefMgt.mutex); - p = taosHashGet(gMetaRefMgt.pTable, &vgId, sizeof(vgId)); + p 
= taosHashGet(gMetaRefMgt.pTable, &rid, sizeof(rid)); if (p == NULL) { - SArray* pList = taosArrayInit(8, POINTER_BYTES); - if (pList == NULL) { - return terrno; - } - - p = taosArrayPush(pList, &rid); - if (p == NULL) { - return terrno; - } - - code = taosHashPut(gMetaRefMgt.pTable, &vgId, sizeof(vgId), &pList, sizeof(void*)); + code = taosHashPut(gMetaRefMgt.pTable, &rid, sizeof(rid), &rid, sizeof(void*)); if (code) { - stError("vgId:%d failed to put into metaRef table, rid:%" PRId64, (int32_t)vgId, *rid); + stError("vgId:%d failed to put into refId mgt, refId:%" PRId64" %p, code:%s", (int32_t)vgId, *rid, rid, + tstrerror(code)); return code; + } else { // not +// stInfo("add refId:%"PRId64" vgId:%d, %p", *rid, (int32_t)vgId, rid); } } else { - SArray* list = *(SArray**)p; - void* px = taosArrayPush(list, &rid); - if (px == NULL) { - code = terrno; - } + stFatal("try to add refId:%"PRId64" vgId:%d, %p that already added into mgt", *rid, (int32_t) vgId, rid); } streamMutexUnlock(&gMetaRefMgt.mutex); return code; } +void metaRefMgtRemove(int64_t* pRefId) { + streamMutexLock(&gMetaRefMgt.mutex); + + int32_t code = taosHashRemove(gMetaRefMgt.pTable, &pRefId, sizeof(pRefId)); + taosMemoryFree(pRefId); + streamMutexUnlock(&gMetaRefMgt.mutex); +} + int32_t streamMetaOpenTdb(SStreamMeta* pMeta) { if (tdbOpen(pMeta->path, 16 * 1024, 1, &pMeta->db, 0, 0, NULL) < 0) { stError("vgId:%d open file:%s failed, stream meta open failed", pMeta->vgId, pMeta->path); @@ -434,7 +428,6 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, pMeta->closeFlag = false; stInfo("vgId:%d open stream meta succ, latest checkpoint:%" PRId64 ", stage:%" PRId64, vgId, pMeta->chkpId, stage); - pMeta->rid = taosAddRef(streamMetaId, pMeta); // set the attribute when running on Linux OS TdThreadRwlockAttr attr; @@ -452,20 +445,25 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, code = taosThreadRwlockAttrDestroy(&attr); 
TSDB_CHECK_CODE(code, lino, _err); + code = bkdMgtCreate(tpath, (SBkdMgt**)&pMeta->bkdChkptMgt); + TSDB_CHECK_CODE(code, lino, _err); + + code = taosThreadMutexInit(&pMeta->backendMutex, NULL); + TSDB_CHECK_CODE(code, lino, _err); + + // add refId at the end of initialization function + pMeta->rid = taosAddRef(streamMetaRefPool, pMeta); + int64_t* pRid = taosMemoryMalloc(sizeof(int64_t)); TSDB_CHECK_NULL(pRid, code, lino, _err, terrno); memcpy(pRid, &pMeta->rid, sizeof(pMeta->rid)); + code = metaRefMgtAdd(pMeta->vgId, pRid); TSDB_CHECK_CODE(code, lino, _err); code = createMetaHbInfo(pRid, &pMeta->pHbInfo); - TSDB_CHECK_CODE(code, lino, _err); - - code = bkdMgtCreate(tpath, (SBkdMgt**)&pMeta->bkdChkptMgt); - TSDB_CHECK_CODE(code, lino, _err); - code = taosThreadMutexInit(&pMeta->backendMutex, NULL); TSDB_CHECK_CODE(code, lino, _err); *p = pMeta; @@ -527,17 +525,28 @@ void streamMetaClear(SStreamMeta* pMeta) { // remove all existed tasks in this vnode void* pIter = NULL; while ((pIter = taosHashIterate(pMeta->pTasksMap, pIter)) != NULL) { - SStreamTask* p = *(SStreamTask**)pIter; + int64_t refId = *(int64_t*)pIter; + SStreamTask* p = taosAcquireRef(streamTaskRefPool, refId); + if (p == NULL) { + continue; + } // release the ref by timer if (p->info.delaySchedParam != 0 && p->info.fillHistory == 0) { // one more ref in timer - stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", p->id.idStr, p->refCnt); + stDebug("s-task:%s stop schedTimer", p->id.idStr); streamTmrStop(p->schedInfo.pDelayTimer); p->info.delaySchedParam = 0; - streamMetaReleaseTask(pMeta, p); } - streamMetaReleaseTask(pMeta, p); + int32_t code = taosRemoveRef(streamTaskRefPool, refId); + if (code) { + stError("vgId:%d remove task refId failed, refId:%" PRId64, pMeta->vgId, refId); + } + + code = taosReleaseRef(streamTaskRefPool, refId); + if (code) { + stError("vgId:%d failed to release refId:%" PRId64, pMeta->vgId, refId); + } } if (pMeta->streamBackendRid != 0) { @@ -567,9 +576,9 @@ void 
streamMetaClose(SStreamMeta* pMeta) { if (pMeta == NULL) { return; } - int32_t code = taosRemoveRef(streamMetaId, pMeta->rid); + int32_t code = taosRemoveRef(streamMetaRefPool, pMeta->rid); if (code) { - stError("vgId:%d failed to remove ref:%" PRId64 ", code:%s", pMeta->vgId, pMeta->rid, tstrerror(code)); + stError("vgId:%d failed to remove meta ref:%" PRId64 ", code:%s", pMeta->vgId, pMeta->rid, tstrerror(code)); } } @@ -656,9 +665,16 @@ int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) { code = tdbTbUpsert(pMeta->pTaskDb, id, STREAM_TASK_KEY_LEN, buf, len, pMeta->txn); if (code != TSDB_CODE_SUCCESS) { code = terrno; - stError("s-task:%s vgId:%d task meta save to disk failed, code:%s", pTask->id.idStr, vgId, tstrerror(terrno)); + stError("s-task:%s vgId:%d refId:%" PRId64 " task meta save to disk failed, remove ref, code:%s", pTask->id.idStr, + vgId, pTask->id.refId, tstrerror(code)); + + int64_t refId = pTask->id.refId; + int32_t ret = taosRemoveRef(streamTaskRefPool, pTask->id.refId); + if (ret != 0) { + stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id[1], refId); + } } else { - stDebug("s-task:%s vgId:%d task meta save to disk", pTask->id.idStr, vgId); + stDebug("s-task:%s vgId:%d refId:%" PRId64 " task meta save to disk", pTask->id.idStr, vgId, pTask->id.refId); } taosMemoryFree(buf); @@ -683,34 +699,54 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa *pAdded = false; int32_t code = 0; + int64_t refId = 0; STaskId id = streamTaskGetTaskId(pTask); void* p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); + if (p != NULL) { stDebug("s-task:%" PRIx64 " already exist in meta, no need to register", id.taskId); + tFreeStreamTask(pTask); return code; } if ((code = pMeta->buildTaskFn(pMeta->ahandle, pTask, ver)) != 0) { + tFreeStreamTask(pTask); return code; } p = taosArrayPush(pMeta->pTaskList, &pTask->id); if (p == NULL) { stError("s-task:0x%" PRIx64 " failed to register task into meta-list, 
code: out of memory", id.taskId); + tFreeStreamTask(pTask); return terrno; } - code = taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask, POINTER_BYTES); - if (code) { + pTask->id.refId = refId = taosAddRef(streamTaskRefPool, pTask); + code = taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask->id.refId, sizeof(int64_t)); + if (code) { // todo remove it from task list stError("s-task:0x%" PRIx64 " failed to register task into meta-list, code: out of memory", id.taskId); + + int32_t ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret != 0) { + stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id.taskId, refId); + } return code; } if ((code = streamMetaSaveTask(pMeta, pTask)) != 0) { + int32_t ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret) { + stError("vgId:%d remove task refId failed, refId:%" PRId64, pMeta->vgId, refId); + } return code; } if ((code = streamMetaCommit(pMeta)) != 0) { + int32_t ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret) { + stError("vgId:%d remove task refId failed, refId:%" PRId64, pMeta->vgId, refId); + } + return code; } @@ -733,16 +769,72 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta) { } int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask) { - STaskId id = {.streamId = streamId, .taskId = taskId}; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask == NULL || streamTaskShouldStop(*ppTask)) { - *pTask = NULL; + QRY_PARAM_CHECK(pTask); + STaskId id = {.streamId = streamId, .taskId = taskId}; + int64_t* pTaskRefId = (int64_t*)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); + if (pTaskRefId == NULL) { + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + SStreamTask* p = taosAcquireRef(streamTaskRefPool, *pTaskRefId); + if (p == NULL) { + stDebug("s-task:%x failed to acquire task refId:%"PRId64", may have been destoried", taskId, *pTaskRefId); return TSDB_CODE_STREAM_TASK_NOT_EXIST; } 
- int32_t ref = atomic_add_fetch_32(&(*ppTask)->refCnt, 1); - stTrace("s-task:%s acquire task, ref:%d", (*ppTask)->id.idStr, ref); - *pTask = *ppTask; + if (p->id.refId != *pTaskRefId) { + stFatal("s-task:%x inconsistent refId, task refId:%" PRId64 " try acquire:%" PRId64, taskId, *pTaskRefId, + p->id.refId); + int32_t ret = taosReleaseRef(streamTaskRefPool, *pTaskRefId); + if (ret) { + stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, *pTaskRefId); + } + + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + if (streamTaskShouldStop(p)) { + stDebug("s-task:%s is stopped, failed to acquire it now", p->id.idStr); + int32_t ret = taosReleaseRef(streamTaskRefPool, *pTaskRefId); + if (ret) { + stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, *pTaskRefId); + } + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + stDebug("s-task:%s acquire task, refId:%" PRId64, p->id.idStr, p->id.refId); + *pTask = p; + return TSDB_CODE_SUCCESS; +} + +int32_t streamMetaAcquireTaskUnsafe(SStreamMeta* pMeta, STaskId* pId, SStreamTask** pTask) { + QRY_PARAM_CHECK(pTask); + int64_t* pTaskRefId = (int64_t*)taosHashGet(pMeta->pTasksMap, pId, sizeof(*pId)); + + if (pTaskRefId == NULL) { + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + SStreamTask* p = taosAcquireRef(streamTaskRefPool, *pTaskRefId); + if (p == NULL) { + stDebug("s-task:%" PRIx64 " failed to acquire task refId:%" PRId64 ", may have been destoried", pId->taskId, + *pTaskRefId); + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + if (p->id.refId != *pTaskRefId) { + stFatal("s-task:%" PRIx64 " inconsistent refId, task refId:%" PRId64 " try acquire:%" PRId64, pId->taskId, + *pTaskRefId, p->id.refId); + int32_t ret = taosReleaseRef(streamTaskRefPool, *pTaskRefId); + if (ret) { + stError("s-task:0x%" PRIx64 " failed to release task refId:%" PRId64, pId->taskId, *pTaskRefId); + } + + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + stDebug("s-task:%s acquire task, refId:%" PRId64, p->id.idStr, p->id.refId); 
+ *pTask = p; return TSDB_CODE_SUCCESS; } @@ -753,28 +845,17 @@ int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t task return code; } -int32_t streamMetaAcquireOneTask(SStreamTask* pTask) { - int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1); - stTrace("s-task:%s acquire task, ref:%d", pTask->id.idStr, ref); - return ref; -} - void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) { if (pTask == NULL) { return; } int32_t taskId = pTask->id.taskId; - int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1); - - // not safe to use the pTask->id.idStr, since pTask may be released by other threads when print logs. - if (ref > 0) { - stTrace("s-task:0x%x release task, ref:%d", taskId, ref); - } else if (ref == 0) { - stTrace("s-task:0x%x all refs are gone, free it", taskId); - tFreeStreamTask(pTask); - } else if (ref < 0) { - stError("task ref is invalid, ref:%d, 0x%x", ref, taskId); + int64_t refId = pTask->id.refId; + stDebug("s-task:0x%x release task, refId:%" PRId64, taskId, pTask->id.refId); + int32_t ret = taosReleaseRef(streamTaskRefPool, pTask->id.refId); + if (ret) { + stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, refId); } } @@ -803,6 +884,14 @@ static int32_t streamTaskSendTransSuccessMsg(SStreamTask* pTask, void* param) { tstrerror(code)); } } + + // let's kill the query procedure within stream, to end it ASAP. 
+ if (pTask->info.taskLevel != TASK_LEVEL__SINK && pTask->exec.pExecutor != NULL) { + code = qKillTask(pTask->exec.pExecutor, TSDB_CODE_SUCCESS); + if (code != TSDB_CODE_SUCCESS) { + stError("s-task:%s failed to kill task related query handle, code:%s", pTask->id.idStr, tstrerror(code)); + } + } return code; } @@ -812,13 +901,10 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t int32_t code = 0; STaskId id = {.streamId = streamId, .taskId = taskId}; - // pre-delete operation streamMetaWLock(pMeta); - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask) { - pTask = *ppTask; - + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { // desc the paused task counter if (streamTaskShouldPause(pTask)) { int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1); @@ -830,43 +916,9 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t if (code) { stError("s-task:0x%" PRIx64 " failed to handle dropping event async, code:%s", id.taskId, tstrerror(code)); } - } else { - stDebug("vgId:%d failed to find the task:0x%x, it may be dropped already", vgId, taskId); - streamMetaWUnLock(pMeta); - return 0; - } - streamMetaWUnLock(pMeta); + stDebug("s-task:0x%x vgId:%d set task status:dropping and start to unregister it", taskId, vgId); - stDebug("s-task:0x%x vgId:%d set task status:dropping and start to unregister it", taskId, vgId); - - while (1) { - int32_t timerActive = 0; - - streamMetaRLock(pMeta); - ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask) { - // to make sure check status will not start the check downstream status when we start to check timerActive count. 
- streamMutexLock(&pTask->taskCheckInfo.checkInfoLock); - timerActive = (*ppTask)->status.timerActive; - streamMutexUnlock(&pTask->taskCheckInfo.checkInfoLock); - } - streamMetaRUnLock(pMeta); - - if (timerActive > 0) { - taosMsleep(100); - stDebug("s-task:0x%" PRIx64 " wait for quit from timer", id.taskId); - } else { - break; - } - } - - // let's do delete of stream task - streamMetaWLock(pMeta); - - ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask) { - pTask = *ppTask; // it is a fill-history task, remove the related stream task's id that points to it if (pTask->info.fillHistory == 0) { int32_t ret = atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1); @@ -884,21 +936,22 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t if (sizeInList != size) { stError("vgId:%d tasks number not consistent in list:%d and map:%d, ", vgId, sizeInList, size); } - streamMetaWUnLock(pMeta); - - int32_t numOfTmr = pTask->status.timerActive; - if (numOfTmr != 0) { - stError("s-task:%s vgId:%d invalid timer Active record:%d, internal error", pTask->id.idStr, vgId, numOfTmr); - } if (pTask->info.delaySchedParam != 0 && pTask->info.fillHistory == 0) { - stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt); + stDebug("s-task:%s stop schedTimer", pTask->id.idStr); streamTmrStop(pTask->schedInfo.pDelayTimer); pTask->info.delaySchedParam = 0; - streamMetaReleaseTask(pMeta, pTask); + } + + + int64_t refId = pTask->id.refId; + int32_t ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret != 0) { + stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id.taskId, refId); } streamMetaReleaseTask(pMeta, pTask); + streamMetaWUnLock(pMeta); } else { stDebug("vgId:%d failed to find the task:0x%x, it may have been dropped already", vgId, taskId); streamMetaWUnLock(pMeta); @@ -1008,13 +1061,13 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { return; } + vgId = pMeta->vgId; 
pRecycleList = taosArrayInit(4, sizeof(STaskId)); if (pRecycleList == NULL) { stError("vgId:%d failed prepare load all tasks, code:out of memory", vgId); return; } - vgId = pMeta->vgId; stInfo("vgId:%d load stream tasks from meta files", vgId); code = tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL); @@ -1058,9 +1111,9 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { if (pTask->status.taskStatus == TASK_STATUS__DROPPING) { int32_t taskId = pTask->id.taskId; - tFreeStreamTask(pTask); - STaskId id = streamTaskGetTaskId(pTask); + + tFreeStreamTask(pTask); void* px = taosArrayPush(pRecycleList, &id); if (px == NULL) { stError("s-task:0x%x failed record the task into recycle list due to out of memory", taskId); @@ -1096,13 +1149,22 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { continue; } - if (taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask, POINTER_BYTES) != 0) { - stError("s-task:0x%x failed to put into hashTable, code:%s, continue", pTask->id.taskId, tstrerror(terrno)); - void* px = taosArrayPop(pMeta->pTaskList); - tFreeStreamTask(pTask); + pTask->id.refId = taosAddRef(streamTaskRefPool, pTask); + + if (taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask->id.refId, sizeof(int64_t)) != 0) { + int64_t refId = pTask->id.refId; + stError("s-task:0x%x failed to put into hashTable, code:%s, remove task ref, refId:%" PRId64 " continue", + pTask->id.taskId, tstrerror(terrno), refId); + + void* px = taosArrayPop(pMeta->pTaskList); + int32_t ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret != 0) { + stError("s-task:0x%x failed to remove ref, refId:%" PRId64, (int32_t)id.taskId, refId); + } continue; } + stInfo("s-task:0x%x vgId:%d set refId:%"PRId64, (int32_t) id.taskId, vgId, pTask->id.refId); if (pTask->info.fillHistory == 0) { int32_t val = atomic_add_fetch_32(&pMeta->numOfStreamTasks, 1); } @@ -1138,72 +1200,22 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { } } -bool streamMetaTaskInTimer(SStreamMeta* pMeta) { - bool inTimer = false; - 
streamMetaRLock(pMeta); - - void* pIter = NULL; - while (1) { - pIter = taosHashIterate(pMeta->pTasksMap, pIter); - if (pIter == NULL) { - break; - } - - SStreamTask* pTask = *(SStreamTask**)pIter; - if (pTask->status.timerActive >= 1) { - stDebug("s-task:%s in timer, blocking tasks in vgId:%d restart, set closing again", pTask->id.idStr, pMeta->vgId); - int32_t code = streamTaskStop(pTask); - if (code) { - stError("s-task:%s failed to stop task, code:%s", pTask->id.idStr, tstrerror(code)); - } - inTimer = true; - } - } - - streamMetaRUnLock(pMeta); - return inTimer; -} - void streamMetaNotifyClose(SStreamMeta* pMeta) { int32_t vgId = pMeta->vgId; int64_t startTs = 0; int32_t sendCount = 0; - streamMetaGetHbSendInfo(pMeta->pHbInfo, &startTs, &sendCount); + streamMetaGetHbSendInfo(pMeta->pHbInfo, &startTs, &sendCount); stInfo("vgId:%d notify all stream tasks that current vnode is closing. isLeader:%d startHb:%" PRId64 ", totalHb:%d", vgId, (pMeta->role == NODE_ROLE_LEADER), startTs, sendCount); // wait for the stream meta hb function stopping streamMetaWaitForHbTmrQuit(pMeta); - - streamMetaWLock(pMeta); - pMeta->closeFlag = true; - void* pIter = NULL; - while (1) { - pIter = taosHashIterate(pMeta->pTasksMap, pIter); - if (pIter == NULL) { - break; - } - - SStreamTask* pTask = *(SStreamTask**)pIter; - stDebug("vgId:%d s-task:%s set task closing flag", vgId, pTask->id.idStr); - int32_t code = streamTaskStop(pTask); - if (code) { - stError("vgId:%d failed to stop task:0x%x, code:%s", vgId, pTask->id.taskId, tstrerror(code)); - } - } - - streamMetaWUnLock(pMeta); stDebug("vgId:%d start to check all tasks for closing", vgId); int64_t st = taosGetTimestampMs(); - while (streamMetaTaskInTimer(pMeta)) { - stDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId); - taosMsleep(100); - } - streamMetaRLock(pMeta); SArray* pTaskList = NULL; @@ -1211,14 +1223,34 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) { if (code != TSDB_CODE_SUCCESS) { } - 
streamMetaRUnLock(pMeta); + int32_t numOfTasks = taosArrayGetSize(pTaskList); + for (int32_t i = 0; i < numOfTasks; ++i) { + SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); + SStreamTask* pTask = NULL; - if (pTaskList != NULL) { - taosArrayDestroy(pTaskList); + code = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask); + if (code != TSDB_CODE_SUCCESS) { + continue; + } + + int64_t refId = pTask->id.refId; + int32_t ret = streamTaskStop(pTask); + if (ret) { + stError("s-task:0x%x failed to stop task, code:%s", pTaskId->taskId, tstrerror(ret)); + } + + streamMetaReleaseTask(pMeta, pTask); + ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret) { + stError("vgId:%d failed to remove task:0x%x, refId:%" PRId64, pMeta->vgId, pTaskId->taskId, refId); + } } - int64_t el = taosGetTimestampMs() - st; - stDebug("vgId:%d all stream tasks are not in timer, continue close, elapsed time:%" PRId64 " ms", pMeta->vgId, el); + taosArrayDestroy(pTaskList); + + double el = (taosGetTimestampMs() - st) / 1000.0; + stDebug("vgId:%d stop all %d task(s) completed, elapsed time:%.2f Sec.", pMeta->vgId, numOfTasks, el); + streamMetaRUnLock(pMeta); } void streamMetaStartHb(SStreamMeta* pMeta) { @@ -1228,12 +1260,12 @@ void streamMetaStartHb(SStreamMeta* pMeta) { return; } + *pRid = pMeta->rid; int32_t code = metaRefMgtAdd(pMeta->vgId, pRid); if (code) { return; } - *pRid = pMeta->rid; streamMetaHbToMnode(pRid, NULL); } @@ -1308,13 +1340,15 @@ bool streamMetaAllTasksReady(const SStreamMeta* pMeta) { for (int32_t i = 0; i < num; ++i) { SStreamTaskId* pId = taosArrayGet(pMeta->pTaskList, i); STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; - SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask == NULL) { - continue; - } + SStreamTask* pTask = NULL; + int32_t code = streamMetaAcquireTaskUnsafe((SStreamMeta*)pMeta, &id, &pTask); - if ((*ppTask)->status.downstreamReady == 0) { - return false; + if (code == 0) { + if 
(pTask->status.downstreamReady == 0) { + streamMetaReleaseTask((SStreamMeta*)pMeta, pTask); + return false; + } + streamMetaReleaseTask((SStreamMeta*)pMeta, pTask); } } @@ -1331,10 +1365,13 @@ int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta) { for (int32_t i = 0; i < numOfTasks; ++i) { SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i); - - STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId}; - SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - streamTaskResetStatus(*pTask); + STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId}; + SStreamTask* pTask = NULL; + int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { + streamTaskResetStatus(pTask); + streamMetaReleaseTask(pMeta, pTask); + } } return 0; @@ -1343,7 +1380,7 @@ int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta) { void streamMetaAddIntoUpdateTaskList(SStreamMeta* pMeta, SStreamTask* pTask, SStreamTask* pHTask, int32_t transId, int64_t startTs) { const char* id = pTask->id.idStr; - int32_t vgId = pTask->pMeta->vgId; + int32_t vgId = pMeta->vgId; int32_t code = 0; // keep the already updated info diff --git a/source/libs/stream/src/streamMsg.c b/source/libs/stream/src/streamMsg.c index 8250548c328..7c317a4543f 100644 --- a/source/libs/stream/src/streamMsg.c +++ b/source/libs/stream/src/streamMsg.c @@ -613,7 +613,7 @@ int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->ver)); TAOS_CHECK_EXIT(tEncodeI64(pEncoder, pTask->id.streamId)); TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->id.taskId)); - TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.totalLevel)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, pTask->info.trigger)); TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->info.taskLevel)); TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->outputInfo.type)); TAOS_CHECK_EXIT(tEncodeI16(pEncoder, pTask->msgInfo.msgType)); @@ -692,7 +692,7 @@ int32_t 
tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { TAOS_CHECK_EXIT(tDecodeI64(pDecoder, &pTask->id.streamId)); TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->id.taskId)); - TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.totalLevel)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &pTask->info.trigger)); TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->info.taskLevel)); TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pTask->outputInfo.type)); TAOS_CHECK_EXIT(tDecodeI16(pDecoder, &pTask->msgInfo.msgType)); diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c index 6af6ebd0441..20c3e5a6b93 100644 --- a/source/libs/stream/src/streamQueue.c +++ b/source/libs/stream/src/streamQueue.c @@ -304,8 +304,7 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem) // use the local variable to avoid the pItem be freed by other threads, since it has been put into queue already. stDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr, msgLen, ver, total, size + SIZE_IN_MiB(msgLen)); - } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE || - type == STREAM_INPUT__REF_DATA_BLOCK) { + } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__REF_DATA_BLOCK) { if (streamQueueIsFull(pTask->inputq.queue)) { double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue)); @@ -324,7 +323,7 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem) double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue)); stDebug("s-task:%s blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size); } else if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__CHECKPOINT_TRIGGER || - type == STREAM_INPUT__TRANS_STATE) { + type == STREAM_INPUT__TRANS_STATE || type == STREAM_INPUT__DATA_RETRIEVE) { int32_t code = taosWriteQitem(pQueue, pItem); if (code != TSDB_CODE_SUCCESS) { streamFreeQitem(pItem); @@ -352,7 +351,7 @@ 
int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem) if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && type != STREAM_INPUT__CHECKPOINT_TRIGGER && (pTask->info.delaySchedParam != 0)) { (void)atomic_val_compare_exchange_8(&pTask->schedInfo.status, TASK_TRIGGER_STATUS__INACTIVE, - TASK_TRIGGER_STATUS__ACTIVE); + TASK_TRIGGER_STATUS__MAY_ACTIVE); stDebug("s-task:%s new data arrived, active the sched-trigger, triggerStatus:%d", pTask->id.idStr, pTask->schedInfo.status); } diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index cdaa603e386..8c79abfd021 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#include "ttime.h" #include "streamInt.h" #include "ttimer.h" @@ -20,14 +21,53 @@ static void streamTaskResumeHelper(void* param, void* tmrId); static void streamTaskSchedHelper(void* param, void* tmrId); void streamSetupScheduleTrigger(SStreamTask* pTask) { - int64_t delaySchema = pTask->info.delaySchedParam; - if (delaySchema != 0 && pTask->info.fillHistory == 0) { - int32_t ref = streamMetaAcquireOneTask(pTask); - stDebug("s-task:%s setup scheduler trigger, ref:%d delay:%" PRId64 " ms", pTask->id.idStr, ref, - pTask->info.delaySchedParam); - - streamTmrStart(streamTaskSchedHelper, (int32_t)delaySchema, pTask, streamTimer, &pTask->schedInfo.pDelayTimer, - pTask->pMeta->vgId, "sched-tmr"); + int64_t delay = 0; + int32_t code = 0; + const char* id = pTask->id.idStr; + int64_t* pTaskRefId = NULL; + + if (pTask->info.fillHistory == 1) { + return; + } + + // dynamic set the trigger & triggerParam for STREAM_TRIGGER_FORCE_WINDOW_CLOSE + if ((pTask->info.trigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) && (pTask->info.taskLevel == TASK_LEVEL__SOURCE)) { + int64_t waterMark = 0; + SInterval interval = {0}; + STimeWindow lastTimeWindow = {0}; + code = 
qGetStreamIntervalExecInfo(pTask->exec.pExecutor, &waterMark, &interval, &lastTimeWindow); + if (code) { + stError("s-task:%s failed to init scheduler info, code:%s", id, tstrerror(code)); + return; + } + + pTask->status.latestForceWindow = lastTimeWindow; + pTask->info.delaySchedParam = interval.sliding; + pTask->info.watermark = waterMark; + pTask->info.interval = interval; + + // calculate the first start timestamp + int64_t now = taosGetTimestamp(interval.precision); + STimeWindow curWin = getAlignQueryTimeWindow(&pTask->info.interval, now); + delay = (curWin.ekey + 1) - now + waterMark; + + stInfo("s-task:%s extract interval info from executor, wm:%" PRId64 " interval:%" PRId64 " unit:%c sliding:%" PRId64 + " unit:%c, initial start after:%" PRId64, + id, waterMark, interval.interval, interval.intervalUnit, interval.sliding, interval.slidingUnit, delay); + } else { + delay = pTask->info.delaySchedParam; + if (delay == 0) { + return; + } + } + + code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + stDebug("s-task:%s refId:%" PRId64 " enable the scheduler trigger, delay:%" PRId64, pTask->id.idStr, + pTask->id.refId, delay); + + streamTmrStart(streamTaskSchedHelper, (int32_t)delay, pTaskRefId, streamTimer, + &pTask->schedInfo.pDelayTimer, pTask->pMeta->vgId, "sched-tmr"); pTask->schedInfo.status = TASK_TRIGGER_STATUS__INACTIVE; } } @@ -75,102 +115,165 @@ void streamTaskClearSchedIdleInfo(SStreamTask* pTask) { pTask->status.schedIdleT void streamTaskSetIdleInfo(SStreamTask* pTask, int32_t idleTime) { pTask->status.schedIdleTime = idleTime; } void streamTaskResumeInFuture(SStreamTask* pTask) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s task should idle, add into timer to retry in %dms, ref:%d", pTask->id.idStr, - pTask->status.schedIdleTime, ref); + stDebug("s-task:%s task should idle, add into timer to retry in %dms", pTask->id.idStr, + pTask->status.schedIdleTime); // add one ref count for task - 
int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); - streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer, - pTask->pMeta->vgId, "resume-task-tmr"); + int64_t* pTaskRefId = NULL; + int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTaskRefId, streamTimer, + &pTask->schedInfo.pIdleTimer, pTask->pMeta->vgId, "resume-task-tmr"); + } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void streamTaskResumeHelper(void* param, void* tmrId) { - SStreamTask* pTask = (SStreamTask*)param; + int32_t code = 0; + int64_t taskRefId = *(int64_t*)param; + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); + streamTaskFreeRefId(param); + return; + } + SStreamTaskId* pId = &pTask->id; SStreamTaskState p = streamTaskGetStatus(pTask); - int32_t code = 0; if (p.state == TASK_STATUS__DROPPING || p.state == TASK_STATUS__STOP) { int8_t status = streamTaskSetSchedStatusInactive(pTask); TAOS_UNUSED(status); - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s not resume task, ref:%d", pId->idStr, p.name, ref); - + stDebug("s-task:%s status:%s not resume task", pId->idStr, p.name); streamMetaReleaseTask(pTask->pMeta, pTask); + streamTaskFreeRefId(param); return; } code = streamTaskSchedTask(pTask->pMsgCb, pTask->info.nodeId, pId->streamId, pId->taskId, STREAM_EXEC_T_RESUME_TASK); - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); if (code) { - stError("s-task:%s sched task failed, code:%s, ref:%d", pId->idStr, tstrerror(code), ref); + stError("s-task:%s sched task failed, code:%s", pId->idStr, tstrerror(code)); } else { - stDebug("trigger to resume s-task:%s 
after idled for %dms, ref:%d", pId->idStr, pTask->status.schedIdleTime, ref); + stDebug("trigger to resume s-task:%s after idled for %dms", pId->idStr, pTask->status.schedIdleTime); // release the task ref count streamTaskClearSchedIdleInfo(pTask); - streamMetaReleaseTask(pTask->pMeta, pTask); } + + streamMetaReleaseTask(pTask->pMeta, pTask); + streamTaskFreeRefId(param); } void streamTaskSchedHelper(void* param, void* tmrId) { - SStreamTask* pTask = (void*)param; + int64_t taskRefId = *(int64_t*)param; + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); + streamTaskFreeRefId(param); + return; + } + + stDebug("s-task:%s acquire task, refId:%"PRId64, pTask->id.idStr, pTask->id.refId); + const char* id = pTask->id.idStr; int32_t nextTrigger = (int32_t)pTask->info.delaySchedParam; int32_t vgId = pTask->pMeta->vgId; + int32_t code = 0; int8_t status = atomic_load_8(&pTask->schedInfo.status); stTrace("s-task:%s in scheduler, trigger status:%d, next:%dms", id, status, nextTrigger); - if (streamTaskShouldStop(pTask) || streamTaskShouldPause(pTask)) { + if (streamTaskShouldStop(pTask)) { stDebug("s-task:%s should stop, jump out of schedTimer", id); + streamMetaReleaseTask(pTask->pMeta, pTask); + streamTaskFreeRefId(param); return; } + if (streamTaskShouldPause(pTask)) { + stDebug("s-task:%s is paused, recheck in %.2fs", id, nextTrigger/1000.0); + streamTmrStart(streamTaskSchedHelper, nextTrigger, param, streamTimer, &pTask->schedInfo.pDelayTimer, vgId, + "sched-run-tmr"); + streamMetaReleaseTask(pTask->pMeta, pTask); + return; + } + + if (streamTaskShouldPause(pTask)) { + stDebug("s-task:%s is paused, check in nextTrigger:%ds", id, nextTrigger/1000); + streamTmrStart(streamTaskSchedHelper, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer, vgId, + "sched-run-tmr"); + } + if (streamTaskGetStatus(pTask).state == 
TASK_STATUS__CK) { stDebug("s-task:%s in checkpoint procedure, not retrieve result, next:%dms", id, nextTrigger); } else { - if (status == TASK_TRIGGER_STATUS__ACTIVE) { - SStreamTrigger* pTrigger; + if (pTask->info.trigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE && pTask->info.taskLevel == TASK_LEVEL__SOURCE) { + SStreamTrigger* pTrigger = NULL; - int32_t code = taosAllocateQitem(sizeof(SStreamTrigger), DEF_QITEM, 0, (void**)&pTrigger); - if (code) { - stError("s-task:%s failed to prepare retrieve data trigger, code:%s, try again in %dms", id, "out of memory", - nextTrigger); - terrno = code; - goto _end; - } + while (1) { + code = streamCreateForcewindowTrigger(&pTrigger, pTask->info.delaySchedParam, &pTask->info.interval, + &pTask->status.latestForceWindow, id); + if (code != 0) { + stError("s-task:%s failed to prepare force window close trigger, code:%s, try again in %dms", id, + tstrerror(code), nextTrigger); + goto _end; + } + + // in the force window close model, status trigger does not matter. 
So we do not set the trigger model + code = streamTaskPutDataIntoInputQ(pTask, (SStreamQueueItem*)pTrigger); + if (code != TSDB_CODE_SUCCESS) { + stError("s-task:%s failed to put retrieve aggRes block into q, code:%s", pTask->id.idStr, tstrerror(code)); + goto _end; + } + + // check whether the time window gaps exist or not + int64_t now = taosGetTimestamp(pTask->info.interval.precision); + int64_t intervalEndTs = pTrigger->pBlock->info.window.skey + pTask->info.interval.interval; + + // there are gaps, needs to be filled + STimeWindow w = pTrigger->pBlock->info.window; + w.ekey = w.skey + pTask->info.interval.interval; + if (w.skey <= pTask->status.latestForceWindow.skey) { + stFatal("s-task:%s invalid new time window in force_window_close model, skey:%" PRId64 + " should be greater than latestForceWindow skey:%" PRId64, + pTask->id.idStr, w.skey, pTask->status.latestForceWindow.skey); + } - pTrigger->type = STREAM_INPUT__GET_RES; - pTrigger->pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); - if (pTrigger->pBlock == NULL) { - taosFreeQitem(pTrigger); + pTask->status.latestForceWindow = w; + if (intervalEndTs + pTask->info.watermark + pTask->info.interval.interval > now) { + break; + } else { + stDebug("s-task:%s gap exist for force_window_close, current force_window_skey:%" PRId64, id, w.skey); + } + } - stError("s-task:%s failed to build retrieve data trigger, code:out of memory, try again in %dms", id, + } else if (status == TASK_TRIGGER_STATUS__MAY_ACTIVE) { + SStreamTrigger* pTrigger = NULL; + code = streamCreateSinkResTrigger(&pTrigger); + if (code) { + stError("s-task:%s failed to prepare retrieve data trigger, code:%s, try again in %dms", id, tstrerror(code), nextTrigger); goto _end; } atomic_store_8(&pTask->schedInfo.status, TASK_TRIGGER_STATUS__INACTIVE); - pTrigger->pBlock->info.type = STREAM_GET_ALL; code = streamTaskPutDataIntoInputQ(pTask, (SStreamQueueItem*)pTrigger); if (code != TSDB_CODE_SUCCESS) { - stError("s-task:%s failed to put retrieve 
block into trigger, code:%s", pTask->id.idStr, tstrerror(code)); + stError("s-task:%s failed to put retrieve aggRes block into q, code:%s", pTask->id.idStr, tstrerror(code)); goto _end; } + } - code = streamTrySchedExec(pTask); - if (code != TSDB_CODE_SUCCESS) { - stError("s-task:%s failed to sched to run, wait for next time", pTask->id.idStr); - } + code = streamTrySchedExec(pTask); + if (code != TSDB_CODE_SUCCESS) { + stError("s-task:%s failed to sched to run, wait for next time", pTask->id.idStr); } } _end: - streamTmrStart(streamTaskSchedHelper, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer, vgId, + streamTmrStart(streamTaskSchedHelper, nextTrigger, param, streamTimer, &pTask->schedInfo.pDelayTimer, vgId, "sched-run-tmr"); + streamMetaReleaseTask(pTask->pMeta, pTask); } diff --git a/source/libs/stream/src/streamSessionState.c b/source/libs/stream/src/streamSessionState.c index 536636533fa..d2d7c7b11bf 100644 --- a/source/libs/stream/src/streamSessionState.c +++ b/source/libs/stream/src/streamSessionState.c @@ -20,12 +20,10 @@ #include "tcommon.h" #include "tsimplehash.h" -typedef int (*__session_compare_fn_t)(const SSessionKey* pWin, const void* pDatas, int pos); - -int sessionStateKeyCompare(const SSessionKey* pWin1, const void* pDatas, int pos) { +int sessionStateKeyCompare(const void* pWin1, const void* pDatas, int pos) { SRowBuffPos* pPos2 = taosArrayGetP(pDatas, pos); SSessionKey* pWin2 = (SSessionKey*)pPos2->pKey; - return sessionWinKeyCmpr(pWin1, pWin2); + return sessionWinKeyCmpr((SSessionKey*)pWin1, pWin2); } int sessionStateRangeKeyCompare(const SSessionKey* pWin1, const void* pDatas, int pos) { @@ -79,7 +77,7 @@ bool inSessionWindow(SSessionKey* pKey, TSKEY ts, int64_t gap) { return false; } -SStreamStateCur* createSessionStateCursor(SStreamFileState* pFileState) { +SStreamStateCur* createStateCursor(SStreamFileState* pFileState) { SStreamStateCur* pCur = createStreamStateCursor(); if (pCur == NULL) { return NULL; @@ -536,7 +534,7 @@ 
static SStreamStateCur* seekKeyCurrentPrev_buff(SStreamFileState* pFileState, co } if (index >= 0) { - pCur = createSessionStateCursor(pFileState); + pCur = createStateCursor(pFileState); if (pCur == NULL) { return NULL; } @@ -580,7 +578,7 @@ static void checkAndTransformCursor(SStreamFileState* pFileState, const uint64_t if (taosArrayGetSize(pWinStates) > 0 && (code == TSDB_CODE_FAILED || sessionStateKeyCompare(&key, pWinStates, 0) >= 0)) { if (!(*ppCur)) { - (*ppCur) = createSessionStateCursor(pFileState); + (*ppCur) = createStateCursor(pFileState); } transformCursor(pFileState, *ppCur); } else if (*ppCur) { @@ -640,7 +638,7 @@ SStreamStateCur* countWinStateSeekKeyPrev(SStreamFileState* pFileState, const SS } pBuffCur->buffIndex = 0; } else if (taosArrayGetSize(pWinStates) > 0) { - pBuffCur = createSessionStateCursor(pFileState); + pBuffCur = createStateCursor(pFileState); if (pBuffCur == NULL) { return NULL; } diff --git a/source/libs/stream/src/streamSliceState.c b/source/libs/stream/src/streamSliceState.c new file mode 100644 index 00000000000..238bff8afca --- /dev/null +++ b/source/libs/stream/src/streamSliceState.c @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "tstreamFileState.h" + +#include "query.h" +#include "streamBackendRocksdb.h" +#include "tcommon.h" +#include "tsimplehash.h" + +#define NUM_OF_CACHE_WIN 64 +#define MAX_NUM_OF_CACHE_WIN 128 + +int32_t getHashSortRowBuff(SStreamFileState* pFileState, const SWinKey* pKey, void** pVal, int32_t* pVLen, + int32_t* pWinCode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + code = addRowBuffIfNotExist(pFileState, (void*)pKey, sizeof(SWinKey), pVal, pVLen, pWinCode); + QUERY_CHECK_CODE(code, lino, _end); + + SArray* pWinStates = NULL; + SSHashObj* pSearchBuff = getSearchBuff(pFileState); + code = addArrayBuffIfNotExist(pSearchBuff, pKey->groupId, &pWinStates); + QUERY_CHECK_CODE(code, lino, _end); + + // recover + if (taosArrayGetSize(pWinStates) == 0 && needClearDiskBuff(pFileState)) { + TSKEY ts = getFlushMark(pFileState); + SWinKey start = {.groupId = pKey->groupId, .ts = INT64_MAX}; + void* pState = getStateFileStore(pFileState); + SStreamStateCur* pCur = streamStateFillSeekKeyPrev_rocksdb(pState, &start); + for (int32_t i = 0; i < NUM_OF_CACHE_WIN; i++) { + SWinKey tmpKey = {.groupId = pKey->groupId}; + int32_t tmpRes = streamStateFillGetGroupKVByCur_rocksdb(pCur, &tmpKey, NULL, 0); + if (tmpRes != TSDB_CODE_SUCCESS) { + break; + } + void* tmp = taosArrayPush(pWinStates, &tmpKey); + QUERY_CHECK_NULL(tmp, code, lino, _end, terrno); + streamStateCurPrev_rocksdb(pCur); + } + taosArraySort(pWinStates, winKeyCmprImpl); + streamStateFreeCur(pCur); + } + + code = addSearchItem(pFileState, pWinStates, pKey); + QUERY_CHECK_CODE(code, lino, _end); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t hashSortFileGetFn(SStreamFileState* pFileState, void* pKey, void** data, int32_t* pDataLen) { + void* pState = getStateFileStore(pFileState); + return streamStateFillGet_rocksdb(pState, pKey, data, pDataLen); +} + +int32_t 
hashSortFileRemoveFn(SStreamFileState* pFileState, const void* pKey) { + void* pState = getStateFileStore(pFileState); + return streamStateFillDel_rocksdb(pState, pKey); +} + +void clearSearchBuff(SStreamFileState* pFileState) { + SSHashObj* pSearchBuff = getSearchBuff(pFileState); + if (!pSearchBuff) { + return; + } + TSKEY flushMark = getFlushMark(pFileState); + void* pIte = NULL; + int32_t iter = 0; + while ((pIte = tSimpleHashIterate(pSearchBuff, pIte, &iter)) != NULL) { + SArray* pWinStates = *((void**)pIte); + int32_t size = taosArrayGetSize(pWinStates); + if (size > 0) { + int64_t gpId = *(int64_t*)tSimpleHashGetKey(pIte, NULL); + SWinKey key = {.ts = flushMark, .groupId = gpId}; + int32_t num = binarySearch(pWinStates, size, &key, fillStateKeyCompare); + if (size > NUM_OF_CACHE_WIN) { + num = TMIN(num, size - NUM_OF_CACHE_WIN); + taosArrayRemoveBatch(pWinStates, 0, num, NULL); + } + } + } +} + +int32_t getStateFromRocksdbByCur(SStreamFileState* pFileState, SStreamStateCur* pCur, SWinKey* pResKey, SRowBuffPos** ppPos, int32_t* pVLen, int32_t* pWinCode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + void* tmpVal = NULL; + int32_t len = 0; + (*pWinCode) = streamStateFillGetGroupKVByCur_rocksdb(pCur, pResKey, (const void**)&tmpVal, &len); + if ((*pWinCode) == TSDB_CODE_SUCCESS) { + SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); + if (!pNewPos || !pNewPos->pRowBuff) { + code = TSDB_CODE_OUT_OF_MEMORY; + QUERY_CHECK_CODE(code, lino, _end); + } + memcpy(pNewPos->pRowBuff, tmpVal, len); + taosMemoryFreeClear(tmpVal); + *pVLen = getRowStateRowSize(pFileState); + (*ppPos) = pNewPos; + } +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t getHashSortNextRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal, + int32_t* pVLen, int32_t* pWinCode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SArray* 
pWinStates = NULL; + SSHashObj* pSearchBuff = getSearchBuff(pFileState); + void* pState = getStateFileStore(pFileState); + void** ppBuff = tSimpleHashGet(pSearchBuff, &pKey->groupId, sizeof(uint64_t)); + if (ppBuff) { + pWinStates = (SArray*)(*ppBuff); + } else { + SStreamStateCur* pCur = streamStateFillSeekKeyNext_rocksdb(pState, pKey); + void* tmpVal = NULL; + int32_t len = 0; + (*pWinCode) = streamStateFillGetGroupKVByCur_rocksdb(pCur, pResKey, (const void**)&tmpVal, &len); + if ((*pWinCode) == TSDB_CODE_SUCCESS && ppVal != NULL) { + SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); + if (!pNewPos || !pNewPos->pRowBuff) { + code = TSDB_CODE_OUT_OF_MEMORY; + QUERY_CHECK_CODE(code, lino, _end); + } + memcpy(pNewPos->pRowBuff, tmpVal, len); + *pVLen = getRowStateRowSize(pFileState); + (*ppVal) = pNewPos; + } + taosMemoryFreeClear(tmpVal); + streamStateFreeCur(pCur); + return code; + } + int32_t size = taosArrayGetSize(pWinStates); + int32_t index = binarySearch(pWinStates, size, pKey, fillStateKeyCompare); + if (index == -1) { + SStreamStateCur* pCur = streamStateFillSeekKeyNext_rocksdb(pState, pKey); + void* tmpVal = NULL; + int32_t len = 0; + (*pWinCode) = streamStateFillGetGroupKVByCur_rocksdb(pCur, pResKey, (const void**)&tmpVal, &len); + if ((*pWinCode) == TSDB_CODE_SUCCESS) { + if (ppVal != NULL) { + SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); + if (!pNewPos || !pNewPos->pRowBuff) { + code = TSDB_CODE_OUT_OF_MEMORY; + QUERY_CHECK_CODE(code, lino, _end); + } + memcpy(pNewPos->pRowBuff, tmpVal, len); + *pVLen = getRowStateRowSize(pFileState); + (*ppVal) = pNewPos; + } + taosMemoryFreeClear(tmpVal); + streamStateFreeCur(pCur); + return code; + } + streamStateFreeCur(pCur); + } + + if (index == size - 1) { + (*pWinCode) = TSDB_CODE_FAILED; + return code; + } + SWinKey* pNext = taosArrayGet(pWinStates, index + 1); + *pResKey = *pNext; + if (ppVal == NULL) { + (*pWinCode) = TSDB_CODE_SUCCESS; + return code; + } + return 
getHashSortRowBuff(pFileState, pResKey, ppVal, pVLen, pWinCode); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t getHashSortPrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal, + int32_t* pVLen, int32_t* pWinCode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SArray* pWinStates = NULL; + SSHashObj* pSearchBuff = getSearchBuff(pFileState); + void* pState = getStateFileStore(pFileState); + void** ppBuff = (void**) tSimpleHashGet(pSearchBuff, &pKey->groupId, sizeof(uint64_t)); + if (ppBuff) { + pWinStates = (SArray*)(*ppBuff); + } else { + qDebug("===stream=== search buff is empty.group id:%" PRId64, pKey->groupId); + SStreamStateCur* pCur = streamStateFillSeekKeyPrev_rocksdb(pState, pKey); + void* tmpVal = NULL; + int32_t len = 0; + (*pWinCode) = streamStateFillGetGroupKVByCur_rocksdb(pCur, pResKey, (const void**)&tmpVal, &len); + if ((*pWinCode) == TSDB_CODE_SUCCESS) { + SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); + if (!pNewPos || !pNewPos->pRowBuff) { + code = TSDB_CODE_OUT_OF_MEMORY; + QUERY_CHECK_CODE(code, lino, _end); + } + memcpy(pNewPos->pRowBuff, tmpVal, len); + taosMemoryFreeClear(tmpVal); + *pVLen = getRowStateRowSize(pFileState); + (*ppVal) = pNewPos; + } + streamStateFreeCur(pCur); + return code; + } + int32_t size = taosArrayGetSize(pWinStates); + int32_t index = binarySearch(pWinStates, size, pKey, fillStateKeyCompare); + if (index >= 0) { + SWinKey* pCurKey = taosArrayGet(pWinStates, index); + if (winKeyCmprImpl(pCurKey, pKey) == 0) { + index--; + } else { + qDebug("%s failed at line %d since do not find cur SWinKey. 
trigger may be force window close", __func__, __LINE__); + } + } + if (index == -1) { + SStreamStateCur* pCur = streamStateFillSeekKeyPrev_rocksdb(pState, pKey); + void* tmpVal = NULL; + int32_t len = 0; + (*pWinCode) = streamStateFillGetGroupKVByCur_rocksdb(pCur, pResKey, (const void**)&tmpVal, &len); + if ((*pWinCode) == TSDB_CODE_SUCCESS) { + SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); + if (!pNewPos || !pNewPos->pRowBuff) { + code = TSDB_CODE_OUT_OF_MEMORY; + QUERY_CHECK_CODE(code, lino, _end); + } + memcpy(pNewPos->pRowBuff, tmpVal, len); + taosMemoryFreeClear(tmpVal); + *pVLen = getRowStateRowSize(pFileState); + (*ppVal) = pNewPos; + } + streamStateFreeCur(pCur); + return code; + } else { + SWinKey* pPrevKey = taosArrayGet(pWinStates, index); + *pResKey = *pPrevKey; + return getHashSortRowBuff(pFileState, pResKey, ppVal, pVLen, pWinCode); + } + (*pWinCode) = TSDB_CODE_FAILED; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +void deleteHashSortRowBuff(SStreamFileState* pFileState, const SWinKey* pKey) { + SSHashObj* pSearchBuff = getSearchBuff(pFileState); + void** ppBuff = tSimpleHashGet(pSearchBuff, &pKey->groupId, sizeof(uint64_t)); + if (!ppBuff) { + return; + } + SArray* pWinStates = *ppBuff; + int32_t size = taosArrayGetSize(pWinStates); + if (!isFlushedState(pFileState, pKey->ts, 0)) { + // find the first position which is smaller than the pKey + int32_t index = binarySearch(pWinStates, size, pKey, fillStateKeyCompare); + if (index == -1) { + index = 0; + } + SWinKey* pTmpKey = taosArrayGet(pWinStates, index); + if (winKeyCmprImpl(pTmpKey, pKey) == 0) { + taosArrayRemove(pWinStates, index); + } + } +} diff --git a/source/libs/stream/src/streamStartHistory.c b/source/libs/stream/src/streamStartHistory.c index 4d7bf2ba875..54a8929123c 100644 --- a/source/libs/stream/src/streamStartHistory.c +++ b/source/libs/stream/src/streamStartHistory.c @@ -15,6 
+15,7 @@ #include "streamInt.h" #include "streamsm.h" +#include "tref.h" #include "trpc.h" #include "ttimer.h" #include "wal.h" @@ -24,7 +25,7 @@ #define SCANHISTORY_IDLE_TICK ((SCANHISTORY_MAX_IDLE_TIME * 1000) / SCANHISTORY_IDLE_TIME_SLICE) typedef struct SLaunchHTaskInfo { - SStreamMeta* pMeta; + int64_t metaRid; STaskId id; STaskId hTaskId; } SLaunchHTaskInfo; @@ -87,21 +88,15 @@ void streamExecScanHistoryInFuture(SStreamTask* pTask, int32_t idleDuration) { numOfTicks = SCANHISTORY_IDLE_TICK; } - // add ref for task - SStreamTask* p = NULL; - int32_t code = streamMetaAcquireTask(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, &p); - if (p == NULL || code != 0) { - stError("s-task:0x%x failed to acquire task, status:%s, not exec scan-history data", pTask->id.taskId, - streamTaskGetStatus(pTask).name); - return; - } - pTask->schedHistoryInfo.numOfTicks = numOfTicks; - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s scan-history resumed in %.2fs, ref:%d", pTask->id.idStr, numOfTicks * 0.1, ref); - streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTask, streamTimer, - &pTask->schedHistoryInfo.pTimer, vgId, "history-task"); + stDebug("s-task:%s scan-history resumed in %.2fs", pTask->id.idStr, numOfTicks * 0.1); + int64_t* pTaskRefId = NULL; + int32_t ret = streamTaskAllocRefId(pTask, &pTaskRefId); + if (ret == 0) { + streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTaskRefId, streamTimer, + &pTask->schedHistoryInfo.pTimer, vgId, "history-task"); + } } int32_t streamTaskStartScanHistory(SStreamTask* pTask) { @@ -220,42 +215,32 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) { // Set the execution conditions, including the query time window and the version range streamMetaRLock(pMeta); - SStreamTask** pHTask = taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id)); + SStreamTask* pHisTask = NULL; + code = streamMetaAcquireTaskUnsafe(pMeta, 
&pTask->hTaskInfo.id, &pHisTask); streamMetaRUnLock(pMeta); - if (pHTask != NULL) { // it is already added into stream meta store. - SStreamTask* pHisTask = NULL; - code = streamMetaAcquireTask(pMeta, hStreamId, hTaskId, &pHisTask); - if (pHisTask == NULL) { - stDebug("s-task:%s failed acquire and start fill-history task, it may have been dropped/stopped", idStr); - code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, false); + if (code == 0) { // it is already added into stream meta store. + if (pHisTask->status.downstreamReady == 1) { // it's ready now, do nothing + stDebug("s-task:%s fill-history task is ready, no need to check downstream", pHisTask->id.idStr); + code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, true); if (code) { stError("s-task:%s failed to record start task status, code:%s", idStr, tstrerror(code)); } - } else { - if (pHisTask->status.downstreamReady == 1) { // it's ready now, do nothing - stDebug("s-task:%s fill-history task is ready, no need to check downstream", pHisTask->id.idStr); - code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, true); - if (code) { - stError("s-task:%s failed to record start task status, code:%s", idStr, tstrerror(code)); - } - } else { // exist, but not ready, continue check downstream task status - if (pHisTask->pBackend == NULL) { - code = pMeta->expandTaskFn(pHisTask); - if (code != TSDB_CODE_SUCCESS) { - streamMetaAddFailedTaskSelf(pHisTask, now); - stError("s-task:%s failed to expand fill-history task, code:%s", pHisTask->id.idStr, tstrerror(code)); - } - } - - if (code == TSDB_CODE_SUCCESS) { - checkFillhistoryTaskStatus(pTask, pHisTask); + } else { // exist, but not ready, continue check downstream task status + if (pHisTask->pBackend == NULL) { + code = pMeta->expandTaskFn(pHisTask); + if (code != TSDB_CODE_SUCCESS) { + 
streamMetaAddFailedTaskSelf(pHisTask, now); + stError("s-task:%s failed to expand fill-history task, code:%s", pHisTask->id.idStr, tstrerror(code)); } } - streamMetaReleaseTask(pMeta, pHisTask); + if (code == TSDB_CODE_SUCCESS) { + checkFillhistoryTaskStatus(pTask, pHisTask); + } } + streamMetaReleaseTask(pMeta, pHisTask); return code; } else { return launchNotBuiltFillHistoryTask(pTask); @@ -296,14 +281,14 @@ void notRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, SStreamMeta* pMeta = pTask->pMeta; SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); +// int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); int32_t code = streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false); if (code) { stError("s-task:%s failed to record the start task status, code:%s", pTask->id.idStr, tstrerror(code)); } else { - stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x, ref:%d", - pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId, ref); + stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x", + pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId); } pHTaskInfo->id.taskId = 0; @@ -315,9 +300,9 @@ void doRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, i SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; if (streamTaskShouldStop(pTask)) { // record the failure - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:0x%" PRIx64 " stopped, not launch rel history task:0x%" PRIx64 ", ref:%d", pInfo->id.taskId, - pInfo->hTaskId.taskId, ref); +// int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); + stDebug("s-task:0x%" PRIx64 " stopped, not launch rel history task:0x%" PRIx64, pInfo->id.taskId, + pInfo->hTaskId.taskId); int32_t code = 
streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false); if (code) { @@ -336,30 +321,60 @@ void doRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, i } } +static void doCleanup(SStreamTask* pTask, int64_t metaRid, SLaunchHTaskInfo* pInfo) { + SStreamMeta* pMeta = pTask->pMeta; + int32_t vgId = pMeta->vgId; + + streamMetaReleaseTask(pMeta, pTask); + int32_t ret = taosReleaseRef(streamMetaRefPool, metaRid); + if (ret) { + stError("vgId:%d failed to release meta refId:%"PRId64, vgId, metaRid); + } + + if (pInfo != NULL) { + taosMemoryFree(pInfo); + } +} + void tryLaunchHistoryTask(void* param, void* tmrId) { SLaunchHTaskInfo* pInfo = param; - SStreamMeta* pMeta = pInfo->pMeta; + int64_t metaRid = pInfo->metaRid; int64_t now = taosGetTimestampMs(); int32_t code = 0; + SStreamTask* pTask = NULL; + int32_t vgId = 0; + + SStreamMeta* pMeta = taosAcquireRef(streamMetaRefPool, metaRid); + if (pMeta == NULL) { + stError("invalid meta rid:%" PRId64 " failed to acquired stream-meta", metaRid); + taosMemoryFree(pInfo); + return; + } + + vgId = pMeta->vgId; streamMetaWLock(pMeta); - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pInfo->id, sizeof(pInfo->id)); - if (ppTask == NULL || *ppTask == NULL) { + code = streamMetaAcquireTaskUnsafe(pMeta, &pInfo->id, &pTask); + if (code != 0) { stError("s-task:0x%x and rel fill-history task:0x%" PRIx64 " all have been destroyed, not launch", (int32_t)pInfo->id.taskId, pInfo->hTaskId.taskId); streamMetaWUnLock(pMeta); + int32_t ret = taosReleaseRef(streamMetaRefPool, metaRid); + if (ret) { + stError("vgId:%d failed to release meta refId:%"PRId64, vgId, metaRid); + } + // already dropped, no need to set the failure info into the stream task meta. 
taosMemoryFree(pInfo); return; } - if (streamTaskShouldStop(*ppTask)) { - char* p = streamTaskGetStatus(*ppTask).name; - int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1); - stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d, ref:%d", - (*ppTask)->id.idStr, p, (*ppTask)->hTaskInfo.retryTimes, ref); + if (streamTaskShouldStop(pTask)) { + char* p = streamTaskGetStatus(pTask).name; + stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d", pTask->id.idStr, p, + pTask->hTaskInfo.retryTimes); streamMetaWUnLock(pMeta); @@ -369,77 +384,54 @@ void tryLaunchHistoryTask(void* param, void* tmrId) { stError("s-task:0x%" PRId64 " failed to record the start task status, code:%s", pInfo->hTaskId.taskId, tstrerror(code)); } - taosMemoryFree(pInfo); + + doCleanup(pTask, metaRid, pInfo); return; } - SStreamTask* pTask = NULL; - code = streamMetaAcquireTaskNoLock(pMeta, pInfo->id.streamId, pInfo->id.taskId, &pTask); - if (code != TSDB_CODE_SUCCESS) { - // todo - } streamMetaWUnLock(pMeta); - if (pTask != NULL) { - SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; + SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; + pHTaskInfo->tickCount -= 1; + if (pHTaskInfo->tickCount > 0) { + streamTmrStart(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamTimer, &pHTaskInfo->pTimer, + pTask->pMeta->vgId, " start-history-task-tmr"); + doCleanup(pTask, metaRid, NULL); + return; + } - pHTaskInfo->tickCount -= 1; - if (pHTaskInfo->tickCount > 0) { - streamTmrStart(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamTimer, &pHTaskInfo->pTimer, - pTask->pMeta->vgId, " start-history-task-tmr"); - streamMetaReleaseTask(pMeta, pTask); + if (pHTaskInfo->retryTimes > MAX_RETRY_LAUNCH_HISTORY_TASK) { + notRetryLaunchFillHistoryTask(pTask, pInfo, now); + } else { // not reach the limitation yet, let's continue retrying launch related fill-history task. 
+ streamTaskSetRetryInfoForLaunch(pHTaskInfo); + + // abort the timer if intend to stop task + SStreamTask* pHTask = NULL; + code = streamMetaAcquireTask(pMeta, pHTaskInfo->id.streamId, pHTaskInfo->id.taskId, &pHTask); + if (pHTask == NULL) { + doRetryLaunchFillHistoryTask(pTask, pInfo, now); + doCleanup(pTask, metaRid, NULL); return; - } - - if (pHTaskInfo->retryTimes > MAX_RETRY_LAUNCH_HISTORY_TASK) { - notRetryLaunchFillHistoryTask(pTask, pInfo, now); - } else { // not reach the limitation yet, let's continue retrying launch related fill-history task. - streamTaskSetRetryInfoForLaunch(pHTaskInfo); - if (pTask->status.timerActive < 1) { - stError("s-task:%s invalid timerActive recorder:%d, abort timer", pTask->id.idStr, pTask->status.timerActive); - return; - } - - // abort the timer if intend to stop task - SStreamTask* pHTask = NULL; - code = streamMetaAcquireTask(pMeta, pHTaskInfo->id.streamId, pHTaskInfo->id.taskId, &pHTask); - if (pHTask == NULL) { - doRetryLaunchFillHistoryTask(pTask, pInfo, now); - streamMetaReleaseTask(pMeta, pTask); - return; - } else { - if (pHTask->pBackend == NULL) { - code = pMeta->expandTaskFn(pHTask); - if (code != TSDB_CODE_SUCCESS) { - streamMetaAddFailedTaskSelf(pHTask, now); - stError("failed to expand fill-history task:%s, code:%s", pHTask->id.idStr, tstrerror(code)); - } - } - - if (code == TSDB_CODE_SUCCESS) { - checkFillhistoryTaskStatus(pTask, pHTask); - // not in timer anymore - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:0x%x fill-history task launch completed, retry times:%d, ref:%d", (int32_t)pInfo->id.taskId, - pHTaskInfo->retryTimes, ref); + } else { + if (pHTask->pBackend == NULL) { + code = pMeta->expandTaskFn(pHTask); + if (code != TSDB_CODE_SUCCESS) { + streamMetaAddFailedTaskSelf(pHTask, now); + stError("failed to expand fill-history task:%s, code:%s", pHTask->id.idStr, tstrerror(code)); } - streamMetaReleaseTask(pMeta, pHTask); } - } - streamMetaReleaseTask(pMeta, pTask); 
- } else { - code = streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false); - if (code) { - stError("s-task:%s failed to record the start task status, code:%s", pTask->id.idStr, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + checkFillhistoryTaskStatus(pTask, pHTask); + // not in timer anymore + stDebug("s-task:0x%x fill-history task launch completed, retry times:%d", (int32_t)pInfo->id.taskId, + pHTaskInfo->retryTimes); + } + streamMetaReleaseTask(pMeta, pHTask); } - - int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1); - stError("s-task:0x%x rel fill-history task:0x%" PRIx64 " may have been destroyed, not launch, ref:%d", - (int32_t)pInfo->id.taskId, pInfo->hTaskId.taskId, ref); } - taosMemoryFree(pInfo); + doCleanup(pTask, metaRid, pInfo); } int32_t createHTaskLaunchInfo(SStreamMeta* pMeta, STaskId* pTaskId, int64_t hStreamId, int32_t hTaskId, @@ -455,7 +447,7 @@ int32_t createHTaskLaunchInfo(SStreamMeta* pMeta, STaskId* pTaskId, int64_t hStr (*pInfo)->hTaskId.streamId = hStreamId; (*pInfo)->hTaskId.taskId = hTaskId; - (*pInfo)->pMeta = pMeta; + (*pInfo)->metaRid = pMeta->rid; return TSDB_CODE_SUCCESS; } @@ -485,12 +477,10 @@ int32_t launchNotBuiltFillHistoryTask(SStreamTask* pTask) { // check for the timer if (pTask->hTaskInfo.pTimer == NULL) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); pTask->hTaskInfo.pTimer = taosTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer); if (pTask->hTaskInfo.pTimer == NULL) { - ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stError("s-task:%s failed to start timer, related fill-history task not launched, ref:%d", idStr, ref); + stError("s-task:%s failed to start timer, related fill-history task not launched", idStr); taosMemoryFree(pInfo); code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, false); @@ -500,18 +490,8 @@ int32_t 
launchNotBuiltFillHistoryTask(SStreamTask* pTask) { return terrno; } - if (ref < 1) { - stError("s-task:%s invalid timerActive recorder:%d, abort timer", pTask->id.idStr, pTask->status.timerActive); - return TSDB_CODE_STREAM_INTERNAL_ERROR; - } - - stDebug("s-task:%s set timer active flag, ref:%d", idStr, ref); + stDebug("s-task:%s set timer active flag", idStr); } else { // timer exists - if (pTask->status.timerActive < 1) { - stError("s-task:%s invalid timerActive recorder:%d, abort timer", pTask->id.idStr, pTask->status.timerActive); - return TSDB_CODE_STREAM_INTERNAL_ERROR; - } - stDebug("s-task:%s set timer active flag, task timer not null", idStr); streamTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer, &pTask->hTaskInfo.pTimer, pTask->pMeta->vgId, " start-history-task-tmr"); @@ -590,15 +570,22 @@ int32_t streamTaskSetRangeStreamCalc(SStreamTask* pTask) { } void doExecScanhistoryInFuture(void* param, void* tmrId) { - SStreamTask* pTask = param; + int64_t taskRefId = *(int64_t*) param; + + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); + streamTaskFreeRefId(param); + return; + } + pTask->schedHistoryInfo.numOfTicks -= 1; SStreamTaskState p = streamTaskGetStatus(pTask); if (p.state == TASK_STATUS__DROPPING || p.state == TASK_STATUS__STOP) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s not start scan-history again, ref:%d", pTask->id.idStr, p.name, ref); - + stDebug("s-task:%s status:%s not start scan-history again", pTask->id.idStr, p.name); streamMetaReleaseTask(pTask->pMeta, pTask); + streamTaskFreeRefId(param); return; } @@ -608,16 +595,19 @@ void doExecScanhistoryInFuture(void* param, void* tmrId) { stError("s-task:%s async start history task failed", pTask->id.idStr); } - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 
1); - stDebug("s-task:%s fill-history:%d start scan-history data, out of tmr, ref:%d", pTask->id.idStr, - pTask->info.fillHistory, ref); - - // release the task. - streamMetaReleaseTask(pTask->pMeta, pTask); + stDebug("s-task:%s fill-history:%d start scan-history data, out of tmr", pTask->id.idStr, + pTask->info.fillHistory); } else { - streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTask, streamTimer, - &pTask->schedHistoryInfo.pTimer, pTask->pMeta->vgId, " start-history-task-tmr"); + int64_t* pTaskRefId = NULL; + int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTaskRefId, streamTimer, + &pTask->schedHistoryInfo.pTimer, pTask->pMeta->vgId, " start-history-task-tmr"); + } } + + streamMetaReleaseTask(pTask->pMeta, pTask); + streamTaskFreeRefId(param); } int32_t doStartScanHistoryTask(SStreamTask* pTask) { diff --git a/source/libs/stream/src/streamStartTask.c b/source/libs/stream/src/streamStartTask.c index 0858f57414a..ed12687e410 100644 --- a/source/libs/stream/src/streamStartTask.c +++ b/source/libs/stream/src/streamStartTask.c @@ -196,19 +196,17 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3 STaskId id = {.streamId = streamId, .taskId = taskId}; int32_t vgId = pMeta->vgId; bool allRsp = true; + SStreamTask* p = NULL; streamMetaWLock(pMeta); - SStreamTask** p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (p == NULL) { // task does not exists in current vnode, not record the complete info + int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &p); + if (code != 0) { // task does not exist in current vnode, not record the complete info stError("vgId:%d s-task:0x%x not exists discard the check downstream info", vgId, taskId); streamMetaWUnLock(pMeta); return 0; } - // clear the send consensus-checkpointId flag -// streamMutexLock(&(*p)->lock); -// (*p)->status.sendConsensusChkptId = false; -// 
streamMutexUnlock(&(*p)->lock); + streamMetaReleaseTask(pMeta, p); if (pStartInfo->startAllTasks != 1) { int64_t el = endTs - startTs; @@ -222,7 +220,7 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3 STaskInitTs initTs = {.start = startTs, .end = endTs, .success = ready}; SHashObj* pDst = ready ? pStartInfo->pReadyTaskSet : pStartInfo->pFailedTaskSet; - int32_t code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs)); + code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs)); if (code) { if (code == TSDB_CODE_DUP_KEY) { stError("vgId:%d record start task result failed, s-task:0x%" PRIx64 @@ -296,13 +294,14 @@ void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) { while ((pIter = taosHashIterate(pTaskSet, pIter)) != NULL) { STaskInitTs* pInfo = pIter; void* key = taosHashGetKey(pIter, &keyLen); - - SStreamTask** pTask1 = taosHashGet(pMeta->pTasksMap, key, sizeof(STaskId)); - if (pTask1 == NULL) { - stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed"); + SStreamTask* pTask = NULL; + int32_t code = streamMetaAcquireTaskUnsafe(pMeta, key, &pTask); + if (code == 0) { + stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", pTask->id.idStr, + pTask->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? "success" : "failed"); + streamMetaReleaseTask(pMeta, pTask); } else { - stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", (*pTask1)->id.idStr, - (*pTask1)->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? "success" : "failed"); + stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed"); } } } @@ -356,7 +355,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas // fill-history task can only be launched by related stream tasks. 
STaskExecStatisInfo* pInfo = &pTask->execInfo; if (pTask->info.fillHistory == 1) { - stError("s-task:0x%x vgId:%d fill-histroy task, not start here", taskId, vgId); + stError("s-task:0x%x vgId:%d fill-history task, not start here", taskId, vgId); streamMetaReleaseTask(pMeta, pTask); return TSDB_CODE_SUCCESS; } @@ -364,6 +363,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas // the start all tasks procedure may happen to start the newly deployed stream task, and results in the // concurrently start this task by two threads. streamMutexLock(&pTask->lock); + SStreamTaskState status = streamTaskGetStatus(pTask); if (status.state != TASK_STATUS__UNINIT) { stError("s-task:0x%x vgId:%d status:%s not uninit status, not start stream task", taskId, vgId, status.name); @@ -380,6 +380,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas if(pTask->status.downstreamReady != 0) { stFatal("s-task:0x%x downstream should be not ready, but it ready here, internal error happens", taskId); + streamMetaReleaseTask(pMeta, pTask); return TSDB_CODE_STREAM_INTERNAL_ERROR; } @@ -396,7 +397,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas streamMutexUnlock(&pTask->lock); } - // concurrently start task may cause the later started task be failed, and also failed to added into meta result. + // concurrently start task may cause the latter started task be failed, and also failed to added into meta result. 
if (code == TSDB_CODE_SUCCESS) { code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT); if (code != TSDB_CODE_SUCCESS) { @@ -417,8 +418,10 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { streamMetaRLock(pMeta); + SArray* pTaskList = NULL; int32_t num = taosArrayGetSize(pMeta->pTaskList); stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num); + if (num == 0) { stDebug("vgId:%d stop all %d task(s) completed, elapsed time:0 Sec.", pMeta->vgId, num); streamMetaRUnLock(pMeta); @@ -428,14 +431,12 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { int64_t st = taosGetTimestampMs(); // send hb msg to mnode before closing all tasks. - SArray* pTaskList = NULL; int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList); if (code != TSDB_CODE_SUCCESS) { return code; } int32_t numOfTasks = taosArrayGetSize(pTaskList); - for (int32_t i = 0; i < numOfTasks; ++i) { SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); SStreamTask* pTask = NULL; @@ -445,10 +446,12 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { continue; } + int64_t refId = pTask->id.refId; int32_t ret = streamTaskStop(pTask); if (ret) { stError("s-task:0x%x failed to stop task, code:%s", pTaskId->taskId, tstrerror(ret)); } + streamMetaReleaseTask(pMeta, pTask); } @@ -466,6 +469,7 @@ int32_t streamTaskCheckIfReqConsenChkptId(SStreamTask* pTask, int64_t ts) { int32_t vgId = pTask->pMeta->vgId; if (pConChkptInfo->status == TASK_CONSEN_CHKPT_REQ) { + // mark the sending of req consensus checkpoint request. 
pConChkptInfo->status = TASK_CONSEN_CHKPT_SEND; pConChkptInfo->statusTs = ts; stDebug("s-task:%s vgId:%d set requiring consensus-chkptId in hbMsg, ts:%" PRId64, pTask->id.idStr, @@ -473,6 +477,8 @@ int32_t streamTaskCheckIfReqConsenChkptId(SStreamTask* pTask, int64_t ts) { return 1; } else { int32_t el = (ts - pConChkptInfo->statusTs) / 1000; + + // not recv consensus-checkpoint rsp for 60sec, send it again in hb to mnode if ((pConChkptInfo->status == TASK_CONSEN_CHKPT_SEND) && el > 60) { pConChkptInfo->statusTs = ts; @@ -492,7 +498,7 @@ void streamTaskSetConsenChkptIdRecv(SStreamTask* pTask, int32_t transId, int64_t pInfo->status = TASK_CONSEN_CHKPT_RECV; pInfo->statusTs = ts; - stDebug("s-task:%s set recv consen-checkpointId, transId:%d", pTask->id.idStr, transId); + stInfo("s-task:%s set recv consen-checkpointId, transId:%d", pTask->id.idStr, transId); } void streamTaskSetReqConsenChkptId(SStreamTask* pTask, int64_t ts) { @@ -507,23 +513,24 @@ void streamTaskSetReqConsenChkptId(SStreamTask* pTask, int64_t ts) { } int32_t streamMetaAddFailedTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { - int32_t code = TSDB_CODE_SUCCESS; - int64_t now = taosGetTimestampMs(); - int64_t startTs = 0; - bool hasFillhistoryTask = false; - STaskId hId = {0}; + int32_t code = TSDB_CODE_SUCCESS; + int64_t now = taosGetTimestampMs(); + int64_t startTs = 0; + bool hasFillhistoryTask = false; + STaskId hId = {0}; + STaskId id = {.streamId = streamId, .taskId = taskId}; + SStreamTask* pTask = NULL; stDebug("vgId:%d add start failed task:0x%x", pMeta->vgId, taskId); streamMetaRLock(pMeta); - STaskId id = {.streamId = streamId, .taskId = taskId}; - SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - - if (ppTask != NULL) { - startTs = (*ppTask)->taskCheckInfo.startTs; - hasFillhistoryTask = HAS_RELATED_FILLHISTORY_TASK(*ppTask); - hId = (*ppTask)->hTaskInfo.id; + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { + startTs = 
pTask->taskCheckInfo.startTs; + hasFillhistoryTask = HAS_RELATED_FILLHISTORY_TASK(pTask); + hId = pTask->hTaskInfo.id; + streamMetaReleaseTask(pMeta, pTask); streamMetaRUnLock(pMeta); diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index cfe476540cd..794fc346bff 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -120,7 +120,7 @@ SStreamState* streamStateOpen(const char* path, void* pTask, int64_t streamId, i SStreamTask* pStreamTask = pTask; pState->streamId = streamId; pState->taskId = taskId; - sprintf(pState->pTdbState->idstr, "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId); + TAOS_UNUSED(tsnprintf(pState->pTdbState->idstr, sizeof(pState->pTdbState->idstr), "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId)); code = streamTaskSetDb(pStreamTask->pMeta, pTask, pState->pTdbState->idstr); QUERY_CHECK_CODE(code, lino, _end); @@ -130,10 +130,8 @@ SStreamState* streamStateOpen(const char* path, void* pTask, int64_t streamId, i pState->pFileState = NULL; _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT); pState->parNameMap = tSimpleHashInit(1024, hashFn); - if (!pState->parNameMap) { - code = TSDB_CODE_OUT_OF_MEMORY; - QUERY_CHECK_CODE(code, lino, _end); - } + QUERY_CHECK_NULL(pState->parNameMap, code, lino, _end, terrno); + stInfo("open state %p on backend %p 0x%" PRIx64 "-%d succ", pState, pMeta->streamBackend, pState->streamId, pState->taskId); return pState; @@ -205,14 +203,10 @@ int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVa return code; } -// todo refactor -int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { - return 0; - // return streamStatePut_rocksdb(pState, key, value, vLen); -} +int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { return 0; } int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** 
pVal, int32_t* pVLen, int32_t* pWinCode) { - return getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), pVal, pVLen, pWinCode); + return addRowBuffIfNotExist(pState->pFileState, (void*)key, sizeof(SWinKey), pVal, pVLen, pWinCode); } bool streamStateCheck(SStreamState* pState, const SWinKey* key) { @@ -225,22 +219,36 @@ int32_t streamStateGetByPos(SStreamState* pState, void* pos, void** pVal) { return code; } -// todo refactor void streamStateDel(SStreamState* pState, const SWinKey* key) { deleteRowBuff(pState->pFileState, key, sizeof(SWinKey)); } -// todo refactor int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { return streamStateFillPut_rocksdb(pState, key, value, vLen); } -// todo refactor -int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) { +int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode) { + if (pState->pFileState) { + return getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), pVal, pVLen, pWinCode); + } return streamStateFillGet_rocksdb(pState, key, pVal, pVLen); } -// todo refactor +int32_t streamStateFillAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, + int32_t* pWinCode) { + return getHashSortRowBuff(pState->pFileState, key, pVal, pVLen, pWinCode); +} + +int32_t streamStateFillGetNext(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen, + int32_t* pWinCode) { + return getHashSortNextRow(pState->pFileState, pKey, pResKey, pVal, pVLen, pWinCode); +} + +int32_t streamStateFillGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen, + int32_t* pWinCode) { + return getHashSortPrevRow(pState->pFileState, pKey, pResKey, pVal, pVLen, pWinCode); +} + void streamStateFillDel(SStreamState* pState, const SWinKey* key) { int32_t code = streamStateFillDel_rocksdb(pState, 
key); qTrace("%s at line %d res %d", __func__, __LINE__, code); @@ -280,11 +288,27 @@ int32_t streamStateGetInfo(SStreamState* pState, void* pKey, int32_t keyLen, voi int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen, int32_t* pWinCode) { - return streamStateGet(pState, key, pVal, pVLen, pWinCode); + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + code = streamStateGet(pState, key, pVal, pVLen, pWinCode); + QUERY_CHECK_CODE(code, lino, _end); + + SSHashObj* pSearchBuff = getSearchBuff(pState->pFileState); + if (pSearchBuff != NULL) { + SArray* pWinStates = NULL; + code = addArrayBuffIfNotExist(pSearchBuff, key->groupId, &pWinStates); + QUERY_CHECK_CODE(code, lino, _end); + code = addSearchItem(pState->pFileState, pWinStates, key); + QUERY_CHECK_CODE(code, lino, _end); + } +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; } void streamStateReleaseBuf(SStreamState* pState, void* pVal, bool used) { - // todo refactor if (!pVal) { return; } @@ -309,8 +333,8 @@ int32_t streamStateFillGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const vo return streamStateFillGetKVByCur_rocksdb(pCur, pKey, pVal, pVLen); } -int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { - return streamStateGetGroupKVByCur_rocksdb(pCur, pKey, pVal, pVLen); +int32_t streamStateFillGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) { + return streamStateFillGetGroupKVByCur_rocksdb(pCur, pKey, pVal, pVLen); } SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key) { @@ -447,7 +471,6 @@ int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, int32_t streamStateStateAddIfNotExist(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen, state_key_cmpr_fn fn, void** pVal, int32_t* pVLen, int32_t* 
pWinCode) { - // todo refactor return getStateWinResultBuff(pState->pFileState, key, pKeyData, keyDataLen, fn, pVal, pVLen, pWinCode); } @@ -552,3 +575,44 @@ int32_t streamStateCountWinAddIfNotExist(SStreamState* pState, SSessionKey* pKey int32_t streamStateCountWinAdd(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen) { return createCountWinResultBuff(pState->pFileState, pKey, winCount, pVal, pVLen); } + +int32_t streamStateGroupPut(SStreamState* pState, int64_t groupId, void* value, int32_t vLen) { + return streamFileStateGroupPut(pState->pFileState, groupId, value, vLen); +} + +SStreamStateCur* streamStateGroupGetCur(SStreamState* pState) { + SStreamStateCur* pCur = createStateCursor(pState->pFileState); + pCur->hashIter = 0; + pCur->pHashData = NULL; + SSHashObj* pMap = getGroupIdCache(pState->pFileState); + pCur->pHashData = tSimpleHashIterate(pMap, pCur->pHashData, &pCur->hashIter); + if (pCur->pHashData == NULL) { + pCur->hashIter = -1; + streamStateParTagSeekKeyNext_rocksdb(pState, INT64_MIN, pCur); + } + return pCur; +} + +void streamStateGroupCurNext(SStreamStateCur* pCur) { + streamFileStateGroupCurNext(pCur); +} + +int32_t streamStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen) { + if (pVal != NULL) { + return -1; + } + return streamFileStateGroupGetKVByCur(pCur, pKey, pVal, pVLen); +} + +void streamStateClearExpiredState(SStreamState* pState) { + clearExpiredState(pState->pFileState); +} + +void streamStateSetFillInfo(SStreamState* pState) { + setFillInfo(pState->pFileState); +} + +int32_t streamStateGetPrev(SStreamState* pState, const SWinKey* pKey, SWinKey* pResKey, void** pVal, int32_t* pVLen, + int32_t* pWinCode) { + return getRowStatePrevRow(pState->pFileState, pKey, pResKey, pVal, pVLen, pWinCode); +} diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index b359cdfc81b..a044859b805 100644 --- a/source/libs/stream/src/streamTask.c +++ 
b/source/libs/stream/src/streamTask.c @@ -103,8 +103,9 @@ static SStreamUpstreamEpInfo* createStreamTaskEpInfo(const SStreamTask* pTask) { return pEpInfo; } -int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam, - SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** p) { +int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int32_t trigger, + int64_t triggerParam, SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, + SStreamTask** p) { *p = NULL; SStreamTask* pTask = (SStreamTask*)taosMemoryCalloc(1, sizeof(SStreamTask)); @@ -120,6 +121,7 @@ int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool pTask->info.taskLevel = taskLevel; pTask->info.fillHistory = fillHistory; + pTask->info.trigger = trigger; pTask->info.delaySchedParam = triggerParam; pTask->subtableWithoutMd5 = subtableWithoutMd5; @@ -211,22 +213,23 @@ int32_t tDecodeStreamTaskId(SDecoder* pDecoder, STaskId* pTaskId) { return 0; } -void tFreeStreamTask(SStreamTask* pTask) { - char* p = NULL; - int32_t taskId = pTask->id.taskId; +void tFreeStreamTask(void* pParam) { + char* p = NULL; + SStreamTask* pTask = pParam; + int32_t taskId = pTask->id.taskId; STaskExecStatisInfo* pStatis = &pTask->execInfo; ETaskStatus status1 = TASK_STATUS__UNINIT; streamMutexLock(&pTask->lock); if (pTask->status.pSM != NULL) { - SStreamTaskState pStatus = streamTaskGetStatus(pTask); - p = pStatus.name; - status1 = pStatus.state; + SStreamTaskState status = streamTaskGetStatus(pTask); + p = status.name; + status1 = status.state; } streamMutexUnlock(&pTask->lock); - stDebug("start to free s-task:0x%x %p, state:%s", taskId, pTask, p); + stDebug("start to free s-task:0x%x %p, state:%s, refId:%" PRId64, taskId, pTask, p, pTask->id.refId); SCheckpointInfo* pCkInfo = &pTask->chkInfo; stDebug("s-task:0x%x task exec summary: create:%" PRId64 ", init:%" PRId64 ", start:%" 
PRId64 @@ -235,12 +238,6 @@ void tFreeStreamTask(SStreamTask* pTask) { taskId, pStatis->created, pStatis->checkTs, pStatis->readyTs, pStatis->updateCount, pStatis->latestUpdateTs, pCkInfo->checkpointId, pCkInfo->checkpointVer, pCkInfo->nextProcessVer, pStatis->checkpoint); - // remove the ref by timer - while (pTask->status.timerActive > 0) { - stDebug("s-task:%s wait for task stop timer activities, ref:%d", pTask->id.idStr, pTask->status.timerActive); - taosMsleep(100); - } - if (pTask->schedInfo.pDelayTimer != NULL) { streamTmrStop(pTask->schedInfo.pDelayTimer); pTask->schedInfo.pDelayTimer = NULL; @@ -428,8 +425,7 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i return code; } - pTask->refCnt = 1; - + pTask->id.refId = 0; pTask->inputq.status = TASK_INPUT_STATUS__NORMAL; pTask->outputq.status = TASK_OUTPUT_STATUS__NORMAL; @@ -441,7 +437,6 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i } pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE; - pTask->status.timerActive = 0; code = streamCreateStateMachine(pTask); if (pTask->status.pSM == NULL || code != TSDB_CODE_SUCCESS) { @@ -837,28 +832,31 @@ int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask) { int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt) { int32_t code = 0; SStreamMeta* pMeta = pTask->pMeta; - STaskId sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId}; + SStreamTask* pStreamTask = NULL; + if (pTask->info.fillHistory == 0) { return code; } - SStreamTask** ppStreamTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &sTaskId, sizeof(sTaskId)); - if (ppStreamTask != NULL) { + code = streamMetaAcquireTaskUnsafe(pMeta, &pTask->streamTaskId, &pStreamTask); + if (code == 0) { stDebug("s-task:%s clear the related stream task:0x%x attr to fill-history task", pTask->id.idStr, - (int32_t)sTaskId.taskId); + (int32_t)pTask->streamTaskId.taskId); - 
streamMutexLock(&(*ppStreamTask)->lock); - CLEAR_RELATED_FILLHISTORY_TASK((*ppStreamTask)); + streamMutexLock(&(pStreamTask->lock)); + CLEAR_RELATED_FILLHISTORY_TASK(pStreamTask); if (resetRelHalt) { stDebug("s-task:0x%" PRIx64 " set the persistent status attr to be ready, prev:%s, status in sm:%s", - sTaskId.taskId, streamTaskGetStatusStr((*ppStreamTask)->status.taskStatus), - streamTaskGetStatus(*ppStreamTask).name); - (*ppStreamTask)->status.taskStatus = TASK_STATUS__READY; + pTask->streamTaskId.taskId, streamTaskGetStatusStr(pStreamTask->status.taskStatus), + streamTaskGetStatus(pStreamTask).name); + pStreamTask->status.taskStatus = TASK_STATUS__READY; } - code = streamMetaSaveTask(pMeta, *ppStreamTask); - streamMutexUnlock(&(*ppStreamTask)->lock); + code = streamMetaSaveTask(pMeta, pStreamTask); + streamMutexUnlock(&(pStreamTask->lock)); + + streamMetaReleaseTask(pMeta, pStreamTask); } return code; @@ -887,7 +885,7 @@ int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskI } int32_t streamSendChkptReportMsg(SStreamTask* pTask, SCheckpointInfo* pCheckpointInfo, int8_t dropRelHTask) { - int32_t code; + int32_t code = 0; int32_t tlen = 0; int32_t vgId = pTask->pMeta->vgId; const char* id = pTask->id.idStr; @@ -1282,3 +1280,27 @@ const char* streamTaskGetExecType(int32_t type) { return "invalid-exec-type"; } } + +int32_t streamTaskAllocRefId(SStreamTask* pTask, int64_t** pRefId) { + *pRefId = taosMemoryMalloc(sizeof(int64_t)); + if (*pRefId != NULL) { + **pRefId = pTask->id.refId; + int32_t code = metaRefMgtAdd(pTask->pMeta->vgId, *pRefId); + if (code != 0) { + stError("s-task:%s failed to add refId:%" PRId64 " into refId-mgmt, code:%s", pTask->id.idStr, pTask->id.refId, + tstrerror(code)); + } + return code; + } else { + stError("s-task:%s failed to alloc new ref id, code:%s", pTask->id.idStr, tstrerror(terrno)); + return terrno; + } +} + +void streamTaskFreeRefId(int64_t* pRefId) { + if (pRefId == NULL) { + return; + } + + 
metaRefMgtRemove(pRefId); +} \ No newline at end of file diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c index c3a2742aa20..f995c486889 100644 --- a/source/libs/stream/src/streamTaskSm.c +++ b/source/libs/stream/src/streamTaskSm.c @@ -500,7 +500,9 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even STaskStateTrans* pTrans = pSM->pActiveTrans; if (pTrans == NULL) { ETaskStatus s = pSM->current.state; - + // when trying to finish current event successfully, another event with high priorities, such as dropping/stop, has + // interrupted this procedure, and changed the status after freeing the activeTrans, resulting in the failure of + // processing of current event. if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP && s != TASK_STATUS__UNINIT && s != TASK_STATUS__READY) { stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name, diff --git a/source/libs/stream/src/streamTimer.c b/source/libs/stream/src/streamTimer.c index 0da9acfd1db..848e9c874ee 100644 --- a/source/libs/stream/src/streamTimer.c +++ b/source/libs/stream/src/streamTimer.c @@ -66,15 +66,9 @@ void streamTmrStop(tmr_h tmrId) { } } -int32_t streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, SStreamTask* pTask) { +void streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, void* param) { pInfo->activeCounter = 0; pInfo->launchChkptId = 0; atomic_store_8(&pInfo->isActive, 0); - - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - if (ref < 0) { - stFatal("invalid task timer ref value:%d, %s", ref, pTask->id.idStr); - } - - return ref; + streamTaskFreeRefId(param); } \ No newline at end of file diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c index 60f2294c204..a3cfa00127b 100644 --- a/source/libs/stream/src/streamUpdate.c +++ b/source/libs/stream/src/streamUpdate.c @@ -214,7 +214,6 @@ int32_t updateInfoInit(int64_t 
interval, int32_t precision, int64_t watermark, b if (pkLen != 0) { pInfo->comparePkRowFn = compareKeyTsAndPk; pInfo->comparePkCol = getKeyComparFunc(pkType, TSDB_ORDER_ASC); - ; } else { pInfo->comparePkRowFn = compareKeyTs; pInfo->comparePkCol = NULL; @@ -442,76 +441,69 @@ void updateInfoDestoryColseWinSBF(SUpdateInfo* pInfo) { pInfo->pCloseWinSBF = NULL; } -int32_t updateInfoSerialize(void* buf, int32_t bufLen, const SUpdateInfo* pInfo, int32_t* pLen) { +int32_t updateInfoSerialize(SEncoder* pEncoder, const SUpdateInfo* pInfo) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; if (!pInfo) { return TSDB_CODE_SUCCESS; } - SEncoder encoder = {0}; - tEncoderInit(&encoder, buf, bufLen); - if (tStartEncode(&encoder) != 0) { - code = TSDB_CODE_FAILED; - QUERY_CHECK_CODE(code, lino, _end); - } - int32_t size = taosArrayGetSize(pInfo->pTsBuckets); - if (tEncodeI32(&encoder, size) < 0) { + if (tEncodeI32(pEncoder, size) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } for (int32_t i = 0; i < size; i++) { TSKEY* pTs = (TSKEY*)taosArrayGet(pInfo->pTsBuckets, i); - if (tEncodeI64(&encoder, *pTs) < 0) { + if (tEncodeI64(pEncoder, *pTs) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } } - if (tEncodeU64(&encoder, pInfo->numBuckets) < 0) { + if (tEncodeU64(pEncoder, pInfo->numBuckets) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } int32_t sBfSize = taosArrayGetSize(pInfo->pTsSBFs); - if (tEncodeI32(&encoder, sBfSize) < 0) { + if (tEncodeI32(pEncoder, sBfSize) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } for (int32_t i = 0; i < sBfSize; i++) { SScalableBf* pSBf = taosArrayGetP(pInfo->pTsSBFs, i); - if (tScalableBfEncode(pSBf, &encoder) < 0) { + if (tScalableBfEncode(pSBf, pEncoder) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } } - if (tEncodeU64(&encoder, pInfo->numSBFs) < 0) { + if (tEncodeU64(pEncoder, pInfo->numSBFs) < 0) { code = TSDB_CODE_FAILED; 
QUERY_CHECK_CODE(code, lino, _end); } - if (tEncodeI64(&encoder, pInfo->interval) < 0) { + if (tEncodeI64(pEncoder, pInfo->interval) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } - if (tEncodeI64(&encoder, pInfo->watermark) < 0) { + if (tEncodeI64(pEncoder, pInfo->watermark) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } - if (tEncodeI64(&encoder, pInfo->minTS) < 0) { + if (tEncodeI64(pEncoder, pInfo->minTS) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } - if (tScalableBfEncode(pInfo->pCloseWinSBF, &encoder) < 0) { + if (tScalableBfEncode(pInfo->pCloseWinSBF, pEncoder) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } int32_t mapSize = taosHashGetSize(pInfo->pMap); - if (tEncodeI32(&encoder, mapSize) < 0) { + if (tEncodeI32(pEncoder, mapSize) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } @@ -519,60 +511,51 @@ int32_t updateInfoSerialize(void* buf, int32_t bufLen, const SUpdateInfo* pInfo, size_t keyLen = 0; while ((pIte = taosHashIterate(pInfo->pMap, pIte)) != NULL) { void* key = taosHashGetKey(pIte, &keyLen); - if (tEncodeU64(&encoder, *(uint64_t*)key) < 0) { + if (tEncodeU64(pEncoder, *(uint64_t*)key) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } int32_t valueSize = taosHashGetValueSize(pIte); - if (tEncodeBinary(&encoder, (const uint8_t*)pIte, valueSize) < 0) { + if (tEncodeBinary(pEncoder, (const uint8_t*)pIte, valueSize) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } } - if (tEncodeU64(&encoder, pInfo->maxDataVersion) < 0) { + if (tEncodeU64(pEncoder, pInfo->maxDataVersion) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } - if (tEncodeI32(&encoder, pInfo->pkColLen) < 0) { + if (tEncodeI32(pEncoder, pInfo->pkColLen) < 0) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } - if (tEncodeI8(&encoder, pInfo->pkColType) < 0) { + if (tEncodeI8(pEncoder, pInfo->pkColType) < 0) 
{ code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _end); } - tEndEncode(&encoder); - - int32_t tlen = encoder.pos; - *pLen = tlen; - _end: - tEncoderClear(&encoder); if (code != TSDB_CODE_SUCCESS) { uError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); } return code; } -int32_t updateInfoDeserialize(void* buf, int32_t bufLen, SUpdateInfo* pInfo) { +int32_t updateInfoDeserialize(SDecoder* pDeCoder, SUpdateInfo* pInfo) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; QUERY_CHECK_NULL(pInfo, code, lino, _error, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR); - SDecoder decoder = {0}; - tDecoderInit(&decoder, buf, bufLen); - if (tStartDecode(&decoder) < 0) return -1; - + int32_t size = 0; - if (tDecodeI32(&decoder, &size) < 0) return -1; + if (tDecodeI32(pDeCoder, &size) < 0) return -1; pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY)); QUERY_CHECK_NULL(pInfo->pTsBuckets, code, lino, _error, terrno); TSKEY ts = INT64_MIN; for (int32_t i = 0; i < size; i++) { - if (tDecodeI64(&decoder, &ts) < 0) return -1; + if (tDecodeI64(pDeCoder, &ts) < 0) return -1; void* tmp = taosArrayPush(pInfo->pTsBuckets, &ts); if (!tmp) { code = terrno; @@ -580,16 +563,16 @@ int32_t updateInfoDeserialize(void* buf, int32_t bufLen, SUpdateInfo* pInfo) { } } - if (tDecodeU64(&decoder, &pInfo->numBuckets) < 0) return -1; + if (tDecodeU64(pDeCoder, &pInfo->numBuckets) < 0) return -1; int32_t sBfSize = 0; - if (tDecodeI32(&decoder, &sBfSize) < 0) return -1; + if (tDecodeI32(pDeCoder, &sBfSize) < 0) return -1; pInfo->pTsSBFs = taosArrayInit(sBfSize, sizeof(void*)); QUERY_CHECK_NULL(pInfo->pTsSBFs, code, lino, _error, terrno); for (int32_t i = 0; i < sBfSize; i++) { SScalableBf* pSBf = NULL; - code = tScalableBfDecode(&decoder, &pSBf); + code = tScalableBfDecode(pDeCoder, &pSBf); QUERY_CHECK_CODE(code, lino, _error); void* tmp = taosArrayPush(pInfo->pTsSBFs, &pSBf); @@ -599,36 +582,36 @@ int32_t updateInfoDeserialize(void* buf, int32_t bufLen, SUpdateInfo* pInfo) { } } - 
if (tDecodeU64(&decoder, &pInfo->numSBFs) < 0) return -1; - if (tDecodeI64(&decoder, &pInfo->interval) < 0) return -1; - if (tDecodeI64(&decoder, &pInfo->watermark) < 0) return -1; - if (tDecodeI64(&decoder, &pInfo->minTS) < 0) return -1; + if (tDecodeU64(pDeCoder, &pInfo->numSBFs) < 0) return -1; + if (tDecodeI64(pDeCoder, &pInfo->interval) < 0) return -1; + if (tDecodeI64(pDeCoder, &pInfo->watermark) < 0) return -1; + if (tDecodeI64(pDeCoder, &pInfo->minTS) < 0) return -1; - code = tScalableBfDecode(&decoder, &pInfo->pCloseWinSBF); + code = tScalableBfDecode(pDeCoder, &pInfo->pCloseWinSBF); if (code != TSDB_CODE_SUCCESS) { pInfo->pCloseWinSBF = NULL; code = TSDB_CODE_SUCCESS; } int32_t mapSize = 0; - if (tDecodeI32(&decoder, &mapSize) < 0) return -1; + if (tDecodeI32(pDeCoder, &mapSize) < 0) return -1; _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT); pInfo->pMap = taosHashInit(mapSize, hashFn, true, HASH_NO_LOCK); uint64_t uid = 0; void* pVal = NULL; - int32_t valSize = 0; + uint32_t valSize = 0; for (int32_t i = 0; i < mapSize; i++) { - if (tDecodeU64(&decoder, &uid) < 0) return -1; - if (tDecodeBinary(&decoder, (uint8_t**)&pVal, &valSize) < 0) return -1; + if (tDecodeU64(pDeCoder, &uid) < 0) return -1; + if (tDecodeBinary(pDeCoder, (uint8_t**)&pVal, &valSize) < 0) return -1; code = taosHashPut(pInfo->pMap, &uid, sizeof(uint64_t), pVal, valSize); QUERY_CHECK_CODE(code, lino, _error); } QUERY_CHECK_CONDITION((mapSize == taosHashGetSize(pInfo->pMap)), code, lino, _error, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR); - if (tDecodeU64(&decoder, &pInfo->maxDataVersion) < 0) return -1; + if (tDecodeU64(pDeCoder, &pInfo->maxDataVersion) < 0) return -1; - if (tDecodeI32(&decoder, &pInfo->pkColLen) < 0) return -1; - if (tDecodeI8(&decoder, &pInfo->pkColType) < 0) return -1; + if (tDecodeI32(pDeCoder, &pInfo->pkColLen) < 0) return -1; + if (tDecodeI8(pDeCoder, &pInfo->pkColType) < 0) return -1; pInfo->pKeyBuff = taosMemoryCalloc(1, sizeof(TSKEY) + 
sizeof(int64_t) + pInfo->pkColLen); QUERY_CHECK_NULL(pInfo->pKeyBuff, code, lino, _error, terrno); @@ -644,10 +627,6 @@ int32_t updateInfoDeserialize(void* buf, int32_t bufLen, SUpdateInfo* pInfo) { pInfo->comparePkCol = NULL; } - tEndDecode(&decoder); - - tDecoderClear(&decoder); - _error: if (code != TSDB_CODE_SUCCESS) { uError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index 424845e4f2f..dc4ca7c0e5e 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -27,28 +27,36 @@ #define DEFAULT_MAX_STREAM_BUFFER_SIZE (128 * 1024 * 1024) #define MIN_NUM_OF_ROW_BUFF 10240 #define MIN_NUM_OF_RECOVER_ROW_BUFF 128 +#define MIN_NUM_SEARCH_BUCKET 128 +#define MAX_ARRAY_SIZE 1024 +#define MAX_GROUP_ID_NUM 200000 +#define NUM_OF_CACHE_WIN 64 +#define MAX_NUM_OF_CACHE_WIN 128 #define TASK_KEY "streamFileState" #define STREAM_STATE_INFO_NAME "StreamStateCheckPoint" struct SStreamFileState { - SList* usedBuffs; - SList* freeBuffs; - void* rowStateBuff; - void* pFileStore; - int32_t rowSize; - int32_t selectivityRowSize; - int32_t keyLen; - uint64_t preCheckPointVersion; - uint64_t checkPointVersion; - TSKEY maxTs; - TSKEY deleteMark; - TSKEY flushMark; - uint64_t maxRowCount; - uint64_t curRowCount; - GetTsFun getTs; - char* id; - char* cfName; + SList* usedBuffs; + SList* freeBuffs; + void* rowStateBuff; + void* pFileStore; + int32_t rowSize; + int32_t selectivityRowSize; + int32_t keyLen; + uint64_t preCheckPointVersion; + uint64_t checkPointVersion; + TSKEY maxTs; + TSKEY deleteMark; + TSKEY flushMark; + uint64_t maxRowCount; + uint64_t curRowCount; + GetTsFun getTs; + char* id; + char* cfName; + void* searchBuff; + SSHashObj* pGroupIdMap; + bool hasFillCatch; _state_buff_cleanup_fn stateBuffCleanupFn; _state_buff_remove_fn stateBuffRemoveFn; @@ -63,6 +71,11 @@ struct SStreamFileState { typedef SRowBuffPos 
SRowBuffInfo; +int fillStateKeyCompare(const void* pWin1, const void* pDatas, int pos) { + SWinKey* pWin2 = taosArrayGet(pDatas, pos); + return winKeyCmprImpl((SWinKey*)pWin1, pWin2); +} + int32_t stateHashBuffRemoveFn(void* pBuff, const void* pKey, size_t keyLen) { SRowBuffPos** pos = tSimpleHashGet(pBuff, pKey, keyLen); if (pos) { @@ -90,7 +103,7 @@ int32_t intervalFileRemoveFn(SStreamFileState* pFileState, const void* pKey) { return streamStateDel_rocksdb(pFileState->pFileStore, pKey); } -int32_t intervalFileGetFn(SStreamFileState* pFileState, void* pKey, void* data, int32_t* pDataLen) { +int32_t intervalFileGetFn(SStreamFileState* pFileState, void* pKey, void** data, int32_t* pDataLen) { return streamStateGet_rocksdb(pFileState->pFileStore, pKey, data, pDataLen); } @@ -106,11 +119,22 @@ void* intervalCreateStateKey(SRowBuffPos* pPos, int64_t num) { return pStateKey; } +void* defaultCreateStateKey(SRowBuffPos* pPos, int64_t num) { + SWinKey* pStateKey = taosMemoryCalloc(1, sizeof(SWinKey)); + if (pStateKey == NULL) { + qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); + return NULL; + } + SWinKey* pWinKey = pPos->pKey; + *pStateKey = *pWinKey; + return pStateKey; +} + int32_t sessionFileRemoveFn(SStreamFileState* pFileState, const void* pKey) { return streamStateSessionDel_rocksdb(pFileState->pFileStore, pKey); } -int32_t sessionFileGetFn(SStreamFileState* pFileState, void* pKey, void* data, int32_t* pDataLen) { +int32_t sessionFileGetFn(SStreamFileState* pFileState, void* pKey, void** data, int32_t* pDataLen) { return streamStateSessionGet_rocksdb(pFileState->pFileStore, pKey, data, pDataLen); } @@ -150,23 +174,23 @@ int32_t streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, } if (rowSize == 0) { code = TSDB_CODE_INVALID_PARA; - goto _error; + QUERY_CHECK_CODE(code, lino, _end); } SStreamFileState* pFileState = taosMemoryCalloc(1, sizeof(SStreamFileState)); - QUERY_CHECK_NULL(pFileState, code, lino, _error, 
terrno); + QUERY_CHECK_NULL(pFileState, code, lino, _end, terrno); rowSize += selectRowSize; pFileState->maxRowCount = TMAX((uint64_t)memSize / rowSize, FLUSH_NUM * 2); pFileState->usedBuffs = tdListNew(POINTER_BYTES); - QUERY_CHECK_NULL(pFileState->usedBuffs, code, lino, _error, terrno); + QUERY_CHECK_NULL(pFileState->usedBuffs, code, lino, _end, terrno); pFileState->freeBuffs = tdListNew(POINTER_BYTES); - QUERY_CHECK_NULL(pFileState->freeBuffs, code, lino, _error, terrno); + QUERY_CHECK_NULL(pFileState->freeBuffs, code, lino, _end, terrno); _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); int32_t cap = TMIN(MIN_NUM_OF_ROW_BUFF, pFileState->maxRowCount); - if (type == STREAM_STATE_BUFF_HASH) { + if (type == STREAM_STATE_BUFF_HASH || type == STREAM_STATE_BUFF_HASH_SEARCH) { pFileState->rowStateBuff = tSimpleHashInit(cap, hashFn); pFileState->stateBuffCleanupFn = stateHashBuffCleanupFn; pFileState->stateBuffRemoveFn = stateHashBuffRemoveFn; @@ -176,8 +200,8 @@ int32_t streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, pFileState->stateFileRemoveFn = intervalFileRemoveFn; pFileState->stateFileGetFn = intervalFileGetFn; pFileState->cfName = taosStrdup("state"); - pFileState->stateFunctionGetFn = getRowBuff; - } else { + pFileState->stateFunctionGetFn = addRowBuffIfNotExist; + } else if (type == STREAM_STATE_BUFF_SORT) { pFileState->rowStateBuff = tSimpleHashInit(cap, hashFn); pFileState->stateBuffCleanupFn = sessionWinStateCleanup; pFileState->stateBuffRemoveFn = deleteSessionWinStateBuffFn; @@ -188,9 +212,30 @@ int32_t streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, pFileState->stateFileGetFn = sessionFileGetFn; pFileState->cfName = taosStrdup("sess"); pFileState->stateFunctionGetFn = getSessionRowBuff; + } else if (type == STREAM_STATE_BUFF_HASH_SORT) { + pFileState->rowStateBuff = tSimpleHashInit(cap, hashFn); + pFileState->searchBuff = tSimpleHashInit(MIN_NUM_SEARCH_BUCKET, hashFn); + 
QUERY_CHECK_NULL(pFileState->searchBuff, code, lino, _end, terrno); + pFileState->stateBuffCleanupFn = stateHashBuffCleanupFn; + pFileState->stateBuffRemoveFn = stateHashBuffRemoveFn; + pFileState->stateBuffRemoveByPosFn = stateHashBuffRemoveByPosFn; + pFileState->stateBuffCreateStateKeyFn = defaultCreateStateKey; + + pFileState->stateFileRemoveFn = hashSortFileRemoveFn; + pFileState->stateFileGetFn = hashSortFileGetFn; + pFileState->cfName = taosStrdup("fill"); + pFileState->stateFunctionGetFn = NULL; + } + + QUERY_CHECK_NULL(pFileState->usedBuffs, code, lino, _end, terrno); + QUERY_CHECK_NULL(pFileState->freeBuffs, code, lino, _end, terrno); + QUERY_CHECK_NULL(pFileState->rowStateBuff, code, lino, _end, terrno); + QUERY_CHECK_NULL(pFileState->cfName, code, lino, _end, terrno); + + if (type == STREAM_STATE_BUFF_HASH_SEARCH) { + pFileState->searchBuff = tSimpleHashInit(MIN_NUM_SEARCH_BUCKET, hashFn); + QUERY_CHECK_NULL(pFileState->searchBuff, code, lino, _end, terrno); } - QUERY_CHECK_NULL(pFileState->rowStateBuff, code, lino, _error, terrno); - QUERY_CHECK_NULL(pFileState->cfName, code, lino, _error, terrno); pFileState->keyLen = keySize; pFileState->rowSize = rowSize; @@ -204,28 +249,34 @@ int32_t streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, pFileState->flushMark = INT64_MIN; pFileState->maxTs = INT64_MIN; pFileState->id = taosStrdup(taskId); - QUERY_CHECK_NULL(pFileState->id, code, lino, _error, terrno); + QUERY_CHECK_NULL(pFileState->id, code, lino, _end, terrno); + + pFileState->pGroupIdMap = tSimpleHashInit(1024, hashFn); + QUERY_CHECK_NULL(pFileState->pGroupIdMap, code, lino, _end, terrno); - // todo(liuyao) optimize - if (type == STREAM_STATE_BUFF_HASH) { + pFileState->hasFillCatch = true; + + if (type == STREAM_STATE_BUFF_HASH || type == STREAM_STATE_BUFF_HASH_SEARCH) { code = recoverSnapshot(pFileState, checkpointId); - } else { + } else if (type == STREAM_STATE_BUFF_SORT) { code = recoverSesssion(pFileState, checkpointId); + } 
else if (type == STREAM_STATE_BUFF_HASH_SORT) { + code = recoverFillSnapshot(pFileState, checkpointId); } - QUERY_CHECK_CODE(code, lino, _error); + QUERY_CHECK_CODE(code, lino, _end); void* valBuf = NULL; int32_t len = 0; int32_t tmpRes = streamDefaultGet_rocksdb(pFileState->pFileStore, STREAM_STATE_INFO_NAME, &valBuf, &len); if (tmpRes == TSDB_CODE_SUCCESS) { - QUERY_CHECK_CONDITION((len == sizeof(TSKEY)), code, lino, _error, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR); + QUERY_CHECK_CONDITION((len == sizeof(TSKEY)), code, lino, _end, TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR); streamFileStateDecode(&pFileState->flushMark, valBuf, len); qDebug("===stream===flushMark read:%" PRId64, pFileState->flushMark); } taosMemoryFreeClear(valBuf); (*ppFileState) = pFileState; -_error: +_end: if (code != TSDB_CODE_SUCCESS) { streamFileStateDestroy(pFileState); qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); @@ -274,6 +325,8 @@ void streamFileStateDestroy(SStreamFileState* pFileState) { tdListFreeP(pFileState->usedBuffs, destroyRowBuffAllPosPtr); tdListFreeP(pFileState->freeBuffs, destroyRowBuff); pFileState->stateBuffCleanupFn(pFileState->rowStateBuff); + sessionWinStateCleanup(pFileState->searchBuff); + tSimpleHashCleanup(pFileState->pGroupIdMap); taosMemoryFree(pFileState); } @@ -321,7 +374,7 @@ void clearExpiredRowBuff(SStreamFileState* pFileState, TSKEY ts, bool all) { } } -int32_t clearFlushedRowBuff(SStreamFileState* pFileState, SStreamSnapshot* pFlushList, uint64_t max) { +int32_t clearFlushedRowBuff(SStreamFileState* pFileState, SStreamSnapshot* pFlushList, uint64_t max, bool all) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; uint64_t i = 0; @@ -331,16 +384,21 @@ int32_t clearFlushedRowBuff(SStreamFileState* pFileState, SStreamSnapshot* pFlus SListNode* pNode = NULL; while ((pNode = tdListNext(&iter)) != NULL && i < max) { SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data; - if (isFlushedState(pFileState, pFileState->getTs(pPos->pKey), 0) && 
!pPos->beUsed) { - code = tdListAppend(pFlushList, &pPos); - QUERY_CHECK_CODE(code, lino, _end); + if (isFlushedState(pFileState, pFileState->getTs(pPos->pKey), 0)) { + if (all || !pPos->beUsed) { + if (all && !pPos->pRowBuff) { + continue; + } + code = tdListAppend(pFlushList, &pPos); + QUERY_CHECK_CODE(code, lino, _end); - pFileState->flushMark = TMAX(pFileState->flushMark, pFileState->getTs(pPos->pKey)); - pFileState->stateBuffRemoveByPosFn(pFileState, pPos); - SListNode* tmp = tdListPopNode(pFileState->usedBuffs, pNode); - taosMemoryFreeClear(tmp); - if (pPos->pRowBuff) { - i++; + pFileState->flushMark = TMAX(pFileState->flushMark, pFileState->getTs(pPos->pKey)); + pFileState->stateBuffRemoveByPosFn(pFileState, pPos); + SListNode* tmp = tdListPopNode(pFileState->usedBuffs, pNode); + taosMemoryFreeClear(tmp); + if (pPos->pRowBuff) { + i++; + } } } } @@ -411,7 +469,7 @@ int32_t flushRowBuff(SStreamFileState* pFileState) { uint64_t num = (uint64_t)(pFileState->curRowCount * FLUSH_RATIO); num = TMAX(num, FLUSH_NUM); - code = clearFlushedRowBuff(pFileState, pFlushList, num); + code = clearFlushedRowBuff(pFileState, pFlushList, num, false); QUERY_CHECK_CODE(code, lino, _end); if (isListEmpty(pFlushList)) { @@ -424,6 +482,11 @@ int32_t flushRowBuff(SStreamFileState* pFileState) { } } + if (pFileState->searchBuff) { + code = clearFlushedRowBuff(pFileState, pFlushList, pFileState->curRowCount, true); + QUERY_CHECK_CODE(code, lino, _end); + } + flushSnapshot(pFileState, pFlushList, false); SListIter fIter = {0}; @@ -542,18 +605,20 @@ SRowBuffPos* getNewRowPosForWrite(SStreamFileState* pFileState) { return NULL; } -int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen, - int32_t* pWinCode) { +int32_t addRowBuffIfNotExist(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen, + int32_t* pWinCode) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; (*pWinCode) = TSDB_CODE_SUCCESS; 
pFileState->maxTs = TMAX(pFileState->maxTs, pFileState->getTs(pKey)); SRowBuffPos** pos = tSimpleHashGet(pFileState->rowStateBuff, pKey, keyLen); if (pos) { - *pVLen = pFileState->rowSize; - *pVal = *pos; - (*pos)->beUsed = true; - (*pos)->beFlushed = false; + if (pVal != NULL) { + *pVLen = pFileState->rowSize; + *pVal = *pos; + (*pos)->beUsed = true; + (*pos)->beFlushed = false; + } goto _end; } SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); @@ -569,7 +634,7 @@ int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, voi if (!isDeteled(pFileState, ts) && isFlushedState(pFileState, ts, 0)) { int32_t len = 0; void* p = NULL; - (*pWinCode) = streamStateGet_rocksdb(pFileState->pFileStore, pKey, &p, &len); + (*pWinCode) = pFileState->stateFileGetFn(pFileState, pKey, &p, &len); qDebug("===stream===get %" PRId64 " from disc, res %d", ts, (*pWinCode)); if ((*pWinCode) == TSDB_CODE_SUCCESS) { memcpy(pNewPos->pRowBuff, p, len); @@ -597,11 +662,17 @@ void deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLe qTrace("%s at line %d res:%d", __func__, __LINE__, code_buff); int32_t code_file = pFileState->stateFileRemoveFn(pFileState, pKey); qTrace("%s at line %d res:%d", __func__, __LINE__, code_file); + if (pFileState->searchBuff != NULL) { + deleteHashSortRowBuff(pFileState, pKey); + } } int32_t resetRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen) { int32_t code_buff = pFileState->stateBuffRemoveFn(pFileState->rowStateBuff, pKey, keyLen); int32_t code_file = pFileState->stateFileRemoveFn(pFileState, pKey); + if (pFileState->searchBuff != NULL) { + deleteHashSortRowBuff(pFileState, pKey); + } if (code_buff == TSDB_CODE_SUCCESS || code_file == TSDB_CODE_SUCCESS) { return TSDB_CODE_SUCCESS; } @@ -625,18 +696,9 @@ static int32_t recoverSessionRowBuff(SStreamFileState* pFileState, SRowBuffPos* return code; } -int32_t getRowBuffByPos(SStreamFileState* pFileState, SRowBuffPos* pPos, void** pVal) { +static 
int32_t recoverStateRowBuff(SStreamFileState* pFileState, SRowBuffPos* pPos) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; - if (pPos->pRowBuff) { - if (pPos->needFree) { - code = recoverSessionRowBuff(pFileState, pPos); - QUERY_CHECK_CODE(code, lino, _end); - } - (*pVal) = pPos->pRowBuff; - goto _end; - } - pPos->pRowBuff = getFreeBuff(pFileState); if (!pPos->pRowBuff) { if (pFileState->curRowCount < pFileState->maxRowCount) { @@ -657,9 +719,32 @@ int32_t getRowBuffByPos(SStreamFileState* pFileState, SRowBuffPos* pPos, void** code = recoverSessionRowBuff(pFileState, pPos); QUERY_CHECK_CODE(code, lino, _end); +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t getRowBuffByPos(SStreamFileState* pFileState, SRowBuffPos* pPos, void** pVal) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + if (pPos->pRowBuff) { + if (pPos->needFree) { + code = recoverSessionRowBuff(pFileState, pPos); + QUERY_CHECK_CODE(code, lino, _end); + } + (*pVal) = pPos->pRowBuff; + goto _end; + } + + code = recoverStateRowBuff(pFileState, pPos); + QUERY_CHECK_CODE(code, lino, _end); + (*pVal) = pPos->pRowBuff; if (!pPos->needFree) { code = tdListPrepend(pFileState->usedBuffs, &pPos); + QUERY_CHECK_CODE(code, lino, _end); } _end: @@ -748,6 +833,8 @@ void flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, boo streamStateClearBatch(batch); + clearSearchBuff(pFileState); + int64_t elapsed = taosGetTimestampMs() - st; qDebug("%s flush to disk in batch model completed, rows:%d, batch size:%d, elapsed time:%" PRId64 "ms", pFileState->id, numOfElems, BATCH_LIMIT, elapsed); @@ -777,7 +864,7 @@ void flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, boo int32_t forceRemoveCheckpoint(SStreamFileState* pFileState, int64_t checkpointId) { char keyBuf[128] = {0}; - sprintf(keyBuf, "%s:%" PRId64 "", TASK_KEY, checkpointId); + TAOS_UNUSED(tsnprintf(keyBuf, 
sizeof(keyBuf), "%s:%" PRId64 "", TASK_KEY, checkpointId)); return streamDefaultDel_rocksdb(pFileState->pFileStore, keyBuf); } @@ -799,14 +886,14 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) { } memcpy(buf, val, len); buf[len] = 0; - maxCheckPointId = atol((char*)buf); + maxCheckPointId = taosStr2Int64((char*)buf, NULL, 10); taosMemoryFree(val); } for (int64_t i = maxCheckPointId; i > 0; i--) { char buf[128] = {0}; void* val = 0; int32_t len = 0; - sprintf(buf, "%s:%" PRId64 "", TASK_KEY, i); + TAOS_UNUSED(tsnprintf(buf, sizeof(buf), "%s:%" PRId64 "", TASK_KEY, i)); code = streamDefaultGet_rocksdb(pFileState->pFileStore, buf, &val, &len); if (code != 0) { return TSDB_CODE_FAILED; @@ -816,7 +903,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) { taosMemoryFree(val); TSKEY ts; - ts = atol((char*)buf); + ts = taosStr2Int64((char*)buf, NULL, 10); if (ts < mark) { // statekey winkey.ts < mark int32_t tmpRes = forceRemoveCheckpoint(pFileState, i); @@ -914,6 +1001,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) { if (vlen != pFileState->rowSize) { qError("row size mismatch, expect:%d, actual:%d", pFileState->rowSize, vlen); code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + taosMemoryFreeClear(pVal); QUERY_CHECK_CODE(code, lino, _end); } memcpy(pNewPos->pRowBuff, pVal, vlen); @@ -943,6 +1031,7 @@ void streamFileStateReloadInfo(SStreamFileState* pFileState, TSKEY ts) { } void* getRowStateBuff(SStreamFileState* pFileState) { return pFileState->rowStateBuff; } +void* getSearchBuff(SStreamFileState* pFileState) { return pFileState->searchBuff; } void* getStateFileStore(SStreamFileState* pFileState) { return pFileState->pFileStore; } @@ -953,9 +1042,394 @@ bool isDeteled(SStreamFileState* pFileState, TSKEY ts) { bool isFlushedState(SStreamFileState* pFileState, TSKEY ts, TSKEY gap) { return ts <= (pFileState->flushMark + gap); } +TSKEY getFlushMark(SStreamFileState* pFileState) { return 
pFileState->flushMark; }; + int32_t getRowStateRowSize(SStreamFileState* pFileState) { return pFileState->rowSize; } int32_t getFunctionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen) { int32_t winCode = TSDB_CODE_SUCCESS; return pFileState->stateFunctionGetFn(pFileState, pKey, keyLen, pVal, pVLen, &winCode); } + +int32_t recoverFillSnapshot(SStreamFileState* pFileState, int64_t ckId) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + if (pFileState->maxTs != INT64_MIN) { + int64_t mark = (INT64_MIN + pFileState->deleteMark >= pFileState->maxTs) + ? INT64_MIN + : pFileState->maxTs - pFileState->deleteMark; + code = deleteExpiredCheckPoint(pFileState, mark); + QUERY_CHECK_CODE(code, lino, _end); + } + + SStreamStateCur* pCur = streamStateFillSeekToLast_rocksdb(pFileState->pFileStore); + if (pCur == NULL) { + return code; + } + int32_t recoverNum = TMIN(MIN_NUM_OF_RECOVER_ROW_BUFF, pFileState->maxRowCount); + int32_t winRes = TSDB_CODE_SUCCESS; + while (winRes == TSDB_CODE_SUCCESS) { + if (pFileState->curRowCount >= recoverNum) { + break; + } + + void* pVal = NULL; + int32_t vlen = 0; + SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); + winRes = streamStateFillGetKVByCur_rocksdb(pCur, pNewPos->pKey, (const void**)&pVal, &vlen); + if (winRes != TSDB_CODE_SUCCESS || isFlushedState(pFileState, pFileState->getTs(pNewPos->pKey), 0)) { + destroyRowBuffPos(pNewPos); + SListNode* pNode = tdListPopTail(pFileState->usedBuffs); + taosMemoryFreeClear(pNode); + taosMemoryFreeClear(pVal); + break; + } + + memcpy(pNewPos->pRowBuff, pVal, vlen); + taosMemoryFreeClear(pVal); + pNewPos->beFlushed = true; + winRes = tSimpleHashPut(pFileState->rowStateBuff, pNewPos->pKey, pFileState->keyLen, &pNewPos, POINTER_BYTES); + if (winRes != TSDB_CODE_SUCCESS) { + destroyRowBuffPos(pNewPos); + break; + } + streamStateCurPrev_rocksdb(pCur); + } + streamStateFreeCur(pCur); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s 
failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t getRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen, + int32_t* pWinCode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + (*pWinCode) = TSDB_CODE_FAILED; + pFileState->maxTs = TMAX(pFileState->maxTs, pFileState->getTs(pKey)); + SRowBuffPos** ppPos = tSimpleHashGet(pFileState->rowStateBuff, pKey, keyLen); + if (ppPos) { + *pVLen = pFileState->rowSize; + *pVal = *ppPos; + (*ppPos)->beUsed = true; + (*ppPos)->beFlushed = false; + (*pWinCode) = TSDB_CODE_SUCCESS; + if ((*ppPos)->pRowBuff == NULL) { + code = recoverStateRowBuff(pFileState, *ppPos); + QUERY_CHECK_CODE(code, lino, _end); + } + goto _end; + } + TSKEY ts = pFileState->getTs(pKey); + if (!isDeteled(pFileState, ts) && isFlushedState(pFileState, ts, 0)) { + int32_t len = 0; + void* p = NULL; + (*pWinCode) = pFileState->stateFileGetFn(pFileState, pKey, &p, &len); + qDebug("===stream===get %" PRId64 " from disc, res %d", ts, (*pWinCode)); + if ((*pWinCode) == TSDB_CODE_SUCCESS) { + SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); + if (!pNewPos || !pNewPos->pRowBuff) { + code = TSDB_CODE_OUT_OF_MEMORY; + QUERY_CHECK_CODE(code, lino, _end); + } + + memcpy(pNewPos->pKey, pKey, keyLen); + memcpy(pNewPos->pRowBuff, p, len); + code = tSimpleHashPut(pFileState->rowStateBuff, pKey, keyLen, &pNewPos, POINTER_BYTES); + QUERY_CHECK_CODE(code, lino, _end); + + if (pVal) { + *pVLen = pFileState->rowSize; + *pVal = pNewPos; + } + } + taosMemoryFree(p); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t streamFileStateGroupPut(SStreamFileState* pFileState, int64_t groupId, void* value, int32_t vLen) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + if (value != NULL) { + code = TSDB_CODE_INVALID_PARA; + QUERY_CHECK_CODE(code, lino, _end); + } + + if 
(tSimpleHashGet(pFileState->pGroupIdMap, &groupId, sizeof(int64_t)) == NULL) { + if (tSimpleHashGetSize(pFileState->pGroupIdMap) <= MAX_GROUP_ID_NUM) { + code = tSimpleHashPut(pFileState->pGroupIdMap, &groupId, sizeof(int64_t), NULL, 0); + QUERY_CHECK_CODE(code, lino, _end); + } + code = streamStatePutParTag_rocksdb(pFileState->pFileStore, groupId, value, vLen); + QUERY_CHECK_CODE(code, lino, _end); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +void streamFileStateGroupCurNext(SStreamStateCur* pCur) { + SStreamFileState* pFileState = (SStreamFileState*)pCur->pStreamFileState; + if (pCur->hashIter == -1) { + streamStateCurNext(pFileState->pFileStore, pCur); + return; + } + + int64_t gpId = *(int64_t*)tSimpleHashGetKey(pCur->pHashData, NULL); + pCur->minGpId = TMAX(pCur->minGpId, gpId); + + SSHashObj* pHash = pFileState->pGroupIdMap; + pCur->pHashData = tSimpleHashIterate(pHash, pCur->pHashData, &pCur->hashIter); + if (!pCur->pHashData) { + pCur->hashIter = -1; + streamStateParTagSeekKeyNext_rocksdb(pFileState->pFileStore, pCur->minGpId, pCur); + return; + } +} + +int32_t streamFileStateGroupGetKVByCur(SStreamStateCur* pCur, int64_t* pKey, void** pVal, int32_t* pVLen) { + int32_t code = TSDB_CODE_SUCCESS; + if (pCur->pHashData) { + *pKey = *(int64_t*)tSimpleHashGetKey(pCur->pHashData, NULL); + return code; + } + return streamStateParTagGetKVByCur_rocksdb(pCur, pKey, NULL, NULL); +} + +SSHashObj* getGroupIdCache(SStreamFileState* pFileState) { + return pFileState->pGroupIdMap; +} + +void setFillInfo(SStreamFileState* pFileState) { + pFileState->hasFillCatch = false; +} + +void clearExpiredState(SStreamFileState* pFileState) { + SSHashObj* pSearchBuff = pFileState->searchBuff; + void* pIte = NULL; + int32_t iter = 0; + while ((pIte = tSimpleHashIterate(pSearchBuff, pIte, &iter)) != NULL) { + SArray* pWinStates = *((void**)pIte); + int32_t size = 
taosArrayGetSize(pWinStates); + for (int32_t i = 0; i < size - 1; i++) { + SWinKey* pKey = taosArrayGet(pWinStates, i); + int32_t code_buff = pFileState->stateBuffRemoveFn(pFileState->rowStateBuff, pKey, sizeof(SWinKey)); + qTrace("clear expired buff, ts:%" PRId64 ". %s at line %d res:%d", pKey->ts, __func__, __LINE__, code_buff); + + if (isFlushedState(pFileState, pKey->ts, 0)) { + int32_t code_file = pFileState->stateFileRemoveFn(pFileState, pKey); + qTrace("clear expired file, ts:%" PRId64 ". %s at line %d res:%d", pKey->ts, __func__, __LINE__, code_file); + } + + if (pFileState->hasFillCatch == false) { + int32_t code_file = streamStateFillDel_rocksdb(pFileState->pFileStore, pKey); + qTrace("force clear expired file, ts:%" PRId64 ". %s at line %d res %d", pKey->ts, __func__, __LINE__, code_file); + } + } + taosArrayRemoveBatch(pWinStates, 0, size - 1, NULL); + } +} + +int32_t getStateSearchRowBuff(SStreamFileState* pFileState, const SWinKey* pKey, void** pVal, int32_t* pVLen, + int32_t* pWinCode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + code = addRowBuffIfNotExist(pFileState, (void*)pKey, sizeof(SWinKey), pVal, pVLen, pWinCode); + QUERY_CHECK_CODE(code, lino, _end); + + SArray* pWinStates = NULL; + SSHashObj* pSearchBuff = getSearchBuff(pFileState); + void** ppBuff = tSimpleHashGet(pSearchBuff, &pKey->groupId, sizeof(uint64_t)); + if (ppBuff) { + pWinStates = (SArray*)(*ppBuff); + } else { + pWinStates = taosArrayInit(16, sizeof(SWinKey)); + QUERY_CHECK_NULL(pWinStates, code, lino, _end, terrno); + + code = tSimpleHashPut(pSearchBuff, &pKey->groupId, sizeof(uint64_t), &pWinStates, POINTER_BYTES); + QUERY_CHECK_CODE(code, lino, _end); + } + + // recover + if (taosArrayGetSize(pWinStates) == 0 && needClearDiskBuff(pFileState)) { + TSKEY ts = getFlushMark(pFileState); + SWinKey start = {.groupId = pKey->groupId, .ts = INT64_MAX}; + void* pState = getStateFileStore(pFileState); + SStreamStateCur* pCur = streamStateSeekKeyPrev_rocksdb(pState, 
&start); + for (int32_t i = 0; i < NUM_OF_CACHE_WIN; i++) { + SWinKey tmpKey = {.groupId = pKey->groupId}; + int32_t tmpRes = streamStateGetGroupKVByCur_rocksdb(pState, pCur, &tmpKey, NULL, 0); + if (tmpRes != TSDB_CODE_SUCCESS) { + break; + } + void* tmp = taosArrayPush(pWinStates, &tmpKey); + QUERY_CHECK_NULL(tmp, code, lino, _end, terrno); + streamStateCurPrev_rocksdb(pCur); + } + taosArraySort(pWinStates, winKeyCmprImpl); + streamStateFreeCur(pCur); + } + + int32_t size = taosArrayGetSize(pWinStates); + int32_t index = binarySearch(pWinStates, size, pKey, fillStateKeyCompare); + if (!isFlushedState(pFileState, pKey->ts, 0)|| index >= 0) { + // find the first position which is smaller than the pKey + if (index >= 0) { + SWinKey* pTmpKey = taosArrayGet(pWinStates, index); + if (winKeyCmprImpl(pTmpKey, pKey) == 0) { + goto _end; + } + } + index++; + void* tmp = taosArrayInsert(pWinStates, index, pKey); + QUERY_CHECK_NULL(tmp, code, lino, _end, terrno); + } + + if (size >= MAX_NUM_OF_CACHE_WIN) { + int32_t num = size - NUM_OF_CACHE_WIN; + taosArrayRemoveBatch(pWinStates, 0, num, NULL); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t getRowStatePrevRow(SStreamFileState* pFileState, const SWinKey* pKey, SWinKey* pResKey, void** ppVal, + int32_t* pVLen, int32_t* pWinCode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SArray* pWinStates = NULL; + SSHashObj* pSearchBuff = getSearchBuff(pFileState); + void* pState = getStateFileStore(pFileState); + void** ppBuff = (void**) tSimpleHashGet(pSearchBuff, &pKey->groupId, sizeof(uint64_t)); + if (ppBuff) { + pWinStates = (SArray*)(*ppBuff); + } else { + qDebug("===stream=== search buff is empty.group id:%" PRId64, pKey->groupId); + SStreamStateCur* pCur = streamStateSeekKeyPrev_rocksdb(pState, pKey); + void* tmpVal = NULL; + int32_t len = 0; + (*pWinCode) = streamStateGetGroupKVByCur_rocksdb(pState, pCur, 
pResKey, (const void**)&tmpVal, &len); + if ((*pWinCode) == TSDB_CODE_SUCCESS) { + SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); + if (!pNewPos || !pNewPos->pRowBuff) { + code = TSDB_CODE_OUT_OF_MEMORY; + QUERY_CHECK_CODE(code, lino, _end); + } + memcpy(pNewPos->pRowBuff, tmpVal, len); + taosMemoryFreeClear(tmpVal); + *pVLen = getRowStateRowSize(pFileState); + (*ppVal) = pNewPos; + } + streamStateFreeCur(pCur); + return code; + } + int32_t size = taosArrayGetSize(pWinStates); + int32_t index = binarySearch(pWinStates, size, pKey, fillStateKeyCompare); + if (index >= 0) { + SWinKey* pCurKey = taosArrayGet(pWinStates, index); + if (winKeyCmprImpl(pCurKey, pKey) == 0) { + index--; + } else { + qDebug("%s failed at line %d since do not find cur SWinKey. trigger may be force window close", __func__, __LINE__); + } + } + if (index == -1) { + SStreamStateCur* pCur = streamStateSeekKeyPrev_rocksdb(pState, pKey); + void* tmpVal = NULL; + int32_t len = 0; + (*pWinCode) = streamStateGetGroupKVByCur_rocksdb(pState, pCur, pResKey, (const void**)&tmpVal, &len); + if ((*pWinCode) == TSDB_CODE_SUCCESS) { + SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); + if (!pNewPos || !pNewPos->pRowBuff) { + code = TSDB_CODE_OUT_OF_MEMORY; + QUERY_CHECK_CODE(code, lino, _end); + } + memcpy(pNewPos->pRowBuff, tmpVal, len); + taosMemoryFreeClear(tmpVal); + *pVLen = getRowStateRowSize(pFileState); + (*ppVal) = pNewPos; + } + streamStateFreeCur(pCur); + return code; + } else { + SWinKey* pPrevKey = taosArrayGet(pWinStates, index); + *pResKey = *pPrevKey; + return addRowBuffIfNotExist(pFileState, (void*)pPrevKey, sizeof(SWinKey), ppVal, pVLen, pWinCode); + } + (*pWinCode) = TSDB_CODE_FAILED; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t addSearchItem(SStreamFileState* pFileState, SArray* pWinStates, const SWinKey* pKey) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 
0; + int32_t size = taosArrayGetSize(pWinStates); + int32_t index = binarySearch(pWinStates, size, pKey, fillStateKeyCompare); + if (!isFlushedState(pFileState, pKey->ts, 0) || index >= 0 || size == 0) { + if (index >= 0) { + SWinKey* pTmpKey = taosArrayGet(pWinStates, index); + if (winKeyCmprImpl(pTmpKey, pKey) == 0) { + goto _end; + } + } + index++; + void* tmp = taosArrayInsert(pWinStates, index, pKey); + QUERY_CHECK_NULL(tmp, code, lino, _end, terrno); + } + + if (size >= MAX_NUM_OF_CACHE_WIN) { + int32_t num = size - NUM_OF_CACHE_WIN; + taosArrayRemoveBatch(pWinStates, 0, num, NULL); + } +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +int32_t addArrayBuffIfNotExist(SSHashObj* pSearchBuff, uint64_t groupId, SArray** ppResStates) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SArray* pWinStates = NULL; + void** ppBuff = tSimpleHashGet(pSearchBuff, &groupId, sizeof(uint64_t)); + if (ppBuff) { + pWinStates = (SArray*)(*ppBuff); + } else { + pWinStates = taosArrayInit(16, sizeof(SWinKey)); + QUERY_CHECK_NULL(pWinStates, code, lino, _end, terrno); + + code = tSimpleHashPut(pSearchBuff, &groupId, sizeof(uint64_t), &pWinStates, POINTER_BYTES); + QUERY_CHECK_CODE(code, lino, _end); + } + + (*ppResStates) = pWinStates; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} diff --git a/source/libs/sync/CMakeLists.txt b/source/libs/sync/CMakeLists.txt index 6025070cb72..57e1ec63b2f 100644 --- a/source/libs/sync/CMakeLists.txt +++ b/source/libs/sync/CMakeLists.txt @@ -1,6 +1,10 @@ aux_source_directory(src SYNC_SRC) add_library(sync STATIC ${SYNC_SRC}) +if(${TD_DARWIN}) + target_compile_options(sync PRIVATE -Wno-error=deprecated-non-prototype) +endif() + target_link_libraries( sync PUBLIC common diff --git a/source/libs/sync/src/syncAppendEntries.c 
b/source/libs/sync/src/syncAppendEntries.c index e3f94c1c9a6..0345880874c 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -95,6 +95,8 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { bool accepted = false; SSyncRaftEntry* pEntry = NULL; bool resetElect = false; + const STraceId* trace = &pRpcMsg->info.traceId; + char tbuf[40] = {0}; // if already drop replica, do not process if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) { @@ -150,10 +152,10 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { goto _IGNORE; } - sTrace("vgId:%d, recv append entries msg. index:%" PRId64 ", term:%" PRId64 ", preLogIndex:%" PRId64 - ", prevLogTerm:%" PRId64 " commitIndex:%" PRId64 " entryterm:%" PRId64, - pMsg->vgId, pMsg->prevLogIndex + 1, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, - pEntry->term); + sGTrace("vgId:%d, recv append entries msg. index:%" PRId64 ", term:%" PRId64 ", preLogIndex:%" PRId64 + ", prevLogTerm:%" PRId64 " commitIndex:%" PRId64 " entryterm:%" PRId64, + pMsg->vgId, pMsg->prevLogIndex + 1, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, + pEntry->term); if (ths->fsmState == SYNC_FSM_STATE_INCOMPLETE) { pReply->fsmState = ths->fsmState; @@ -179,6 +181,11 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { sTrace("vgId:%d, update commit return index %" PRId64 "", ths->vgId, returnIndex); } + TRACE_SET_MSGID(&(rpcRsp.info.traceId), tGenIdPI64()); + trace = &(rpcRsp.info.traceId); + sGTrace("vgId:%d, send append reply matchIndex:%" PRId64 " term:%" PRId64 " lastSendIndex:%" PRId64 + " to dest: 0x%016" PRIx64, + ths->vgId, pReply->matchIndex, pReply->term, pReply->lastSendIndex, pReply->destId.addr); // ack, i.e. 
send response TAOS_CHECK_RETURN(syncNodeSendMsgById(&pReply->destId, ths, &rpcRsp)); diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 005cf4337d4..a7f36be9e92 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -43,6 +43,8 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { int32_t code = 0; SyncAppendEntriesReply* pMsg = (SyncAppendEntriesReply*)pRpcMsg->pCont; int32_t ret = 0; + const STraceId* trace = &pRpcMsg->info.traceId; + char tbuf[40] = {0}; // if already drop replica, do not process if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) { @@ -63,8 +65,8 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { return TSDB_CODE_SYN_WRONG_TERM; } - sTrace("vgId:%d, received append entries reply. srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "", - pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex); + sGTrace("vgId:%d, received append entries reply. srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "", + pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex); if (pMsg->success) { SyncIndex oldMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId)); diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 9f6acf6d837..efb71b57148 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -1026,6 +1026,14 @@ int32_t syncLogReplRecover(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEn int32_t code = 0; if (pMgr->restored != false) return TSDB_CODE_SYN_INTERNAL_ERROR; + sTrace("vgId:%d, begin to recover sync log repl. 
peer: dnode:%d (%" PRIx64 "), repl-mgr:[%" PRId64 ", %" PRId64 + ", %" PRId64 ") restore:%d, buffer: [%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 + "), msg: {lastSendIndex:%" PRId64 ", matchIndex:%" PRId64 ", fsmState:%d, success:%d, lastMatchTerm:%" PRId64 + "}", + pNode->vgId, DID(&destId), destId.addr, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pMgr->restored, + pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex, pMsg->lastSendIndex, pMsg->matchIndex, + pMsg->fsmState, pMsg->success, pMsg->lastMatchTerm); + if (pMgr->endIndex == 0) { if (pMgr->startIndex != 0) return TSDB_CODE_SYN_INTERNAL_ERROR; if (pMgr->matchIndex != 0) return TSDB_CODE_SYN_INTERNAL_ERROR; @@ -1171,6 +1179,11 @@ int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex inde int64_t nowMs = taosGetMonoTimestampMs(); int32_t code = 0; + sTrace("vgId:%d, begin to probe peer:%" PRIx64 " with msg of index:%" PRId64 ". repl-mgr:[%" PRId64 ", %" PRId64 + ", %" PRId64 "), restored:%d", + pNode->vgId, pNode->replicasId[pMgr->peerId].addr, index, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, + pMgr->restored); + if (pMgr->endIndex > pMgr->startIndex && nowMs < pMgr->states[pMgr->startIndex % pMgr->size].timeMs + retryMaxWaitMs) { return 0; @@ -1206,6 +1219,10 @@ int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex inde int32_t syncLogReplAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { if (!pMgr->restored) return TSDB_CODE_SYN_INTERNAL_ERROR; + sTrace("vgId:%d, begin to attempt replicate log entries from end to match. 
repl-mgr:[%" PRId64 ", %" PRId64 + ", %" PRId64 "), restore:%d", + pNode->vgId, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pMgr->restored); + SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; int32_t batchSize = TMAX(1, pMgr->size >> (4 + pMgr->retryBackoff)); int32_t code = 0; @@ -1527,11 +1544,12 @@ int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex ind goto _err; } + TRACE_SET_MSGID(&(msgOut.info.traceId), tGenIdPI64()); + STraceId* trace = &(msgOut.info.traceId); + sGTrace("vgId:%d, replicate one msg index:%" PRId64 " term:%" PRId64 " prevterm:%" PRId64 " to dest: 0x%016" PRIx64, + pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr); TAOS_CHECK_GOTO(syncNodeSendAppendEntries(pNode, pDestId, &msgOut), &lino, _err); - sTrace("vgId:%d, replicate one msg index:%" PRId64 " term:%" PRId64 " prevterm:%" PRId64 " to dest: 0x%016" PRIx64, - pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr); - if (!inBuf) { syncEntryDestroy(pEntry); pEntry = NULL; diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index 212a75c2ae4..9058b6ecefa 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -152,8 +152,8 @@ static void syncLogReplStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLe for (int32_t i = 0; i < pSyncNode->replicaNum; i++) { SSyncLogReplMgr* pMgr = pSyncNode->logReplMgrs[i]; if (pMgr == NULL) break; - len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 " %" PRId64 ", %" PRId64 "]", i, pMgr->restored, - pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex); + len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 ", %" PRId64 ", %" PRId64 "]", i, pMgr->restored, + pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex); if (i + 1 < pSyncNode->replicaNum) { len += tsnprintf(buf + len, bufLen - len, "%s", ", "); } @@ -234,14 +234,14 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo ", 
elect-times:%d, as-leader-times:%d, as-assigned-leader-times:%d, cfg-ch-times:%d, hb-slow:%d, hbr-slow:%d, " "aq-items:%d, snaping:%" PRId64 ", replicas:%d, last-cfg:%" PRId64 ", chging:%d, restore:%d, quorum:%d, elect-lc-timer:%" PRId64 ", hb:%" PRId64 - ", buffer:%s, repl-mgrs:%s, members:%s, hb:%s, hb-reply:%s", + ", buffer:%s, repl-mgrs:%s, members:%s, hb:%s, hb-reply:%s, arb-token:%s", pNode->vgId, eventLog, syncStr(pNode->state), currentTerm, pNode->commitIndex, pNode->assignedCommitIndex, appliedIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->electNum, pNode->becomeLeaderNum, pNode->becomeAssignedLeaderNum, pNode->configChangeNum, pNode->hbSlowNum, pNode->hbrSlowNum, aqItems, pNode->snapshottingIndex, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, bufferStatesStr, - replMgrStatesStr, cfgStr, hbTimeStr, hbrTimeStr); + replMgrStatesStr, cfgStr, hbTimeStr, hbrTimeStr, pNode->arbToken); } } diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index 3a4f11ac81c..5c79b379eda 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -278,19 +278,19 @@ bool transAsyncPoolIsEmpty(SAsyncPool* pool); } \ } while (0) -#define ASYNC_CHECK_HANDLE(idMgt, id, exh1) \ - do { \ - if (id > 0) { \ - SExHandle* exh2 = transAcquireExHandle(idMgt, id); \ - if (exh2 == NULL || exh1 != exh2 || (exh2 != NULL && exh2->refId != id)) { \ - tError("handle not match, exh1:%p, exh2:%p, refId:%"PRId64"", exh1, exh2, id); \ - code = TSDB_CODE_INVALID_MSG; \ - goto _return1; \ - } \ - } else { \ - tError("invalid handle to release"); \ - goto _return2; \ - } \ +#define ASYNC_CHECK_HANDLE(idMgt, id, exh1) \ + do { \ + if (id > 0) { \ + SExHandle* exh2 = transAcquireExHandle(idMgt, id); \ + if (exh2 == NULL || exh1 
!= exh2 || (exh2 != NULL && exh2->refId != id)) { \ + tDebug("handle not match, exh1:%p, exh2:%p, refId:%" PRId64 "", exh1, exh2, id); \ + code = TSDB_CODE_INVALID_MSG; \ + goto _return1; \ + } \ + } else { \ + tDebug("invalid handle to release"); \ + goto _return2; \ + } \ } while (0) int32_t transInitBuffer(SConnBuffer* buf); @@ -353,6 +353,7 @@ typedef struct { queue node; void (*freeFunc)(void* arg); int32_t size; + int8_t inited; } STransQueue; /* diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index c3e214b5e3b..2aeffc63958 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -127,10 +127,12 @@ typedef struct { typedef struct SCliReq { SReqCtx* ctx; queue q; + queue sendQ; STransMsgType type; uint64_t st; int64_t seq; int32_t sent; //(0: no send, 1: alread sent) + int8_t inSendQ; STransMsg msg; int8_t inRetry; @@ -274,6 +276,8 @@ static FORCE_INLINE void destroyReqAndAhanlde(void* cmsg); static FORCE_INLINE int cliRBChoseIdx(STrans* pInst); static FORCE_INLINE void destroyReqCtx(SReqCtx* ctx); +static FORCE_INLINE void removeReqFromSendQ(SCliReq* pReq); + static int32_t cliHandleState_mayUpdateState(SCliConn* pConn, SCliReq* pReq); static int32_t cliHandleState_mayHandleReleaseResp(SCliConn* conn, STransMsgHead* pHead); static int32_t cliHandleState_mayCreateAhandle(SCliConn* conn, STransMsgHead* pHead, STransMsg* pResp); @@ -453,6 +457,7 @@ static bool filteBySeq(void* key, void* arg) { SFiterArg* targ = arg; SCliReq* pReq = QUEUE_DATA(key, SCliReq, q); if (pReq->seq == targ->seq && pReq->msg.msgType + 1 == targ->msgType) { + removeReqFromSendQ(pReq); return true; } else { return false; @@ -539,6 +544,7 @@ bool filterByQid(void* key, void* arg) { SCliReq* pReq = QUEUE_DATA(key, SCliReq, q); if (pReq->msg.info.qId == *qid) { + removeReqFromSendQ(pReq); return true; } else { return false; @@ -600,7 +606,7 @@ int32_t cliHandleState_mayHandleReleaseResp(SCliConn* conn, 
STransMsgHead* pHead queue* el = QUEUE_HEAD(&set); QUEUE_REMOVE(el); SCliReq* pReq = QUEUE_DATA(el, SCliReq, q); - + removeReqFromSendQ(pReq); STraceId* trace = &pReq->msg.info.traceId; tGDebug("start to free msg %p", pReq); destroyReqWrapper(pReq, pThrd); @@ -614,7 +620,7 @@ int32_t cliHandleState_mayCreateAhandle(SCliConn* conn, STransMsgHead* pHead, ST int32_t code = 0; int64_t qId = taosHton64(pHead->qid); if (qId == 0) { - return 0; + return TSDB_CODE_RPC_NO_STATE; } STransCtx* pCtx = taosHashGet(conn->pQTable, &qId, sizeof(qId)); @@ -700,6 +706,7 @@ void cliHandleResp(SCliConn* conn) { tstrerror(code)); } } + removeReqFromSendQ(pReq); code = cliBuildRespFromCont(pReq, &resp, pHead); STraceId* trace = &resp.info.traceId; @@ -905,6 +912,10 @@ static void addConnToPool(void* pool, SCliConn* conn) { } SCliThrd* thrd = conn->hostThrd; + if (thrd->quit == true) { + return; + } + cliResetConnTimer(conn); if (conn->list == NULL && conn->dstAddr != NULL) { conn->list = taosHashGet((SHashObj*)pool, conn->dstAddr, strlen(conn->dstAddr)); @@ -1092,6 +1103,7 @@ static int32_t cliCreateConn(SCliThrd* pThrd, SCliConn** pCliConn, char* ip, int transQueueDestroy(&conn->reqsToSend); transQueueDestroy(&conn->reqsSentOut); taosMemoryFree(conn->dstAddr); + taosMemoryFree(conn->ipStr); } tError("failed to create conn, code:%d", code); taosMemoryFree(conn); @@ -1216,6 +1228,7 @@ static FORCE_INLINE void destroyReqInQueue(SCliConn* conn, queue* set, int32_t c QUEUE_REMOVE(el); SCliReq* pReq = QUEUE_DATA(el, SCliReq, q); + removeReqFromSendQ(pReq); notifyAndDestroyReq(conn, pReq, code); } } @@ -1246,8 +1259,8 @@ static void cliHandleException(SCliConn* conn) { } cliDestroyAllQidFromThrd(conn); - QUEUE_REMOVE(&conn->q); - if (conn->list) { + if (pThrd->quit == false && conn->list) { + QUEUE_REMOVE(&conn->q); conn->list->totalSize -= 1; conn->list = NULL; } @@ -1273,7 +1286,8 @@ static void cliHandleException(SCliConn* conn) { bool filterToRmReq(void* h, void* arg) { queue* el = h; 
SCliReq* pReq = QUEUE_DATA(el, SCliReq, q); - if (pReq->sent == 1 && REQUEST_NO_RESP(&pReq->msg)) { + if (pReq->sent == 1 && pReq->inSendQ == 0 && REQUEST_NO_RESP(&pReq->msg)) { + removeReqFromSendQ(pReq); return true; } return false; @@ -1300,12 +1314,18 @@ static void cliBatchSendCb(uv_write_t* req, int status) { SCliThrd* pThrd = conn->hostThrd; STrans* pInst = pThrd->pInst; + while (!QUEUE_IS_EMPTY(&wrapper->node)) { + queue* h = QUEUE_HEAD(&wrapper->node); + SCliReq* pReq = QUEUE_DATA(h, SCliReq, sendQ); + removeReqFromSendQ(pReq); + } freeWReqToWQ(&conn->wq, wrapper); int32_t ref = transUnrefCliHandle(conn); if (ref <= 0) { return; } + cliConnRmReqs(conn); if (status != 0) { tDebug("%s conn %p failed to send msg since %s", CONN_GET_INST_LABEL(conn), conn, uv_err_name(status)); @@ -1340,6 +1360,9 @@ bool cliConnMayAddUserInfo(SCliConn* pConn, STransMsgHead** ppHead, int32_t* msg } STransMsgHead* pHead = *ppHead; STransMsgHead* tHead = taosMemoryCalloc(1, *msgLen + sizeof(pInst->user)); + if (tHead == NULL) { + return false; + } memcpy((char*)tHead, (char*)pHead, TRANS_MSG_OVERHEAD); memcpy((char*)tHead + TRANS_MSG_OVERHEAD, pInst->user, sizeof(pInst->user)); @@ -1398,6 +1421,10 @@ int32_t cliBatchSend(SCliConn* pConn, int8_t direct) { int j = 0; int32_t batchLimit = 64; + + queue reqToSend; + QUEUE_INIT(&reqToSend); + while (!transQueueEmpty(&pConn->reqsToSend)) { queue* h = transQueuePop(&pConn->reqsToSend); SCliReq* pCliMsg = QUEUE_DATA(h, SCliReq, q); @@ -1422,6 +1449,10 @@ int32_t cliBatchSend(SCliConn* pConn, int8_t direct) { if (cliConnMayAddUserInfo(pConn, &pHead, &msgLen)) { content = transContFromHead(pHead); contLen = transContLenFromMsg(msgLen); + } else { + if (pConn->userInited == 0) { + return terrno; + } } if (pHead->comp == 0) { pHead->noResp = REQUEST_NO_RESP(pReq) ? 
1 : 0; @@ -1447,30 +1478,51 @@ int32_t cliBatchSend(SCliConn* pConn, int8_t direct) { wb[j++] = uv_buf_init((char*)pHead, msgLen); totalLen += msgLen; - pCliMsg->sent = 1; pCliMsg->seq = pConn->seq; + pCliMsg->sent = 1; STraceId* trace = &pCliMsg->msg.info.traceId; tGDebug("%s conn %p %s is sent to %s, local info:%s, seq:%" PRId64 ", sid:%" PRId64 "", CONN_GET_INST_LABEL(pConn), pConn, TMSG_INFO(pReq->msgType), pConn->dst, pConn->src, pConn->seq, pReq->info.qId); + transQueuePush(&pConn->reqsSentOut, &pCliMsg->q); + QUEUE_INIT(&pCliMsg->sendQ); + QUEUE_PUSH(&reqToSend, &pCliMsg->sendQ); + + pCliMsg->inSendQ = 1; if (j >= batchLimit) { break; } } transRefCliHandle(pConn); uv_write_t* req = allocWReqFromWQ(&pConn->wq, pConn); + if (req == NULL) { tError("%s conn %p failed to send msg since %s", CONN_GET_INST_LABEL(pConn), pConn, tstrerror(terrno)); + while (!QUEUE_IS_EMPTY(&reqToSend)) { + queue* h = QUEUE_HEAD(&reqToSend); + SCliReq* pCliMsg = QUEUE_DATA(h, SCliReq, sendQ); + removeReqFromSendQ(pCliMsg); + } + transRefCliHandle(pConn); return terrno; } + SWReqsWrapper* pWreq = req->data; + + QUEUE_MOVE(&reqToSend, &pWreq->node); tDebug("%s conn %p start to send msg, batch size:%d, len:%d", CONN_GET_INST_LABEL(pConn), pConn, j, totalLen); int32_t ret = uv_write(req, (uv_stream_t*)pConn->stream, wb, j, cliBatchSendCb); if (ret != 0) { tError("%s conn %p failed to send msg since %s", CONN_GET_INST_LABEL(pConn), pConn, uv_err_name(ret)); + while (!QUEUE_IS_EMPTY(&pWreq->node)) { + queue* h = QUEUE_HEAD(&pWreq->node); + SCliReq* pCliMsg = QUEUE_DATA(h, SCliReq, sendQ); + removeReqFromSendQ(pCliMsg); + } + freeWReqToWQ(&pConn->wq, req->data); code = TSDB_CODE_THIRDPARTY_ERROR; TAOS_UNUSED(transUnrefCliHandle(pConn)); @@ -1556,6 +1608,7 @@ static int32_t cliDoConn(SCliThrd* pThrd, SCliConn* conn) { ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb); if (ret != 0) { tError("failed connect to %s since %s", conn->dstAddr, 
uv_err_name(ret)); + cliMayUpdateFqdnCache(pThrd->fqdn2ipCache, conn->dstAddr); TAOS_CHECK_GOTO(TSDB_CODE_THIRDPARTY_ERROR, &lino, _exception1); } @@ -1647,6 +1700,7 @@ void cliConnCb(uv_connect_t* req, int status) { if (status != 0) { tDebug("%s conn %p failed to connect to %s since %s", CONN_GET_INST_LABEL(pConn), pConn, pConn->dstAddr, uv_strerror(status)); + cliMayUpdateFqdnCache(pThrd->fqdn2ipCache, pConn->dstAddr); TAOS_UNUSED(transUnrefCliHandle(pConn)); return; } @@ -1798,7 +1852,7 @@ static FORCE_INLINE int32_t cliUpdateFqdnCache(SHashObj* cache, char* fqdn) { size_t len = strlen(fqdn); uint32_t* v = taosHashGet(cache, fqdn, len); if (addr != *v) { - char old[TD_IP_LEN] = {0}, new[TD_IP_LEN] = {0}; + char old[TSDB_FQDN_LEN] = {0}, new[TSDB_FQDN_LEN] = {0}; tinet_ntoa(old, *v); tinet_ntoa(new, addr); tWarn("update ip of fqdn:%s, old: %s, new: %s", fqdn, old, new); @@ -1818,7 +1872,7 @@ static void cliMayUpdateFqdnCache(SHashObj* cache, char* dst) { if (dst[i] == ':') break; } if (i > 0) { - char fqdn[TSDB_FQDN_LEN + 1] = {0}; + char fqdn[TSDB_FQDN_LEN] = {0}; memcpy(fqdn, dst, i); TAOS_UNUSED(cliUpdateFqdnCache(cache, fqdn)); } @@ -2182,11 +2236,21 @@ static void cliAsyncCb(uv_async_t* handle) { if (pThrd->stopMsg != NULL) cliHandleQuit(pThrd, pThrd->stopMsg); } +static FORCE_INLINE void removeReqFromSendQ(SCliReq* pReq) { + if (pReq == NULL || pReq->inSendQ == 0) { + return; + } + QUEUE_REMOVE(&pReq->sendQ); + pReq->inSendQ = 0; +} + static FORCE_INLINE void destroyReq(void* arg) { SCliReq* pReq = arg; if (pReq == NULL) { return; } + + removeReqFromSendQ(pReq); STraceId* trace = &pReq->msg.info.traceId; tGDebug("free memory:%p, free ctx: %p", pReq, pReq->ctx); @@ -2855,6 +2919,7 @@ bool cliMayRetry(SCliConn* pConn, SCliReq* pReq, STransMsg* pResp) { noDelay = cliResetEpset(pCtx, pResp, false); transFreeMsg(pResp->pCont); } + pResp->pCont = NULL; if (code != TSDB_CODE_RPC_BROKEN_LINK && code != TSDB_CODE_RPC_NETWORK_UNAVAIL && code != TSDB_CODE_SUCCESS) { 
// save one internal code pCtx->retryCode = code; @@ -2961,6 +3026,7 @@ int32_t cliNotifyCb(SCliConn* pConn, SCliReq* pReq, STransMsg* pResp) { STrans* pInst = pThrd->pInst; if (pReq != NULL) { + removeReqFromSendQ(pReq); if (pResp->code != TSDB_CODE_SUCCESS) { if (cliMayRetry(pConn, pReq, pResp)) { return TSDB_CODE_RPC_ASYNC_IN_PROCESS; @@ -3090,7 +3156,7 @@ int32_t transReleaseCliHandle(void* handle) { static int32_t transInitMsg(void* pInstRef, const SEpSet* pEpSet, STransMsg* pReq, STransCtx* ctx, SCliReq** pCliMsg) { int32_t code = 0; - TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); + if (pReq->info.traceId.msgId == 0) TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); SCliReq* pCliReq = NULL; SReqCtx* pCtx = taosMemoryCalloc(1, sizeof(SReqCtx)); @@ -3114,7 +3180,7 @@ static int32_t transInitMsg(void* pInstRef, const SEpSet* pEpSet, STransMsg* pRe if (ctx != NULL) pCtx->userCtx = *ctx; pCliReq = taosMemoryCalloc(1, sizeof(SCliReq)); - if (pReq == NULL) { + if (pCliReq == NULL) { TAOS_CHECK_GOTO(terrno, NULL, _exception); } @@ -3183,6 +3249,7 @@ int32_t transSendRequestWithId(void* pInstRef, const SEpSet* pEpSet, STransMsg* return TSDB_CODE_INVALID_PARA; } int32_t code = 0; + int8_t transIdInited = 0; STrans* pInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)pInstRef); if (pInst == NULL) { @@ -3200,6 +3267,7 @@ int32_t transSendRequestWithId(void* pInstRef, const SEpSet* pEpSet, STransMsg* if (exh == NULL) { TAOS_CHECK_GOTO(TSDB_CODE_RPC_MODULE_QUIT, NULL, _exception); } + transIdInited = 1; pReq->info.handle = (void*)(*transpointId); pReq->info.qId = *transpointId; @@ -3216,9 +3284,6 @@ int32_t transSendRequestWithId(void* pInstRef, const SEpSet* pEpSet, STransMsg* return (code == TSDB_CODE_RPC_ASYNC_MODULE_QUIT ? 
TSDB_CODE_RPC_MODULE_QUIT : code); } - // if (pReq->msgType == TDMT_SCH_DROP_TASK) { - // TAOS_UNUSED(transReleaseCliHandle(pReq->info.handle)); - // } transReleaseExHandle(transGetRefMgt(), *transpointId); transReleaseExHandle(transGetInstMgt(), (int64_t)pInstRef); return 0; @@ -3226,6 +3291,7 @@ int32_t transSendRequestWithId(void* pInstRef, const SEpSet* pEpSet, STransMsg* _exception: transFreeMsg(pReq->pCont); pReq->pCont = NULL; + if (transIdInited) transReleaseExHandle(transGetRefMgt(), *transpointId); transReleaseExHandle(transGetInstMgt(), (int64_t)pInstRef); tError("failed to send request since %s", tstrerror(code)); @@ -3641,6 +3707,7 @@ bool filterTimeoutReq(void* key, void* arg) { if (pReq->msg.info.qId == 0 && !REQUEST_NO_RESP(&pReq->msg) && pReq->ctx) { int64_t elapse = ((st - pReq->st) / 1000000); if (listArg && elapse >= listArg->pInst->readTimeout) { + removeReqFromSendQ(pReq); return true; } else { return false; diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 35ca6678b87..66bd4a08f3b 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -423,6 +423,7 @@ int32_t transQueueInit(STransQueue* wq, void (*freeFunc)(void* arg)) { QUEUE_INIT(&wq->node); wq->freeFunc = (void (*)(void*))freeFunc; wq->size = 0; + wq->inited = 1; return 0; } void transQueuePush(STransQueue* q, void* arg) { @@ -497,6 +498,7 @@ void transQueueRemove(STransQueue* q, void* e) { bool transQueueEmpty(STransQueue* q) { return q->size == 0 ? 
true : false; } void transQueueClear(STransQueue* q) { + if (q->inited == 0) return; while (!QUEUE_IS_EMPTY(&q->node)) { queue* h = QUEUE_HEAD(&q->node); QUEUE_REMOVE(h); diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 5723f2ff237..d02bfb8281e 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -1289,8 +1289,8 @@ static FORCE_INLINE SSvrConn* createConn(void* hThrd) { int32_t code = 0; SWorkThrd* pThrd = hThrd; int32_t lino; - - SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn)); + int8_t wqInited = 0; + SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn)); if (pConn == NULL) { TAOS_CHECK_GOTO(TSDB_CODE_OUT_OF_MEMORY, &lino, _end); } @@ -1340,6 +1340,7 @@ static FORCE_INLINE SSvrConn* createConn(void* hThrd) { code = initWQ(&pConn->wq); TAOS_CHECK_GOTO(code, &lino, _end); + wqInited = 1; // init client handle pConn->pTcp = (uv_tcp_t*)taosMemoryMalloc(sizeof(uv_tcp_t)); @@ -1372,7 +1373,7 @@ static FORCE_INLINE SSvrConn* createConn(void* hThrd) { transDestroyBuffer(&pConn->readBuf); taosHashCleanup(pConn->pQTable); taosMemoryFree(pConn->pTcp); - destroyWQ(&pConn->wq); + if (wqInited) destroyWQ(&pConn->wq); taosMemoryFree(pConn->buf); taosMemoryFree(pConn); pConn = NULL; diff --git a/source/libs/transport/test/CMakeLists.txt b/source/libs/transport/test/CMakeLists.txt index e68e93c48e3..f30dfc56a2d 100644 --- a/source/libs/transport/test/CMakeLists.txt +++ b/source/libs/transport/test/CMakeLists.txt @@ -1,5 +1,6 @@ add_executable(transportTest "") add_executable(transUT "") +add_executable(transUT2 "") add_executable(svrBench "") add_executable(cliBench "") add_executable(httpBench "") @@ -9,7 +10,11 @@ target_sources(transUT "transUT.cpp" ) -target_sources(transportTest +target_sources(transUT2 + PRIVATE + "transUT2.cpp" +) +target_sources(transportTest PRIVATE "transportTests.cpp" ) @@ -25,16 +30,16 @@ target_sources(cliBench 
target_sources(httpBench PRIVATE "http_test.c" -) +) -target_include_directories(transportTest +target_include_directories(transportTest PUBLIC - "${TD_SOURCE_DIR}/include/libs/transport" + "${TD_SOURCE_DIR}/include/libs/transport" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_link_libraries (transportTest - os +target_link_libraries(transportTest + os util common gtest_main @@ -42,67 +47,81 @@ target_link_libraries (transportTest function ) -target_link_libraries (transUT - os +target_link_libraries(transUT + os util common gtest_main - transport + transport ) target_include_directories(transUT PUBLIC - "${TD_SOURCE_DIR}/include/libs/transport" + "${TD_SOURCE_DIR}/include/libs/transport" + "${CMAKE_CURRENT_SOURCE_DIR}/../inc" +) + +target_link_libraries(transUT2 + os + util + common + gtest_main + transport +) + +target_include_directories(transUT2 + PUBLIC + "${TD_SOURCE_DIR}/include/libs/transport" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) target_include_directories(svrBench PUBLIC - "${TD_SOURCE_DIR}/include/libs/transport" + "${TD_SOURCE_DIR}/include/libs/transport" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_link_libraries (svrBench - os +target_link_libraries(svrBench + os util common gtest_main - transport + transport ) target_include_directories(cliBench PUBLIC - "${TD_SOURCE_DIR}/include/libs/transport" + "${TD_SOURCE_DIR}/include/libs/transport" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) target_include_directories(httpBench PUBLIC - "${TD_SOURCE_DIR}/include/libs/transport" + "${TD_SOURCE_DIR}/include/libs/transport" "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -target_link_libraries (cliBench - os +target_link_libraries(cliBench + os util common gtest_main - transport + transport ) target_link_libraries(httpBench - os + os util common gtest_main - transport + transport ) add_test( - NAME transUT - COMMAND transUT + NAME transUT + COMMAND transUT ) add_test( - NAME transUtilUt + NAME transUtilUt COMMAND transportTest ) diff --git 
a/source/libs/transport/test/cliBench.c b/source/libs/transport/test/cliBench.c index e73c209d558..0a5cb5d1bb2 100644 --- a/source/libs/transport/test/cliBench.c +++ b/source/libs/transport/test/cliBench.c @@ -53,8 +53,6 @@ static void processResponse(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen, pMsg->code); - if (pEpSet) pInfo->epSet = *pEpSet; - rpcFreeCont(pMsg->pCont); tsem_post(&pInfo->rspSem); } @@ -72,12 +70,12 @@ static void *sendRequest(void *param) { rpcMsg.pCont = rpcMallocCont(pInfo->msgSize); rpcMsg.contLen = pInfo->msgSize; rpcMsg.info.ahandle = pInfo; - rpcMsg.info.noResp = 1; + rpcMsg.info.noResp = 0; rpcMsg.msgType = 1; tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num); rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL); if (pInfo->num % 20000 == 0) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); - // tsem_wait(&pInfo->rspSem); + tsem_wait(&pInfo->rspSem); } tDebug("thread:%d, it is over", pInfo->index); @@ -110,17 +108,15 @@ int main(int argc, char *argv[]) { rpcInit.label = "APP"; rpcInit.numOfThreads = 1; rpcInit.cfp = processResponse; - rpcInit.sessions = 100; + rpcInit.sessions = 1000; rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInit.user = "michael"; rpcInit.connType = TAOS_CONN_CLIENT; - rpcInit.connLimitNum = 10; - rpcInit.connLimitLock = 1; - rpcInit.shareConnLimit = 16 * 1024; + rpcInit.shareConnLimit = tsShareConnLimit; rpcInit.supportBatch = 1; - - rpcDebugFlag = 135; + rpcInit.compressSize = -1; + rpcDebugFlag = 143; for (int i = 1; i < argc; ++i) { if (strcmp(argv[i], "-p") == 0 && i < argc - 1) { } else if (strcmp(argv[i], "-i") == 0 && i < argc - 1) { @@ -139,6 +135,10 @@ int main(int argc, char *argv[]) { } else if (strcmp(argv[i], "-u") == 0 && i < argc - 1) { } else if (strcmp(argv[i], "-k") == 0 && i < argc - 1) { } else if 
(strcmp(argv[i], "-spi") == 0 && i < argc - 1) { + } else if (strcmp(argv[i], "-l") == 0 && i < argc - 1) { + rpcInit.shareConnLimit = atoi(argv[++i]); + } else if (strcmp(argv[i], "-c") == 0 && i < argc - 1) { + rpcInit.compressSize = atoi(argv[++i]); } else if (strcmp(argv[i], "-d") == 0 && i < argc - 1) { rpcDebugFlag = atoi(argv[++i]); } else { @@ -150,13 +150,15 @@ int main(int argc, char *argv[]) { printf(" [-n requests]: number of requests per thread, default is:%d\n", numOfReqs); printf(" [-u user]: user name for the connection, default is:%s\n", rpcInit.user); printf(" [-d debugFlag]: debug flag, default:%d\n", rpcDebugFlag); + printf(" [-c compressSize]: compress size, default:%d\n", tsCompressMsgSize); + printf(" [-l shareConnLimit]: share conn limit, default:%d\n", tsShareConnLimit); printf(" [-h help]: print out this help\n\n"); exit(0); } } initLogEnv(); - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit.compatibilityVer)); void *pRpc = rpcOpen(&rpcInit); if (pRpc == NULL) { tError("failed to initialize RPC"); @@ -168,18 +170,18 @@ int main(int argc, char *argv[]) { int64_t now = taosGetTimestampUs(); - SInfo *pInfo = (SInfo *)taosMemoryCalloc(1, sizeof(SInfo) * appThreads); - SInfo *p = pInfo; + SInfo **pInfo = (SInfo **)taosMemoryCalloc(1, sizeof(SInfo *) * appThreads); for (int i = 0; i < appThreads; ++i) { - pInfo->index = i; - pInfo->epSet = epSet; - pInfo->numOfReqs = numOfReqs; - pInfo->msgSize = msgSize; - tsem_init(&pInfo->rspSem, 0, 0); - pInfo->pRpc = pRpc; - - taosThreadCreate(&pInfo->thread, NULL, sendRequest, pInfo); - pInfo++; + SInfo *p = taosMemoryCalloc(1, sizeof(SInfo)); + p->index = i; + p->epSet = epSet; + p->numOfReqs = numOfReqs; + p->msgSize = msgSize; + tsem_init(&p->rspSem, 0, 0); + p->pRpc = pRpc; + pInfo[i] = p; + + taosThreadCreate(&p->thread, NULL, sendRequest, pInfo[i]); } do { @@ -192,12 +194,14 @@ int main(int argc, char *argv[]) { tInfo("Performance: %.3f requests 
per second, msgSize:%d bytes", 1000.0 * numOfReqs * appThreads / usedTime, msgSize); for (int i = 0; i < appThreads; i++) { - SInfo *pInfo = p; - taosThreadJoin(pInfo->thread, NULL); - p++; + SInfo *p = pInfo[i]; + taosThreadJoin(p->thread, NULL); + taosMemoryFree(p); } - int ch = getchar(); - UNUSED(ch); + taosMemoryFree(pInfo); + + // int ch = getchar(); + // UNUSED(ch); taosCloseLog(); diff --git a/source/libs/transport/test/svrBench.c b/source/libs/transport/test/svrBench.c index 6408e4dcb2d..44299d86a3a 100644 --- a/source/libs/transport/test/svrBench.c +++ b/source/libs/transport/test/svrBench.c @@ -76,23 +76,6 @@ void *processShellMsg(void *arg) { for (int i = 0; i < numOfMsgs; ++i) { taosGetQitem(qall, (void **)&pRpcMsg); - - if (pDataFile != NULL) { - if (taosWriteFile(pDataFile, pRpcMsg->pCont, pRpcMsg->contLen) < 0) { - tInfo("failed to write data file, reason:%s", strerror(errno)); - } - } - } - - if (commit >= 2) { - num += numOfMsgs; - // if (taosFsync(pDataFile) < 0) { - // tInfo("failed to flush data to file, reason:%s", strerror(errno)); - //} - - if (num % 10000 == 0) { - tInfo("%d request have been written into disk", num); - } } taosResetQitems(qall); @@ -107,16 +90,7 @@ void *processShellMsg(void *arg) { rpcMsg.code = 0; rpcSendResponse(&rpcMsg); - void *handle = pRpcMsg->info.handle; taosFreeQitem(pRpcMsg); - //{ - // SRpcMsg nRpcMsg = {0}; - // nRpcMsg.pCont = rpcMallocCont(msgSize); - // nRpcMsg.contLen = msgSize; - // nRpcMsg.info.handle = handle; - // nRpcMsg.code = TSDB_CODE_CTG_NOT_READY; - // rpcSendResponse(&nRpcMsg); - //} } taosUpdateItemSize(qinfo.queue, numOfMsgs); @@ -149,12 +123,13 @@ int main(int argc, char *argv[]) { rpcInit.localPort = 7000; memcpy(rpcInit.localFqdn, "localhost", strlen("localhost")); rpcInit.label = "SER"; - rpcInit.numOfThreads = 1; + rpcInit.numOfThreads = 10; rpcInit.cfp = processRequestMsg; rpcInit.idleTime = 2 * 1500; - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + 
taosVersionStrToInt(td_version, &(rpcInit.compatibilityVer)); rpcDebugFlag = 131; + rpcInit.compressSize = -1; for (int i = 1; i < argc; ++i) { if (strcmp(argv[i], "-p") == 0 && i < argc - 1) { @@ -190,7 +165,7 @@ int main(int argc, char *argv[]) { rpcInit.connType = TAOS_CONN_SERVER; initLogEnv(); - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit.compatibilityVer)); void *pRpc = rpcOpen(&rpcInit); if (pRpc == NULL) { tError("failed to start RPC server"); @@ -205,8 +180,8 @@ int main(int argc, char *argv[]) { if (pDataFile == NULL) tInfo("failed to open data file, reason:%s", strerror(errno)); } - int32_t numOfAthread = 5; - multiQ = taosMemoryMalloc(sizeof(numOfAthread)); + int32_t numOfAthread = 1; + multiQ = taosMemoryMalloc(sizeof(MultiThreadQhandle)); multiQ->numOfThread = numOfAthread; multiQ->qhandle = (STaosQueue **)taosMemoryMalloc(sizeof(STaosQueue *) * numOfAthread); multiQ->qset = (STaosQset **)taosMemoryMalloc(sizeof(STaosQset *) * numOfAthread); @@ -221,11 +196,6 @@ int main(int argc, char *argv[]) { threads[i].idx = i; taosThreadCreate(&(threads[i].thread), NULL, processShellMsg, (void *)&threads[i]); } - // qhandle = taosOpenQueue(); - // qset = taosOpenQset(); - // taosAddIntoQset(qset, qhandle, NULL); - - // processShellMsg(); if (pDataFile != NULL) { taosCloseFile(&pDataFile); diff --git a/source/libs/transport/test/transUT.cpp b/source/libs/transport/test/transUT.cpp index e57d01bcbc7..8e396d59d77 100644 --- a/source/libs/transport/test/transUT.cpp +++ b/source/libs/transport/test/transUT.cpp @@ -54,8 +54,9 @@ class Client { rpcInit_.user = (char *)user; rpcInit_.parent = this; rpcInit_.connType = TAOS_CONN_CLIENT; + rpcInit_.shareConnLimit = 200; - taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); this->transCli = rpcOpen(&rpcInit_); tsem_init(&this->sem, 0, 0); } @@ -68,7 +69,7 @@ class Client { void Restart(CB cb) 
{ rpcClose(this->transCli); rpcInit_.cfp = cb; - taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); this->transCli = rpcOpen(&rpcInit_); } void Stop() { @@ -85,6 +86,14 @@ class Client { SemWait(); *resp = this->resp; } + void sendReq(SRpcMsg *req) { + SEpSet epSet = {0}; + epSet.inUse = 0; + addEpIntoEpSet(&epSet, "127.0.0.1", 7000); + + rpcSendRequest(this->transCli, &epSet, req, NULL); + + } void SendAndRecvNoHandle(SRpcMsg *req, SRpcMsg *resp) { if (req->info.handle != NULL) { rpcReleaseHandle(req->info.handle, TAOS_CONN_CLIENT); @@ -120,7 +129,7 @@ class Server { rpcInit_.cfp = processReq; rpcInit_.user = (char *)user; rpcInit_.connType = TAOS_CONN_SERVER; - taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); } void Start() { this->transSrv = rpcOpen(&this->rpcInit_); @@ -160,6 +169,7 @@ static void processReq(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { rpcMsg.contLen = 100; rpcMsg.info = pMsg->info; rpcMsg.code = 0; + rpcFreeCont(pMsg->pCont); rpcSendResponse(&rpcMsg); } @@ -264,6 +274,7 @@ class TransObj { cli->Stop(); } void cliSendAndRecv(SRpcMsg *req, SRpcMsg *resp) { cli->SendAndRecv(req, resp); } + void cliSendReq(SRpcMsg *req) { cli->sendReq(req); } void cliSendAndRecvNoHandle(SRpcMsg *req, SRpcMsg *resp) { cli->SendAndRecvNoHandle(req, resp); } ~TransObj() { @@ -492,15 +503,16 @@ TEST_F(TransEnv, queryExcept) { TEST_F(TransEnv, noResp) { SRpcMsg resp = {0}; SRpcMsg req = {0}; - // for (int i = 0; i < 5; i++) { - // memset(&req, 0, sizeof(req)); - // req.info.noResp = 1; - // req.msgType = 1; - // req.pCont = rpcMallocCont(10); - // req.contLen = 10; - // tr->cliSendAndRecv(&req, &resp); - //} - // taosMsleep(2000); + for (int i = 0; i < 500000; i++) { + memset(&req, 0, sizeof(req)); + req.info.noResp = 1; + req.msgType = 3; + req.pCont = rpcMallocCont(10); + req.contLen = 10; + tr->cliSendReq(&req); + 
//tr->cliSendAndRecv(&req, &resp); + } + taosMsleep(2000); // no resp } diff --git a/source/libs/transport/test/transUT2.cpp b/source/libs/transport/test/transUT2.cpp new file mode 100644 index 00000000000..54d23b1f64f --- /dev/null +++ b/source/libs/transport/test/transUT2.cpp @@ -0,0 +1,529 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 * or later ("AGPL"), as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include "tdatablock.h" +#include "tglobal.h" +#include "tlog.h" +#include "tmisce.h" +#include "transLog.h" +#include "trpc.h" +#include "tversion.h" +using namespace std; + +const char *label = "APP"; +const char *secret = "secret"; +const char *user = "user"; +const char *ckey = "ckey"; + +class Server; +int port = 7000; +// server process +// server except + +typedef void (*CB)(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet); + +static void processContinueSend(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet); +static void processReleaseHandleCb(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet); +static void processRegisterFailure(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet); +static void processReq(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet); +// client process; +static void processResp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet); +class Client { + public: + void Init(int nThread) { + memcpy(tsTempDir, TD_TMP_DIR_PATH, strlen(TD_TMP_DIR_PATH)); + memset(&rpcInit_, 0, sizeof(rpcInit_)); + rpcInit_.localPort = 0; + rpcInit_.label = (char *)"client"; + 
rpcInit_.numOfThreads = nThread; + rpcInit_.cfp = processResp; + rpcInit_.user = (char *)user; + rpcInit_.parent = this; + rpcInit_.connType = TAOS_CONN_CLIENT; + rpcInit_.shareConnLimit = 200; + + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); + this->transCli = rpcOpen(&rpcInit_); + //tsem_init(&this->sem, 0, 0); + } + void SetResp(SRpcMsg *pMsg) { + // set up resp; + this->resp = *pMsg; + } + SRpcMsg *Resp() { return &this->resp; } + + void Restart(CB cb) { + rpcClose(this->transCli); + rpcInit_.cfp = cb; + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); + this->transCli = rpcOpen(&rpcInit_); + } + void Stop() { + rpcClose(this->transCli); + this->transCli = NULL; + } + + void SendAndRecv(SRpcMsg *req, SRpcMsg *resp) { + SEpSet epSet = {0}; + epSet.inUse = 0; + addEpIntoEpSet(&epSet, "127.0.0.1", 7000); + + rpcSendRequest(this->transCli, &epSet, req, NULL); + SemWait(); + *resp = this->resp; + } + void sendReq(SRpcMsg *req) { + SEpSet epSet = {0}; + epSet.inUse = 0; + addEpIntoEpSet(&epSet, "127.0.0.1", 7000); + + rpcSendRequest(this->transCli, &epSet, req, NULL); + } + + void sendReqWithId(SRpcMsg *req, int64_t *id) { + SEpSet epSet = {0}; + epSet.inUse = 0; + addEpIntoEpSet(&epSet, "127.0.0.1",7000); + rpcSendRequestWithCtx(this->transCli, &epSet, req, id, NULL); + + } + void freeId(int64_t *id) { + rpcFreeConnById(this->transCli, *id); + } + void SendAndRecvNoHandle(SRpcMsg *req, SRpcMsg *resp) { + if (req->info.handle != NULL) { + rpcReleaseHandle(req->info.handle, TAOS_CONN_CLIENT); + req->info.handle = NULL; + } + SendAndRecv(req, resp); + } + + void SemWait() { tsem_wait(&this->sem); } + void SemPost() { tsem_post(&this->sem); } + void Reset() {} + + ~Client() { + if (this->transCli) rpcClose(this->transCli); + } + + private: + tsem_t sem; + SRpcInit rpcInit_; + void *transCli; + SRpcMsg resp; +}; +class Server { + public: + Server() { + memcpy(tsTempDir, TD_TMP_DIR_PATH, strlen(TD_TMP_DIR_PATH)); + memset(&rpcInit_, 0, 
sizeof(rpcInit_)); + + memcpy(rpcInit_.localFqdn, "localhost", strlen("localhost")); + rpcInit_.localPort = port; + rpcInit_.label = (char *)"server"; + rpcInit_.numOfThreads = 5; + rpcInit_.cfp = processReq; + rpcInit_.user = (char *)user; + rpcInit_.connType = TAOS_CONN_SERVER; + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); + } + void Start() { + this->transSrv = rpcOpen(&this->rpcInit_); + taosMsleep(1000); + } + void SetSrvContinueSend(CB cb) { + this->Stop(); + rpcInit_.cfp = cb; + this->Start(); + } + void Stop() { + if (this->transSrv == NULL) return; + rpcClose(this->transSrv); + this->transSrv = NULL; + } + void SetSrvSend(void (*cfp)(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet)) { + this->Stop(); + rpcInit_.cfp = cfp; + this->Start(); + } + void Restart() { + this->Stop(); + this->Start(); + } + ~Server() { + if (this->transSrv) rpcClose(this->transSrv); + this->transSrv = NULL; + } + + private: + SRpcInit rpcInit_; + void *transSrv; +}; +static void processReq(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { + SRpcMsg rpcMsg = {0}; + rpcMsg.pCont = rpcMallocCont(100); + rpcMsg.contLen = 100; + rpcMsg.info = pMsg->info; + rpcMsg.code = 0; + rpcFreeCont(pMsg->pCont); + rpcSendResponse(&rpcMsg); +} + +static void processContinueSend(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { + // for (int i = 0; i < 10; i++) { + // SRpcMsg rpcMsg = {0}; + // rpcMsg.pCont = rpcMallocCont(100); + // rpcMsg.contLen = 100; + // rpcMsg.info = pMsg->info; + // rpcMsg.code = 0; + // rpcSendResponse(&rpcMsg); + // } +} +static void processReleaseHandleCb(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { + SRpcMsg rpcMsg = {0}; + rpcMsg.pCont = rpcMallocCont(100); + rpcMsg.contLen = 100; + rpcMsg.info = pMsg->info; + rpcMsg.code = 0; + rpcSendResponse(&rpcMsg); + + rpcReleaseHandle(&pMsg->info, TAOS_CONN_SERVER); +} +static void processRegisterFailure(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { + // { + // SRpcMsg rpcMsg1 = {0}; + // rpcMsg1.pCont = 
rpcMallocCont(100); + // rpcMsg1.contLen = 100; + // rpcMsg1.info = pMsg->info; + // rpcMsg1.code = 0; + // rpcRegisterBrokenLinkArg(&rpcMsg1); + // } + // taosMsleep(10); + + // SRpcMsg rpcMsg = {0}; + // rpcMsg.pCont = rpcMallocCont(100); + // rpcMsg.contLen = 100; + // rpcMsg.info = pMsg->info; + // rpcMsg.code = 0; + // rpcSendResponse(&rpcMsg); +} +// client process; +static void processResp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { + Client *client = (Client *)parent; + rpcFreeCont(pMsg->pCont); + STraceId *trace = (STraceId *)&pMsg->info.traceId; + tGDebug("received resp %s",tstrerror(pMsg->code)); +} + +static void initEnv() { + dDebugFlag = 143; + vDebugFlag = 0; + mDebugFlag = 143; + cDebugFlag = 0; + jniDebugFlag = 0; + tmrDebugFlag = 143; + uDebugFlag = 143; + rpcDebugFlag = 143; + qDebugFlag = 0; + wDebugFlag = 0; + sDebugFlag = 0; + tsdbDebugFlag = 0; + tsLogEmbedded = 1; + tsAsyncLog = 0; + + std::string path = TD_TMP_DIR_PATH "transport"; + // taosRemoveDir(path.c_str()); + taosMkDir(path.c_str()); + + tstrncpy(tsLogDir, path.c_str(), PATH_MAX); + if (taosInitLog("taosdlog", 1, false) != 0) { + printf("failed to init log file\n"); + } +} + +class TransObj { + public: + TransObj() { + initEnv(); + cli = new Client; + cli->Init(1); + srv = new Server; + srv->Start(); + } + + void RestartCli(CB cb) { + // + cli->Restart(cb); + } + void StopSrv() { + // + srv->Stop(); + } + // call when link broken, and notify query or fetch stop + void SetSrvContinueSend(void (*cfp)(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet)) { + /////// + srv->SetSrvContinueSend(cfp); + } + void RestartSrv() { srv->Restart(); } + void StopCli() { + /////// + cli->Stop(); + } + void cliSendAndRecv(SRpcMsg *req, SRpcMsg *resp) { cli->SendAndRecv(req, resp); } + void cliSendReq(SRpcMsg *req) { cli->sendReq(req); } + + void cliSendReqWithId(SRpcMsg *req, int64_t *id) { cli->sendReqWithId(req, id);} + void cliFreeReqId(int64_t *id) { cli->freeId(id);} + void 
cliSendAndRecvNoHandle(SRpcMsg *req, SRpcMsg *resp) { cli->SendAndRecvNoHandle(req, resp); } + + ~TransObj() { + delete cli; + delete srv; + } + + private: + Client *cli; + Server *srv; +}; +class TransEnv : public ::testing::Test { + protected: + virtual void SetUp() { + // set up trans obj + tr = new TransObj(); + } + virtual void TearDown() { + // tear down + delete tr; + } + + TransObj *tr = NULL; +}; + +TEST_F(TransEnv, 01sendAndRec) { + // for (int i = 0; i < 10; i++) { + // SRpcMsg req = {0}, resp = {0}; + // req.msgType = 0; + // req.pCont = rpcMallocCont(10); + // req.contLen = 10; + // tr->cliSendAndRecv(&req, &resp); + // assert(resp.code == 0); + // } +} + +TEST_F(TransEnv, 02StopServer) { + // for (int i = 0; i < 1; i++) { + // SRpcMsg req = {0}, resp = {0}; + // req.msgType = 0; + // req.info.ahandle = (void *)0x35; + // req.pCont = rpcMallocCont(10); + // req.contLen = 10; + // tr->cliSendAndRecv(&req, &resp); + // assert(resp.code == 0); + // } + // SRpcMsg req = {0}, resp = {0}; + // req.info.ahandle = (void *)0x35; + // req.msgType = 1; + // req.pCont = rpcMallocCont(10); + // req.contLen = 10; + // tr->StopSrv(); + // // tr->RestartSrv(); + // tr->cliSendAndRecv(&req, &resp); + // assert(resp.code != 0); +} +TEST_F(TransEnv, clientUserDefined) { + // tr->RestartSrv(); + // for (int i = 0; i < 10; i++) { + // SRpcMsg req = {0}, resp = {0}; + // req.msgType = 0; + // req.pCont = rpcMallocCont(10); + // req.contLen = 10; + // tr->cliSendAndRecv(&req, &resp); + // assert(resp.code == 0); + // } + + ////////////////// +} + +TEST_F(TransEnv, cliPersistHandle) { + // SRpcMsg resp = {0}; + // void *handle = NULL; + // for (int i = 0; i < 10; i++) { + // SRpcMsg req = {0}; + // req.info = resp.info; + // req.info.persistHandle = 1; + + // req.msgType = 1; + // req.pCont = rpcMallocCont(10); + // req.contLen = 10; + // tr->cliSendAndRecv(&req, &resp); + // // if (i == 5) { + // // std::cout << "stop server" << std::endl; + // // tr->StopSrv(); + // //} + 
// // if (i >= 6) { + // // EXPECT_TRUE(resp.code != 0); + // //} + // handle = resp.info.handle; + // } + // rpcReleaseHandle(handle, TAOS_CONN_CLIENT); + // for (int i = 0; i < 10; i++) { + // SRpcMsg req = {0}; + // req.msgType = 1; + // req.pCont = rpcMallocCont(10); + // req.contLen = 10; + // tr->cliSendAndRecv(&req, &resp); + // } + + // taosMsleep(1000); + ////////////////// +} + +TEST_F(TransEnv, srvReleaseHandle) { + // SRpcMsg resp = {0}; + // tr->SetSrvContinueSend(processReleaseHandleCb); + // // tr->Restart(processReleaseHandleCb); + // void *handle = NULL; + // SRpcMsg req = {0}; + // for (int i = 0; i < 1; i++) { + // memset(&req, 0, sizeof(req)); + // req.info = resp.info; + // req.info.persistHandle = 1; + // req.msgType = 1; + // req.pCont = rpcMallocCont(10); + // req.contLen = 10; + // tr->cliSendAndRecv(&req, &resp); + // // tr->cliSendAndRecvNoHandle(&req, &resp); + // EXPECT_TRUE(resp.code == 0); + // } + ////////////////// +} +// reopen later +// TEST_F(TransEnv, cliReleaseHandleExcept) { +// SRpcMsg resp = {0}; +// SRpcMsg req = {0}; +// for (int i = 0; i < 3; i++) { +// memset(&req, 0, sizeof(req)); +// req.info = resp.info; +// req.info.persistHandle = 1; +// req.info.ahandle = (void *)1234; +// req.msgType = 1; +// req.pCont = rpcMallocCont(10); +// req.contLen = 10; +// tr->cliSendAndRecv(&req, &resp); +// if (i == 1) { +// std::cout << "stop server" << std::endl; +// tr->StopSrv(); +// } +// if (i > 1) { +// EXPECT_TRUE(resp.code != 0); +// } +// } +// ////////////////// +//} +TEST_F(TransEnv, srvContinueSend) { + // tr->SetSrvContinueSend(processContinueSend); + // SRpcMsg req = {0}, resp = {0}; + // for (int i = 0; i < 10; i++) { + // // memset(&req, 0, sizeof(req)); + // // memset(&resp, 0, sizeof(resp)); + // // req.msgType = 1; + // // req.pCont = rpcMallocCont(10); + // // req.contLen = 10; + // // tr->cliSendAndRecv(&req, &resp); + // } + // taosMsleep(1000); +} + +TEST_F(TransEnv, srvPersistHandleExcept) { + // 
tr->SetSrvContinueSend(processContinueSend); + // // tr->SetCliPersistFp(cliPersistHandle); + // SRpcMsg resp = {0}; + // SRpcMsg req = {0}; + // for (int i = 0; i < 5; i++) { + // // memset(&req, 0, sizeof(req)); + // // req.info = resp.info; + // // req.msgType = 1; + // // req.pCont = rpcMallocCont(10); + // // req.contLen = 10; + // // tr->cliSendAndRecv(&req, &resp); + // // if (i > 2) { + // // tr->StopCli(); + // // break; + // //} + // } + // taosMsleep(2000); + // conn broken + // +} +TEST_F(TransEnv, cliPersistHandleExcept) { + // tr->SetSrvContinueSend(processContinueSend); + // SRpcMsg resp = {0}; + // SRpcMsg req = {0}; + // for (int i = 0; i < 5; i++) { + // // memset(&req, 0, sizeof(req)); + // // req.info = resp.info; + // // req.msgType = 1; + // // req.pCont = rpcMallocCont(10); + // // req.contLen = 10; + // // tr->cliSendAndRecv(&req, &resp); + // // if (i > 2) { + // // tr->StopSrv(); + // // break; + // //} + // } + // taosMsleep(2000); + // // conn broken + // +} + +TEST_F(TransEnv, multiCliPersistHandleExcept) { + // conn broken +} +TEST_F(TransEnv, queryExcept) { + //taosMsleep(4 * 1000); +} +TEST_F(TransEnv, idTest) { + SRpcMsg resp = {0}; + SRpcMsg req = {0}; + for (int i = 0; i < 50000; i++) { + memset(&req, 0, sizeof(req)); + req.info.noResp = 0; + req.msgType = 3; + req.pCont = rpcMallocCont(10); + req.contLen = 10; + int64_t id; + tr->cliSendReqWithId(&req, &id); + tr->cliFreeReqId(&id); + } + taosMsleep(1000); + // no resp +} +TEST_F(TransEnv, noResp) { + SRpcMsg resp = {0}; + SRpcMsg req = {0}; + for (int i = 0; i < 500000; i++) { + memset(&req, 0, sizeof(req)); + req.info.noResp = 0; + req.msgType = 3; + req.pCont = rpcMallocCont(10); + req.contLen = 10; + tr->cliSendReq(&req); + //tr->cliSendAndRecv(&req, &resp); + } + taosMsleep(10000); + // no resp +} diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index a2e780b621f..66e337195bc 100644 --- a/source/libs/wal/src/walMeta.c +++ 
b/source/libs/wal/src/walMeta.c @@ -411,25 +411,31 @@ static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) { TAOS_RETURN(TSDB_CODE_SUCCESS); } -void printFileSet(SArray* fileSet) { +static void printFileSet(int32_t vgId, SArray* fileSet, const char* str) { int32_t sz = taosArrayGetSize(fileSet); for (int32_t i = 0; i < sz; i++) { SWalFileInfo* pFileInfo = taosArrayGet(fileSet, i); - wInfo("firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64 ", createTs:%" PRId64 - ", closeTs:%" PRId64, - pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset, pFileInfo->createTs, - pFileInfo->closeTs); + wInfo("vgId:%d, %s-%d, firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64 + ", createTs:%" PRId64 ", closeTs:%" PRId64, + vgId, str, i, pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset, + pFileInfo->createTs, pFileInfo->closeTs); } } int32_t walCheckAndRepairMeta(SWal* pWal) { // load log files, get first/snapshot/last version info + if (pWal->cfg.level == TAOS_WAL_SKIP) { + return TSDB_CODE_SUCCESS; + } int32_t code = 0; const char* logPattern = "^[0-9]+.log$"; const char* idxPattern = "^[0-9]+.idx$"; regex_t logRegPattern; regex_t idxRegPattern; + wInfo("vgId:%d, begin to repair meta, wal path:%s, firstVer:%" PRId64 ", lastVer:%" PRId64 ", snapshotVer:%" PRId64, + pWal->cfg.vgId, pWal->path, pWal->vers.firstVer, pWal->vers.lastVer, pWal->vers.snapshotVer); + if (regcomp(&logRegPattern, logPattern, REG_EXTENDED) != 0) { wError("failed to compile log pattern, error:%s", tstrerror(terrno)); return terrno; @@ -482,9 +488,9 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { taosArraySort(actualLog, compareWalFileInfo); - wInfo("vgId:%d, wal path:%s, actual log file num:%d", pWal->cfg.vgId, pWal->path, + wInfo("vgId:%d, actual log file, wal path:%s, num:%d", pWal->cfg.vgId, pWal->path, (int32_t)taosArrayGetSize(actualLog)); - 
printFileSet(actualLog); + printFileSet(pWal->cfg.vgId, actualLog, "actual log file"); int metaFileNum = taosArrayGetSize(pWal->fileInfoSet); int actualFileNum = taosArrayGetSize(actualLog); @@ -500,9 +506,9 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { TAOS_RETURN(code); } - wInfo("vgId:%d, wal path:%s, meta log file num:%d", pWal->cfg.vgId, pWal->path, + wInfo("vgId:%d, log file in meta, wal path:%s, num:%d", pWal->cfg.vgId, pWal->path, (int32_t)taosArrayGetSize(pWal->fileInfoSet)); - printFileSet(pWal->fileInfoSet); + printFileSet(pWal->cfg.vgId, pWal->fileInfoSet, "log file in meta"); int32_t sz = taosArrayGetSize(pWal->fileInfoSet); @@ -563,7 +569,9 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { // repair ts of files TAOS_CHECK_RETURN(walRepairLogFileTs(pWal, &updateMeta)); - printFileSet(pWal->fileInfoSet); + wInfo("vgId:%d, log file after repair, wal path:%s, num:%d", pWal->cfg.vgId, pWal->path, + (int32_t)taosArrayGetSize(pWal->fileInfoSet)); + printFileSet(pWal->cfg.vgId, pWal->fileInfoSet, "file after repair"); // update meta file if (updateMeta) { TAOS_CHECK_RETURN(walSaveMeta(pWal)); @@ -571,6 +579,9 @@ int32_t walCheckAndRepairMeta(SWal* pWal) { TAOS_CHECK_RETURN(walLogEntriesComplete(pWal)); + wInfo("vgId:%d, success to repair meta, wal path:%s, firstVer:%" PRId64 ", lastVer:%" PRId64 ", snapshotVer:%" PRId64, + pWal->cfg.vgId, pWal->path, pWal->vers.firstVer, pWal->vers.lastVer, pWal->vers.snapshotVer); + return code; } @@ -1058,6 +1069,8 @@ int32_t walSaveMeta(SWal* pWal) { TAOS_CHECK_GOTO(TAOS_SYSTEM_ERROR(errno), &lino, _err); } + wInfo("vgId:%d, save meta file: %s, firstVer:%" PRId64 ", lastVer:%" PRId64, pWal->cfg.vgId, tmpFnameStr, + pWal->vers.firstVer, pWal->vers.lastVer); // rename it n = walBuildMetaName(pWal, metaVer + 1, fnameStr); @@ -1155,9 +1168,9 @@ int32_t walLoadMeta(SWal* pWal) { (void)taosCloseFile(&pFile); taosMemoryFree(buf); - wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%d", pWal->cfg.vgId, fnameStr, - 
(int32_t)taosArrayGetSize(pWal->fileInfoSet)); - printFileSet(pWal->fileInfoSet); + wInfo("vgId:%d, meta file loaded: %s, firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileInfoSet size:%d", pWal->cfg.vgId, + fnameStr, pWal->vers.firstVer, pWal->vers.lastVer, (int32_t)taosArrayGetSize(pWal->fileInfoSet)); + printFileSet(pWal->cfg.vgId, pWal->fileInfoSet, "file in meta"); TAOS_RETURN(code); } diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index c8c37b11bc5..1a9652b3bb6 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -294,8 +294,11 @@ int32_t walRollback(SWal *pWal, int64_t ver) { static int32_t walRollImpl(SWal *pWal) { int32_t code = 0, lino = 0; + if (pWal->cfg.level == TAOS_WAL_SKIP && pWal->pIdxFile != NULL && pWal->pLogFile != NULL) { + TAOS_RETURN(TSDB_CODE_SUCCESS); + } if (pWal->pIdxFile != NULL) { - if (pWal->cfg.level != TAOS_WAL_SKIP && (code = taosFsyncFile(pWal->pIdxFile)) != 0) { + if ((code = taosFsyncFile(pWal->pIdxFile)) != 0) { TAOS_CHECK_GOTO(terrno, &lino, _exit); } code = taosCloseFile(&pWal->pIdxFile); @@ -305,7 +308,7 @@ static int32_t walRollImpl(SWal *pWal) { } if (pWal->pLogFile != NULL) { - if (pWal->cfg.level != TAOS_WAL_SKIP && (code = taosFsyncFile(pWal->pLogFile)) != 0) { + if ((code = taosFsyncFile(pWal->pLogFile)) != 0) { TAOS_CHECK_GOTO(terrno, &lino, _exit); } code = taosCloseFile(&pWal->pLogFile); @@ -664,7 +667,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy // set status if (pWal->vers.firstVer == -1) { - pWal->vers.firstVer = 0; + pWal->vers.firstVer = index; } pWal->vers.lastVer = index; pWal->totSize += sizeof(SWalCkHead) + cyptedBodyLen; diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index a0285f13632..3e6fab116f1 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ b/source/libs/wal/test/walMetaTest.cpp @@ -455,3 +455,59 @@ TEST_F(WalRetentionEnv, repairMeta1) { } 
walCloseReader(pRead); } + +class WalSkipLevel : public ::testing::Test { + protected: + static void SetUpTestCase() { + int code = walInit(NULL); + ASSERT(code == 0); + } + + static void TearDownTestCase() { walCleanUp(); } + + void walResetEnv() { + TearDown(); + taosRemoveDir(pathName); + SetUp(); + } + + void SetUp() override { + SWalCfg cfg; + cfg.rollPeriod = -1; + cfg.segSize = -1; + cfg.committed =-1; + cfg.retentionPeriod = -1; + cfg.retentionSize = 0; + cfg.rollPeriod = 0; + cfg.vgId = 1; + cfg.level = TAOS_WAL_SKIP; + pWal = walOpen(pathName, &cfg); + ASSERT(pWal != NULL); + } + + void TearDown() override { + walClose(pWal); + pWal = NULL; + } + + SWal* pWal = NULL; + const char* pathName = TD_TMP_DIR_PATH "wal_test"; +}; + +TEST_F(WalSkipLevel, restart) { + walResetEnv(); + int code; + + int i; + for (i = 0; i < 100; i++) { + char newStr[100]; + sprintf(newStr, "%s-%d", ranStr, i); + int len = strlen(newStr); + code = walAppendLog(pWal, i, 0, syncMeta, newStr, len); + ASSERT_EQ(code, 0); + } + + TearDown(); + + SetUp(); +} \ No newline at end of file diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c index 497769a71cf..84de563cda2 100644 --- a/source/os/src/osDir.c +++ b/source/os/src/osDir.c @@ -223,10 +223,15 @@ int32_t taosMulModeMkDir(const char *dirname, int mode, bool checkAccess) { if (checkAccess && taosCheckAccessFile(temp, TD_FILE_ACCESS_EXIST_OK | TD_FILE_ACCESS_READ_OK | TD_FILE_ACCESS_WRITE_OK)) { return 0; } + code = chmod(temp, mode); if (-1 == code) { - terrno = TAOS_SYSTEM_ERROR(errno); - return terrno; + struct stat statbuf = {0}; + code = stat(temp, &statbuf); + if (code != 0 || (statbuf.st_mode & mode) != mode) { + terrno = TAOS_SYSTEM_ERROR(errno); + return terrno; + } } } diff --git a/source/os/src/osEnv.c b/source/os/src/osEnv.c index a3791eb0267..05c9936c2ea 100644 --- a/source/os/src/osEnv.c +++ b/source/os/src/osEnv.c @@ -37,7 +37,6 @@ float tsNumOfCores = 0; int64_t tsTotalMemoryKB = 0; char *tsProcPath = NULL; -char 
tsSIMDEnable = 1; char tsAVX512Enable = 0; char tsSSE42Supported = 0; char tsAVXSupported = 0; diff --git a/source/os/test/CMakeLists.txt b/source/os/test/CMakeLists.txt index 324920f37bd..cc7110517f7 100644 --- a/source/os/test/CMakeLists.txt +++ b/source/os/test/CMakeLists.txt @@ -5,12 +5,11 @@ FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64) FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64) -IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) +IF(HEADER_GTEST_INCLUDE_DIR AND(LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) MESSAGE(STATUS "gTest library found, build os test") INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) - ENDIF() INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/src/util/inc) diff --git a/source/util/CMakeLists.txt b/source/util/CMakeLists.txt index 063988ea00c..2633bb3268d 100644 --- a/source/util/CMakeLists.txt +++ b/source/util/CMakeLists.txt @@ -1,5 +1,9 @@ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/src/version.c.in" "${CMAKE_CURRENT_SOURCE_DIR}/src/version.c") aux_source_directory(src UTIL_SRC) +IF(COMPILER_SUPPORT_AVX2) + MESSAGE(STATUS "AVX2 instructions is ACTIVATED") + set_source_files_properties(src/tdecompressavx.c PROPERTIES COMPILE_FLAGS -mavx2) +ENDIF() add_library(util STATIC ${UTIL_SRC}) if(DEFINED GRANT_CFG_INCLUDE_DIR) @@ -14,7 +18,7 @@ else() endif(${ASSERT_NOT_CORE}) if(${BUILD_WITH_ANALYSIS}) - add_definitions(-DUSE_ANAL) + add_definitions(-DUSE_ANALYTICS) endif() target_include_directories( diff --git a/source/util/src/tanal.c b/source/util/src/tanalytics.c similarity index 97% rename from source/util/src/tanal.c rename to source/util/src/tanalytics.c index 92eee28ba87..99d91700a2e 100644 --- a/source/util/src/tanal.c +++ b/source/util/src/tanalytics.c @@ -14,18 +14,17 @@ */ #define _DEFAULT_SOURCE 
-#include "tanal.h" -#include "tmsg.h" +#include "tanalytics.h" #include "ttypes.h" #include "tutil.h" -#ifdef USE_ANAL +#ifdef USE_ANALYTICS #include #define ANAL_ALGO_SPLIT "," typedef struct { int64_t ver; - SHashObj *hash; // algoname:algotype -> SAnalUrl + SHashObj *hash; // algoname:algotype -> SAnalyticsUrl TdThreadMutex lock; } SAlgoMgmt; @@ -69,7 +68,7 @@ EAnalAlgoType taosAnalAlgoInt(const char *name) { return ANAL_ALGO_TYPE_END; } -int32_t taosAnalInit() { +int32_t taosAnalyticsInit() { if (curl_global_init(CURL_GLOBAL_ALL) != 0) { uError("failed to init curl"); return -1; @@ -94,14 +93,14 @@ int32_t taosAnalInit() { static void taosAnalFreeHash(SHashObj *hash) { void *pIter = taosHashIterate(hash, NULL); while (pIter != NULL) { - SAnalUrl *pUrl = (SAnalUrl *)pIter; + SAnalyticsUrl *pUrl = (SAnalyticsUrl *)pIter; taosMemoryFree(pUrl->url); pIter = taosHashIterate(hash, pIter); } taosHashCleanup(hash); } -void taosAnalCleanup() { +void taosAnalyticsCleanup() { curl_global_cleanup(); if (taosThreadMutexDestroy(&tsAlgos.lock) != 0) { uError("failed to destroy anal lock"); @@ -167,8 +166,10 @@ int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, char name[TSDB_ANAL_ALGO_KEY_LEN] = {0}; int32_t nameLen = 1 + tsnprintf(name, sizeof(name) - 1, "%d:%s", type, algoName); + char *unused = strntolower(name, name, nameLen); + if (taosThreadMutexLock(&tsAlgos.lock) == 0) { - SAnalUrl *pUrl = taosHashAcquire(tsAlgos.hash, name, nameLen); + SAnalyticsUrl *pUrl = taosHashAcquire(tsAlgos.hash, name, nameLen); if (pUrl != NULL) { tstrncpy(url, pUrl->url, urlLen); uDebug("algo:%s, type:%s, url:%s", algoName, taosAnalAlgoStr(type), url); @@ -178,6 +179,7 @@ int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, code = terrno; uError("algo:%s, type:%s, url not found", algoName, taosAnalAlgoStr(type)); } + if (taosThreadMutexUnlock(&tsAlgos.lock) != 0) { uError("failed to unlock hash"); return TSDB_CODE_OUT_OF_MEMORY; @@ 
-403,7 +405,7 @@ static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) { return terrno; } - pBuf->pCols = taosMemoryCalloc(numOfCols, sizeof(SAnalColBuf)); + pBuf->pCols = taosMemoryCalloc(numOfCols, sizeof(SAnalyticsColBuf)); if (pBuf->pCols == NULL) return TSDB_CODE_OUT_OF_MEMORY; pBuf->numOfCols = numOfCols; @@ -412,7 +414,7 @@ static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) { } for (int32_t i = 0; i < numOfCols; ++i) { - SAnalColBuf *pCol = &pBuf->pCols[i]; + SAnalyticsColBuf *pCol = &pBuf->pCols[i]; snprintf(pCol->fileName, sizeof(pCol->fileName), "%s-c%d", pBuf->fileName, i); pCol->filePtr = taosOpenFile(pCol->fileName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH); @@ -546,7 +548,7 @@ static int32_t taosAnalJsonBufWriteDataEnd(SAnalBuf *pBuf) { if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { for (int32_t i = 0; i < pBuf->numOfCols; ++i) { - SAnalColBuf *pCol = &pBuf->pCols[i]; + SAnalyticsColBuf *pCol = &pBuf->pCols[i]; code = taosFsyncFile(pCol->filePtr); if (code != 0) return code; @@ -588,7 +590,7 @@ int32_t taosAnalJsonBufClose(SAnalBuf *pBuf) { if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { for (int32_t i = 0; i < pBuf->numOfCols; ++i) { - SAnalColBuf *pCol = &pBuf->pCols[i]; + SAnalyticsColBuf *pCol = &pBuf->pCols[i]; if (pCol->filePtr != NULL) { code = taosFsyncFile(pCol->filePtr); if (code != 0) return code; @@ -610,7 +612,7 @@ void taosAnalBufDestroy(SAnalBuf *pBuf) { if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) { for (int32_t i = 0; i < pBuf->numOfCols; ++i) { - SAnalColBuf *pCol = &pBuf->pCols[i]; + SAnalyticsColBuf *pCol = &pBuf->pCols[i]; if (pCol->fileName[0] != 0) { if (pCol->filePtr != NULL) (void)taosCloseFile(&pCol->filePtr); if (taosRemoveFile(pCol->fileName) != 0) { @@ -726,8 +728,8 @@ static int32_t taosAnalBufGetCont(SAnalBuf *pBuf, char **ppCont, int64_t *pContL #else -int32_t taosAnalInit() { return 0; } -void taosAnalCleanup() {} +int32_t taosAnalyticsInit() { return 0; } 
+void taosAnalyticsCleanup() {} SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf) { return NULL; } int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen) { return 0; } diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 9c9ded693e0..6ffb5b635ac 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -471,12 +471,12 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha return nelements * word_length; } -#ifdef __AVX512F__ if (tsSIMDEnable && tsAVX512Enable && tsAVX512Supported) { - tsDecompressIntImpl_Hw(input, nelements, output, type); - return nelements * word_length; + int32_t cnt = tsDecompressIntImpl_Hw(input, nelements, output, type); + if (cnt >= 0) { + return cnt; + } } -#endif // Selector value: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 char bit_per_integer[] = {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60}; @@ -867,12 +867,12 @@ int32_t tsDecompressTimestampImp(const char *const input, const int32_t nelement memcpy(output, input + 1, nelements * longBytes); return nelements * longBytes; } else if (input[0] == 1) { // Decompress -#ifdef __AVX512VL__ if (tsSIMDEnable && tsAVX512Enable && tsAVX512Supported) { - tsDecompressTimestampAvx512(const char *const input, const int32_t nelements, char *const output, bool bigEndian); - return nelements * longBytes; + int32_t cnt = tsDecompressTimestampAvx512(input, nelements, output, false); + if (cnt >= 0) { + return cnt; + } } -#endif int64_t *ostream = (int64_t *)output; @@ -1103,13 +1103,14 @@ int32_t tsDecompressDoubleImp(const char *const input, int32_t ninput, const int return nelements * DOUBLE_BYTES; } -#ifdef __AVX2__ // use AVX2 implementation when allowed and the compression ratio is not high double compressRatio = 1.0 * nelements * DOUBLE_BYTES / ninput; if (tsSIMDEnable && tsAVX2Supported && compressRatio < 2) { - return 
tsDecompressDoubleImpAvx2(input + 1, nelements, output); + int32_t cnt = tsDecompressDoubleImpAvx2(input + 1, nelements, output); + if (cnt >= 0) { + return cnt; + } } -#endif // use implementation without SIMD instructions by default return tsDecompressDoubleImpHelper(input + 1, nelements, output); @@ -1257,13 +1258,14 @@ int32_t tsDecompressFloatImp(const char *const input, int32_t ninput, const int3 return nelements * FLOAT_BYTES; } -#ifdef __AVX2__ // use AVX2 implementation when allowed and the compression ratio is not high double compressRatio = 1.0 * nelements * FLOAT_BYTES / ninput; if (tsSIMDEnable && tsAVX2Supported && compressRatio < 2) { - return tsDecompressFloatImpAvx2(input + 1, nelements, output); + int32_t cnt = tsDecompressFloatImpAvx2(input + 1, nelements, output); + if (cnt >= 0) { + return cnt; + } } -#endif // use implementation without SIMD instructions by default return tsDecompressFloatImpHelper(input + 1, nelements, output); @@ -1617,7 +1619,10 @@ int32_t tsDecompressBigint(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int uTrace("encode:%s, compress:%s, level:%d, type:%s, l1:%d", compressL1Dict[l1].name, compressL2Dict[l2].name, \ lvl, tDataTypes[type].name, l1); \ int32_t len = compressL1Dict[l1].comprFn(pIn, nEle, pBuf, type); \ - int8_t alvl = tsGetCompressL2Level(l2, lvl); \ + if (len < 0) { \ + return len; \ + } \ + int8_t alvl = tsGetCompressL2Level(l2, lvl); \ return compressL2Dict[l2].comprFn(pBuf, len, pOut, nOut, type, alvl); \ } else { \ uTrace("dencode:%s, decompress:%s, level:%d, type:%s", compressL1Dict[l1].name, compressL2Dict[l2].name, lvl, \ @@ -1628,8 +1633,7 @@ int32_t tsDecompressBigint(void *pIn, int32_t nIn, int32_t nEle, void *pOut, int } \ } else if (l1 == L1_DISABLED && l2 != L2_DISABLED) { \ if (compress) { \ - uTrace("encode:%s, compress:%s, level:%d, type:%s", "disabled", compressL2Dict[l1].name, lvl, \ - tDataTypes[type].name); \ + uTrace("encode:%s, compress:%s, level:%d, type:%s", "disabled", "disable", 
lvl, tDataTypes[type].name); \ int8_t alvl = tsGetCompressL2Level(l2, lvl); \ return compressL2Dict[l2].comprFn(pIn, nIn, pOut, nOut, type, alvl); \ } else { \ @@ -1883,3 +1887,26 @@ int8_t tUpdateCompress(uint32_t oldCmpr, uint32_t newCmpr, uint8_t l2Disabled, u return update; } + +int32_t getWordLength(char type) { + int32_t wordLength = 0; + switch (type) { + case TSDB_DATA_TYPE_BIGINT: + wordLength = LONG_BYTES; + break; + case TSDB_DATA_TYPE_INT: + wordLength = INT_BYTES; + break; + case TSDB_DATA_TYPE_SMALLINT: + wordLength = SHORT_BYTES; + break; + case TSDB_DATA_TYPE_TINYINT: + wordLength = CHAR_BYTES; + break; + default: + uError("Invalid decompress integer type:%d", type); + return TSDB_CODE_INVALID_PARA; + } + + return wordLength; +} diff --git a/source/util/src/tdecompress.c b/source/util/src/tdecompressavx.c similarity index 95% rename from source/util/src/tdecompress.c rename to source/util/src/tdecompressavx.c index 81223d73111..143867b783e 100644 --- a/source/util/src/tdecompress.c +++ b/source/util/src/tdecompressavx.c @@ -13,35 +13,16 @@ * along with this program. If not, see . 
*/ -#include "os.h" #include "tcompression.h" -#include "ttypes.h" - -int32_t getWordLength(char type) { - int32_t wordLength = 0; - switch (type) { - case TSDB_DATA_TYPE_BIGINT: - wordLength = LONG_BYTES; - break; - case TSDB_DATA_TYPE_INT: - wordLength = INT_BYTES; - break; - case TSDB_DATA_TYPE_SMALLINT: - wordLength = SHORT_BYTES; - break; - case TSDB_DATA_TYPE_TINYINT: - wordLength = CHAR_BYTES; - break; - default: - uError("Invalid decompress integer type:%d", type); - return TSDB_CODE_INVALID_PARA; - } - - return wordLength; -} #ifdef __AVX2__ +char tsSIMDEnable = 1; +#else +char tsSIMDEnable = 0; +#endif + int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type) { +#ifdef __AVX2__ int32_t word_length = getWordLength(type); // Selector value: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 @@ -75,12 +56,12 @@ int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, int32_t batch = 0; int32_t remain = 0; if (tsSIMDEnable && tsAVX512Supported && tsAVX512Enable) { -#if __AVX512F__ +#ifdef __AVX512F__ batch = num >> 3; remain = num & 0x07; #endif } else if (tsSIMDEnable && tsAVX2Supported) { -#if __AVX2__ +#ifdef __AVX2__ batch = num >> 2; remain = num & 0x03; #endif @@ -88,7 +69,7 @@ int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, if (selector == 0 || selector == 1) { if (tsSIMDEnable && tsAVX512Supported && tsAVX512Enable) { -#if __AVX512F__ +#ifdef __AVX512F__ for (int32_t i = 0; i < batch; ++i) { __m512i prev = _mm512_set1_epi64(prevValue); _mm512_storeu_si512((__m512i *)&p[_pos], prev); @@ -117,7 +98,7 @@ int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, } } else { if (tsSIMDEnable && tsAVX512Supported && tsAVX512Enable) { -#if __AVX512F__ +#ifdef __AVX512F__ __m512i sum_mask1 = _mm512_set_epi64(6, 6, 4, 4, 2, 2, 0, 0); __m512i sum_mask2 = _mm512_set_epi64(5, 5, 5, 5, 1, 1, 1, 1); __m512i sum_mask3 = _mm512_set_epi64(3, 
3, 3, 3, 3, 3, 3, 3); @@ -310,10 +291,13 @@ int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, } return nelements * word_length; +#else + uError("unable run %s without avx2 instructions", __func__); + return -1; +#endif } -#define M256_BYTES sizeof(__m256i) - +#ifdef __AVX2__ FORCE_INLINE __m256i decodeFloatAvx2(const char *data, const char *flag) { __m256i dataVec = _mm256_load_si256((__m256i *)data); __m256i flagVec = _mm256_load_si256((__m256i *)flag); @@ -332,7 +316,27 @@ FORCE_INLINE __m256i decodeFloatAvx2(const char *data, const char *flag) { return diffVec; } +FORCE_INLINE __m256i decodeDoubleAvx2(const char *data, const char *flag) { + __m256i dataVec = _mm256_load_si256((__m256i *)data); + __m256i flagVec = _mm256_load_si256((__m256i *)flag); + __m256i k7 = _mm256_set1_epi64x(7); + __m256i lopart = _mm256_set_epi64x(0, -1, 0, -1); + __m256i hipart = _mm256_set_epi64x(-1, 0, -1, 0); + __m256i trTail = _mm256_cmpgt_epi64(flagVec, k7); + __m256i trHead = _mm256_andnot_si256(trTail, _mm256_set1_epi64x(-1)); + __m256i shiftVec = _mm256_slli_epi64(_mm256_sub_epi64(k7, _mm256_and_si256(flagVec, k7)), 3); + __m256i maskVec = hipart; + __m256i diffVec = _mm256_sllv_epi64(dataVec, _mm256_and_si256(shiftVec, maskVec)); + maskVec = _mm256_or_si256(trHead, lopart); + diffVec = _mm256_srlv_epi64(diffVec, _mm256_and_si256(shiftVec, maskVec)); + maskVec = _mm256_and_si256(trTail, lopart); + diffVec = _mm256_sllv_epi64(diffVec, _mm256_and_si256(shiftVec, maskVec)); + return diffVec; +} +#endif + int32_t tsDecompressFloatImpAvx2(const char *input, int32_t nelements, char *output) { +#ifdef __AVX2__ // Allocate memory-aligned buffer char buf[M256_BYTES * 3]; memset(buf, 0, sizeof(buf)); @@ -343,7 +347,7 @@ int32_t tsDecompressFloatImpAvx2(const char *input, int32_t nelements, char *out // Load data into the buffer for batch processing int32_t batchSize = M256_BYTES / FLOAT_BYTES; - int32_t idx = 0; + int32_t idx = 0; uint32_t cur = 0; for 
(int32_t i = 0; i < nelements; i += 2) { if (idx == batchSize) { @@ -380,27 +384,14 @@ int32_t tsDecompressFloatImpAvx2(const char *input, int32_t nelements, char *out out += idx * FLOAT_BYTES; } return (int32_t)(out - output); -} - -FORCE_INLINE __m256i decodeDoubleAvx2(const char *data, const char *flag) { - __m256i dataVec = _mm256_load_si256((__m256i *)data); - __m256i flagVec = _mm256_load_si256((__m256i *)flag); - __m256i k7 = _mm256_set1_epi64x(7); - __m256i lopart = _mm256_set_epi64x(0, -1, 0, -1); - __m256i hipart = _mm256_set_epi64x(-1, 0, -1, 0); - __m256i trTail = _mm256_cmpgt_epi64(flagVec, k7); - __m256i trHead = _mm256_andnot_si256(trTail, _mm256_set1_epi64x(-1)); - __m256i shiftVec = _mm256_slli_epi64(_mm256_sub_epi64(k7, _mm256_and_si256(flagVec, k7)), 3); - __m256i maskVec = hipart; - __m256i diffVec = _mm256_sllv_epi64(dataVec, _mm256_and_si256(shiftVec, maskVec)); - maskVec = _mm256_or_si256(trHead, lopart); - diffVec = _mm256_srlv_epi64(diffVec, _mm256_and_si256(shiftVec, maskVec)); - maskVec = _mm256_and_si256(trTail, lopart); - diffVec = _mm256_sllv_epi64(diffVec, _mm256_and_si256(shiftVec, maskVec)); - return diffVec; +#else + uError("unable run %s without avx2 instructions", __func__); + return -1; +#endif } int32_t tsDecompressDoubleImpAvx2(const char *input, const int32_t nelements, char *const output) { +#ifdef __AVX2__ // Allocate memory-aligned buffer char buf[M256_BYTES * 3]; memset(buf, 0, sizeof(buf)); @@ -448,12 +439,15 @@ int32_t tsDecompressDoubleImpAvx2(const char *input, const int32_t nelements, ch out += idx * DOUBLE_BYTES; } return (int32_t)(out - output); -} +#else + uError("unable run %s without avx2 instructions", __func__); + return -1; #endif +} -#if __AVX512VL__ -// decode two timestamps in one loop. 
-void tsDecompressTimestampAvx2(const char *const input, const int32_t nelements, char *const output, bool bigEndian) { +int32_t tsDecompressTimestampAvx2(const char *const input, const int32_t nelements, char *const output, + bool bigEndian) { +#ifdef __AVX512VL__ int64_t *ostream = (int64_t *)output; int32_t ipos = 1, opos = 0; @@ -588,11 +582,16 @@ void tsDecompressTimestampAvx2(const char *const input, const int32_t nelements, ostream[opos++] = prevVal[1] + prevDeltaX; } } - return; + return opos; +#else + uError("unable run %s without avx512 instructions", __func__); + return -1; +#endif } -void tsDecompressTimestampAvx512(const char *const input, const int32_t nelements, char *const output, - bool UNUSED_PARAM(bigEndian)) { +int32_t tsDecompressTimestampAvx512(const char *const input, const int32_t nelements, char *const output, + bool UNUSED_PARAM(bigEndian)) { +#ifdef __AVX512VL__ int64_t *ostream = (int64_t *)output; int32_t ipos = 1, opos = 0; @@ -700,6 +699,9 @@ void tsDecompressTimestampAvx512(const char *const input, const int32_t nelement } } - return; -} + return opos; +#else + uError("unable run %s without avx512 instructions", __func__); + return -1; #endif +} diff --git a/source/util/src/terror.c b/source/util/src/terror.c index d72400089aa..4b59fa42fb9 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -166,6 +166,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_ENCODE_PARAM_NULL, "Not found compress pa TAOS_DEFINE_ERROR(TSDB_CODE_TSC_COMPRESS_PARAM_ERROR, "Invalid compress param") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_COMPRESS_LEVEL_ERROR, "Invalid compress level param") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FAIL_GENERATE_JSON, "failed to generate JSON") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR, "bind number out of range or not match") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INTERNAL_ERROR, "Internal error") @@ -270,6 +271,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_IN_CREATING, "Database in creating 
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SYS_TABLENAME, "Invalid system table name") TAOS_DEFINE_ERROR(TSDB_CODE_MND_ENCRYPT_NOT_ALLOW_CHANGE, "Encryption is not allowed to be changed after database is created") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_WAL_LEVEL, "Invalid option, wal_level 0 should be used with replica 1") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_LIST_FMT, "Invalid dnode list format") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_LIST_REPEAT, "Duplicate items in the dnode list") // mnode-node TAOS_DEFINE_ERROR(TSDB_CODE_MND_MNODE_ALREADY_EXIST, "Mnode already exists") @@ -360,8 +363,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_MANY_ALGO, "Anode too many algori TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_LONG_ALGO_NAME, "Anode too long algorithm name") TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_MANY_ALGO_TYPE, "Anode too many algorithm type") -TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_URL_RSP_IS_NULL, "Analysis url response is NULL") -TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_URL_CANT_ACCESS, "Analysis url can't access") +TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_URL_RSP_IS_NULL, "Analysis service response is NULL") +TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_URL_CANT_ACCESS, "Analysis service can't access") TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ALGO_NOT_FOUND, "Analysis algorithm not found") TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ALGO_NOT_LOAD, "Analysis algorithm not loaded") TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_BUF_INVALID_TYPE, "Analysis invalid buffer type") @@ -623,6 +626,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SCH_INTERNAL_ERROR, "scheduler internal er TAOS_DEFINE_ERROR(TSDB_CODE_SCH_TIMEOUT_ERROR, "Task timeout") TAOS_DEFINE_ERROR(TSDB_CODE_SCH_JOB_IS_DROPPING, "Job is dropping") TAOS_DEFINE_ERROR(TSDB_CODE_SCH_JOB_NOT_EXISTS, "Job no longer exist") +TAOS_DEFINE_ERROR(TSDB_CODE_SCH_DATA_SRC_EP_MISS, "No valid epSet for data source node") // parser TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYNTAX_ERROR, "syntax error near") @@ -766,6 +770,11 @@ TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_SETUP_ERROR, "Function set up fail 
TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_INVALID_RES_LENGTH, "Function result exceed max length") TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_HISTOGRAM_ERROR, "Function failed to calculate histogram") TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_PERCENTILE_ERROR, "Function failed to calculate percentile") +TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_RANGE, "Invalid function para range") +TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_PRIMTS, "Function parameter should be primary timestamp") +TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_PK, "Function parameter should be primary key") +TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_HAS_COL, "Function parameter should have column") + //udf TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping") diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 3ca148a6250..76d01395216 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -21,10 +21,12 @@ #include "tjson.h" #include "tutil.h" -#define LOG_MAX_LINE_SIZE (10024) -#define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) -#define LOG_MAX_LINE_DUMP_SIZE (1024 * 1024) -#define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 128) +#define LOG_MAX_LINE_SIZE (10024) +#define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) +#define LOG_MAX_STACK_LINE_SIZE (512) +#define LOG_MAX_STACK_LINE_BUFFER_SIZE (LOG_MAX_STACK_LINE_SIZE + 3) +#define LOG_MAX_LINE_DUMP_SIZE (1024 * 1024) +#define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 128) #define LOG_FILE_DAY_LEN 64 @@ -126,7 +128,7 @@ int32_t idxDebugFlag = 131; int32_t sndDebugFlag = 131; int32_t simDebugFlag = 131; -int32_t tqClientDebug = 0; +int32_t tqClientDebugFlag = 131; int64_t dbgEmptyW = 0; int64_t dbgWN = 0; @@ -669,16 +671,40 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b } } -void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) 
{ - if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return; +/* + use taosPrintLogImpl_useStackBuffer to avoid stack overflow - char buffer[LOG_MAX_LINE_BUFFER_SIZE]; +*/ +static int8_t taosPrintLogImplUseStackBuffer(const char *flags, int32_t level, int32_t dflag, const char *format, + va_list args) { + char buffer[LOG_MAX_STACK_LINE_BUFFER_SIZE]; int32_t len = taosBuildLogHead(buffer, flags); - va_list argpointer; - va_start(argpointer, format); - int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_LINE_BUFFER_SIZE - len, format, argpointer); - va_end(argpointer); + int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_STACK_LINE_BUFFER_SIZE - len - 1, format, args); + if (writeLen > LOG_MAX_STACK_LINE_SIZE) { + return 1; + } + + buffer[writeLen++] = '\n'; + buffer[writeLen] = 0; + + taosPrintLogImp(level, dflag, buffer, writeLen); + + if (tsLogFp && level <= DEBUG_INFO) { + buffer[writeLen - 1] = 0; + (*tsLogFp)(taosGetTimestampMs(), level, buffer + len); + } + return 0; +} +static int8_t taosPrintLogImplUseHeapBuffer(const char *flags, int32_t level, int32_t dflag, const char *format, + va_list args) { + char *buffer = taosMemoryCalloc(1, LOG_MAX_LINE_BUFFER_SIZE + 1); + if (buffer == NULL) { + return 1; + } + int32_t len = taosBuildLogHead(buffer, flags); + + int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_LINE_BUFFER_SIZE - len - 1, format, args); if (writeLen > LOG_MAX_LINE_SIZE) writeLen = LOG_MAX_LINE_SIZE; buffer[writeLen++] = '\n'; @@ -690,6 +716,22 @@ void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *f buffer[writeLen - 1] = 0; (*tsLogFp)(taosGetTimestampMs(), level, buffer + len); } + taosMemoryFree(buffer); + return 0; +} +void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) 
{ + if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return; + + va_list argpointer, argpointer_copy; + va_start(argpointer, format); + va_copy(argpointer_copy, argpointer); + + if (taosPrintLogImplUseStackBuffer(flags, level, dflag, format, argpointer) == 0) { + } else { + TAOS_UNUSED(taosPrintLogImplUseHeapBuffer(flags, level, dflag, format, argpointer_copy)); + } + va_end(argpointer_copy); + va_end(argpointer); } void taosPrintLongString(const char *flags, int32_t level, int32_t dflag, const char *format, ...) { diff --git a/source/util/src/tlrucache.c b/source/util/src/tlrucache.c index 69832cd46c6..43a220af50a 100644 --- a/source/util/src/tlrucache.c +++ b/source/util/src/tlrucache.c @@ -14,12 +14,12 @@ */ #define _DEFAULT_SOURCE +#include "tlrucache.h" #include "os.h" #include "taoserror.h" #include "tarray.h" #include "tdef.h" #include "tlog.h" -#include "tlrucache.h" #include "tutil.h" typedef struct SLRUEntry SLRUEntry; @@ -110,7 +110,7 @@ struct SLRUEntryTable { }; static int taosLRUEntryTableInit(SLRUEntryTable *table, int maxUpperHashBits) { - table->lengthBits = 4; + table->lengthBits = 16; table->list = taosMemoryCalloc(1 << table->lengthBits, sizeof(SLRUEntry *)); if (!table->list) { TAOS_RETURN(terrno); @@ -371,24 +371,35 @@ static void taosLRUCacheShardCleanup(SLRUCacheShard *shard) { static LRUStatus taosLRUCacheShardInsertEntry(SLRUCacheShard *shard, SLRUEntry *e, LRUHandle **handle, bool freeOnFail) { - LRUStatus status = TAOS_LRU_STATUS_OK; - SArray *lastReferenceList = taosArrayInit(16, POINTER_BYTES); - if (!lastReferenceList) { - taosLRUEntryFree(e); - return TAOS_LRU_STATUS_FAIL; + LRUStatus status = TAOS_LRU_STATUS_OK; + SLRUEntry *toFree = NULL; + SArray *lastReferenceList = NULL; + if (shard->usage + e->totalCharge > shard->capacity) { + lastReferenceList = taosArrayInit(16, POINTER_BYTES); + if (!lastReferenceList) { + taosLRUEntryFree(e); + return TAOS_LRU_STATUS_FAIL; + } } (void)taosThreadMutexLock(&shard->mutex); - 
taosLRUCacheShardEvictLRU(shard, e->totalCharge, lastReferenceList); + if (shard->usage + e->totalCharge > shard->capacity && shard->lru.next != &shard->lru) { + if (!lastReferenceList) { + lastReferenceList = taosArrayInit(16, POINTER_BYTES); + if (!lastReferenceList) { + taosLRUEntryFree(e); + (void)taosThreadMutexUnlock(&shard->mutex); + return TAOS_LRU_STATUS_FAIL; + } + } + taosLRUCacheShardEvictLRU(shard, e->totalCharge, lastReferenceList); + } if (shard->usage + e->totalCharge > shard->capacity && (shard->strictCapacity || handle == NULL)) { TAOS_LRU_ENTRY_SET_IN_CACHE(e, false); if (handle == NULL) { - if (!taosArrayPush(lastReferenceList, &e)) { - taosLRUEntryFree(e); - goto _exit; - } + toFree = e; } else { if (freeOnFail) { taosLRUEntryFree(e); @@ -413,11 +424,7 @@ static LRUStatus taosLRUCacheShardInsertEntry(SLRUCacheShard *shard, SLRUEntry * taosLRUCacheShardLRURemove(shard, old); shard->usage -= old->totalCharge; - if (!taosArrayPush(lastReferenceList, &old)) { - taosLRUEntryFree(e); - taosLRUEntryFree(old); - goto _exit; - } + toFree = old; } } if (handle == NULL) { @@ -434,6 +441,10 @@ static LRUStatus taosLRUCacheShardInsertEntry(SLRUCacheShard *shard, SLRUEntry * _exit: (void)taosThreadMutexUnlock(&shard->mutex); + if (toFree) { + taosLRUEntryFree(toFree); + } + for (int i = 0; i < taosArrayGetSize(lastReferenceList); ++i) { SLRUEntry *entry = taosArrayGetP(lastReferenceList, i); @@ -733,7 +744,8 @@ void taosLRUCacheCleanup(SLRUCache *cache) { } LRUStatus taosLRUCacheInsert(SLRUCache *cache, const void *key, size_t keyLen, void *value, size_t charge, - _taos_lru_deleter_t deleter, _taos_lru_overwriter_t overwriter, LRUHandle **handle, LRUPriority priority, void *ud) { + _taos_lru_deleter_t deleter, _taos_lru_overwriter_t overwriter, LRUHandle **handle, + LRUPriority priority, void *ud) { uint32_t hash = TAOS_LRU_CACHE_SHARD_HASH32(key, keyLen); uint32_t shardIndex = hash & cache->shardedCache.shardMask; diff --git a/source/util/src/version.c.in 
b/source/util/src/version.c.in index c91b46e18d1..c5123e6f214 100644 --- a/source/util/src/version.c.in +++ b/source/util/src/version.c.in @@ -1,7 +1,7 @@ -char version[64] = "${TD_VER_NUMBER}"; -char compatible_version[12] = "${TD_VER_COMPATIBLE}"; -char gitinfo[48] = "${TD_VER_GIT}"; -char gitinfoOfInternal[48] = "${TD_VER_GIT_INTERNAL}"; -char buildinfo[64] = "${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}"; +char td_version[64] = "${TD_VER_NUMBER}"; +char td_compatible_version[12] = "${TD_VER_COMPATIBLE}"; +char td_gitinfo[48] = "${TD_VER_GIT}"; +char td_gitinfoOfInternal[48] = "${TD_VER_GIT_INTERNAL}"; +char td_buildinfo[64] = "${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}"; void libtaos_${TD_LIB_VER_NUMBER}_${TD_VER_OSTYPE}_${TD_VER_CPUTYPE}_${TD_VER_VERTYPE}() {}; \ No newline at end of file diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt index 0ddc1dd6537..42473a03a60 100644 --- a/source/util/test/CMakeLists.txt +++ b/source/util/test/CMakeLists.txt @@ -5,7 +5,7 @@ FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64) FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64) -IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) +IF(HEADER_GTEST_INCLUDE_DIR AND(LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) MESSAGE(STATUS "gTest library found, build unit test") INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) @@ -20,18 +20,16 @@ IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) LIST(APPEND SOURCE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/hashTest.cpp) ADD_EXECUTABLE(hashTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES(hashTest util common os gtest pthread) - + LIST(APPEND BIN_SRC ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c) ADD_EXECUTABLE(trefTest ${BIN_SRC}) TARGET_LINK_LIBRARIES(trefTest common util) - ENDIF() -#IF (TD_LINUX) -# 
ADD_EXECUTABLE(trefTest ./trefTest.c) -# TARGET_LINK_LIBRARIES(trefTest util common) -#ENDIF () - +# IF (TD_LINUX) +# ADD_EXECUTABLE(trefTest ./trefTest.c) +# TARGET_LINK_LIBRARIES(trefTest util common) +# ENDIF () INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/src/util/inc) INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/include/common) @@ -46,8 +44,8 @@ add_test( # # freelistTest # add_executable(freelistTest "") # target_sources(freelistTest -# PRIVATE -# "freelistTest.cpp" +# PRIVATE +# "freelistTest.cpp" # ) # target_link_libraries(freelistTest os util gtest gtest_main) @@ -57,7 +55,7 @@ add_test( # cfgTest add_executable(cfgTest "cfgTest.cpp") -target_link_libraries(cfgTest os util gtest_main) +target_link_libraries(cfgTest os util gtest_main) add_test( NAME cfgTest COMMAND cfgTest @@ -65,7 +63,7 @@ add_test( # bloomFilterTest add_executable(bloomFilterTest "bloomFilterTest.cpp") -target_link_libraries(bloomFilterTest os util gtest_main) +target_link_libraries(bloomFilterTest os util gtest_main) add_test( NAME bloomFilterTest COMMAND bloomFilterTest @@ -73,7 +71,7 @@ add_test( # taosbsearchTest add_executable(taosbsearchTest "taosbsearchTest.cpp") -target_link_libraries(taosbsearchTest os util gtest_main) +target_link_libraries(taosbsearchTest os util gtest_main) add_test( NAME taosbsearchTest COMMAND taosbsearchTest @@ -81,7 +79,7 @@ add_test( # trbtreeTest add_executable(rbtreeTest "trbtreeTest.cpp") -target_link_libraries(rbtreeTest os util gtest_main) +target_link_libraries(rbtreeTest os util gtest_main) add_test( NAME rbtreeTest COMMAND rbtreeTest @@ -120,12 +118,19 @@ add_test( ) add_executable(regexTest "regexTest.cpp") -target_link_libraries(regexTest os util gtest_main ) +target_link_libraries(regexTest os util gtest_main) add_test( NAME regexTest COMMAND regexTest ) +add_executable(logTest "log.cpp") +target_link_libraries(logTest os util common gtest_main) +add_test( + NAME logTest + COMMAND logTest +) + add_executable(decompressTest "decompressTest.cpp") 
target_link_libraries(decompressTest os util common gtest_main) add_test( @@ -133,7 +138,7 @@ add_test( COMMAND decompressTest ) -if (${TD_LINUX}) +if(${TD_LINUX}) # terrorTest add_executable(terrorTest "terrorTest.cpp") target_link_libraries(terrorTest os util common gtest_main) @@ -156,4 +161,4 @@ if (${TD_LINUX}) COMMAND memPoolTest ) -endif () +endif() diff --git a/source/util/test/errorCodeTable.ini b/source/util/test/errorCodeTable.ini index 33c9d77c5e7..e837954a0bf 100644 --- a/source/util/test/errorCodeTable.ini +++ b/source/util/test/errorCodeTable.ini @@ -97,6 +97,7 @@ TSDB_CODE_TSC_ENCODE_PARAM_ERROR = 0x80000231 TSDB_CODE_TSC_ENCODE_PARAM_NULL = 0x80000232 TSDB_CODE_TSC_COMPRESS_PARAM_ERROR = 0x80000233 TSDB_CODE_TSC_COMPRESS_LEVEL_ERROR = 0x80000234 +TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR = 0x80000236 TSDB_CODE_TSC_INTERNAL_ERROR = 0x800002FF TSDB_CODE_MND_REQ_REJECTED = 0x80000300 TSDB_CODE_MND_NO_RIGHTS = 0x80000303 diff --git a/source/util/test/log.cpp b/source/util/test/log.cpp new file mode 100644 index 00000000000..ba32d2d6394 --- /dev/null +++ b/source/util/test/log.cpp @@ -0,0 +1,46 @@ +#include +#include +#include +#include +#include +#include + +using namespace std; + + +TEST(log, check_log_refactor) { + const char *logDir = "/tmp"; + const char *defaultLogFileNamePrefix = "taoslog"; + const int32_t maxLogFileNum = 10000; + tsAsyncLog = 0; + // idxDebugFlag = 143; + strcpy(tsLogDir, (char *)logDir); + taosInitLog(tsLogDir, 10, false); + tsAsyncLog = 0; + uDebugFlag = 143; + + std::string str; + str.push_back('a'); + + for (int i = 0; i < 10000; i += 2) { + str.push_back('a'); + uError("write to file %s", str.c_str()); + } + str.clear(); + for (int i = 0; i < 10000; i += 2) { + str.push_back('a'); + uDebug("write to file %s", str.c_str()); + } + + for (int i = 0; i < 10000; i += 2) { + str.push_back('a'); + uInfo("write to file %s", str.c_str()); + } + str.clear(); + + for (int i = 0; i < 10000; i += 2) { + str.push_back('a'); + uTrace("write to 
file %s", str.c_str()); + } + taosCloseLog(); +} diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py index b816095817f..a82bf4c94f2 100644 --- a/tests/army/frame/common.py +++ b/tests/army/frame/common.py @@ -803,11 +803,14 @@ def getOneRow(self, location, containElm): else: tdLog.exit(f"getOneRow out of range: row_index={location} row_count={self.query_row}") - def killProcessor(self, processorName): + def kill_signal_process(self, signal=15, processor_name: str = "taosd"): if (platform.system().lower() == 'windows'): - os.system("TASKKILL /F /IM %s.exe"%processorName) + os.system(f"TASKKILL /F /IM {processor_name}.exe") else: - os.system("unset LD_PRELOAD; pkill %s " % processorName) + command = f"unset LD_PRELOAD; sudo pkill -f -{signal} '{processor_name}'" + tdLog.debug(f"command: {command}") + os.system(command) + def gen_tag_col_str(self, gen_type, data_type, count): """ diff --git a/tests/army/frame/sql.py b/tests/army/frame/sql.py index 8b99219524a..b4bc31b6a87 100644 --- a/tests/army/frame/sql.py +++ b/tests/army/frame/sql.py @@ -73,7 +73,7 @@ def prepare(self, dbname="db", drop=True, **kwargs): for k, v in kwargs.items(): s += f" {k} {v}" if "duration" not in kwargs: - s += " duration 300" + s += " duration 100" self.cursor.execute(s) s = f'use {dbname}' self.cursor.execute(s) diff --git a/tests/army/query/function/ans/interp.csv b/tests/army/query/function/ans/interp.csv new file mode 100644 index 00000000000..e1ba236aa11 --- /dev/null +++ b/tests/army/query/function/ans/interp.csv @@ -0,0 +1,368 @@ + +taos> select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (null) order by irowts; + irowts | table_name | isfilled | intp_c1 | +================================================================================ + 2020-02-01 00:00:04.000 | td32727 | true | NULL | + 2020-02-01 00:00:05.000 | td32727 
| false | 5 | + 2020-02-01 00:00:06.000 | td32727 | true | NULL | + 2020-02-01 00:00:07.000 | td32727 | true | NULL | + 2020-02-01 00:00:08.000 | td32727 | true | NULL | + 2020-02-01 00:00:09.000 | td32727 | true | NULL | + 2020-02-01 00:00:10.000 | td32727 | false | 10 | + 2020-02-01 00:00:11.000 | td32727 | true | NULL | + 2020-02-01 00:00:12.000 | td32727 | true | NULL | + 2020-02-01 00:00:13.000 | td32727 | true | NULL | + 2020-02-01 00:00:14.000 | td32727 | true | NULL | + 2020-02-01 00:00:15.000 | td32727 | false | 15 | + 2020-02-01 00:00:16.000 | td32727 | true | NULL | + +taos> select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (next) order by irowts; + irowts | table_name | isfilled | intp_c1 | +================================================================================ + 2020-02-01 00:00:04.000 | td32727 | true | 5 | + 2020-02-01 00:00:05.000 | td32727 | false | 5 | + 2020-02-01 00:00:06.000 | td32727 | true | 10 | + 2020-02-01 00:00:07.000 | td32727 | true | 10 | + 2020-02-01 00:00:08.000 | td32727 | true | 10 | + 2020-02-01 00:00:09.000 | td32727 | true | 10 | + 2020-02-01 00:00:10.000 | td32727 | false | 10 | + 2020-02-01 00:00:11.000 | td32727 | true | 15 | + 2020-02-01 00:00:12.000 | td32727 | true | 15 | + 2020-02-01 00:00:13.000 | td32727 | true | 15 | + 2020-02-01 00:00:14.000 | td32727 | true | 15 | + 2020-02-01 00:00:15.000 | td32727 | false | 15 | + +taos> select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (prev) order by irowts; + irowts | table_name | isfilled | intp_c1 | +================================================================================ + 2020-02-01 00:00:05.000 | td32727 | false | 5 | + 2020-02-01 00:00:06.000 | td32727 | true | 5 
| + 2020-02-01 00:00:07.000 | td32727 | true | 5 | + 2020-02-01 00:00:08.000 | td32727 | true | 5 | + 2020-02-01 00:00:09.000 | td32727 | true | 5 | + 2020-02-01 00:00:10.000 | td32727 | false | 10 | + 2020-02-01 00:00:11.000 | td32727 | true | 10 | + 2020-02-01 00:00:12.000 | td32727 | true | 10 | + 2020-02-01 00:00:13.000 | td32727 | true | 10 | + 2020-02-01 00:00:14.000 | td32727 | true | 10 | + 2020-02-01 00:00:15.000 | td32727 | false | 15 | + 2020-02-01 00:00:16.000 | td32727 | true | 15 | + +taos> select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (linear) order by irowts; + irowts | table_name | isfilled | intp_c1 | +================================================================================ + 2020-02-01 00:00:05.000 | td32727 | false | 5 | + 2020-02-01 00:00:06.000 | td32727 | true | 6 | + 2020-02-01 00:00:07.000 | td32727 | true | 7 | + 2020-02-01 00:00:08.000 | td32727 | true | 8 | + 2020-02-01 00:00:09.000 | td32727 | true | 9 | + 2020-02-01 00:00:10.000 | td32727 | false | 10 | + 2020-02-01 00:00:11.000 | td32727 | true | 11 | + 2020-02-01 00:00:12.000 | td32727 | true | 12 | + 2020-02-01 00:00:13.000 | td32727 | true | 13 | + 2020-02-01 00:00:14.000 | td32727 | true | 14 | + 2020-02-01 00:00:15.000 | td32727 | false | 15 | + +taos> select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (value, 1) order by irowts; + irowts | table_name | isfilled | intp_c1 | +================================================================================ + 2020-02-01 00:00:04.000 | td32727 | true | 1 | + 2020-02-01 00:00:05.000 | td32727 | false | 5 | + 2020-02-01 00:00:06.000 | td32727 | true | 1 | + 2020-02-01 00:00:07.000 | td32727 | true | 1 | + 2020-02-01 00:00:08.000 | 
td32727 | true | 1 | + 2020-02-01 00:00:09.000 | td32727 | true | 1 | + 2020-02-01 00:00:10.000 | td32727 | false | 10 | + 2020-02-01 00:00:11.000 | td32727 | true | 1 | + 2020-02-01 00:00:12.000 | td32727 | true | 1 | + 2020-02-01 00:00:13.000 | td32727 | true | 1 | + 2020-02-01 00:00:14.000 | td32727 | true | 1 | + 2020-02-01 00:00:15.000 | td32727 | false | 15 | + 2020-02-01 00:00:16.000 | td32727 | true | 1 | + +taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (null) order by irowts, c2; + irowts | table_name | c_c2 | isfilled | intp_c1 | +============================================================================================== + 2020-02-01 00:00:04.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:04.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:04.000 | td32727 | 15 | true | NULL | + 2020-02-01 00:00:05.000 | td32727 | 5 | false | 5 | + 2020-02-01 00:00:05.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:05.000 | td32727 | 15 | true | NULL | + 2020-02-01 00:00:06.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:06.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:06.000 | td32727 | 15 | true | NULL | + 2020-02-01 00:00:07.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:07.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:07.000 | td32727 | 15 | true | NULL | + 2020-02-01 00:00:08.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:08.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:08.000 | td32727 | 15 | true | NULL | + 2020-02-01 00:00:09.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:09.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:09.000 | td32727 | 15 | true | NULL | + 2020-02-01 00:00:10.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:10.000 | td32727 | 10 | false | 10 | + 2020-02-01 00:00:10.000 | td32727 | 15 | true | 
NULL | + 2020-02-01 00:00:11.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:11.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:11.000 | td32727 | 15 | true | NULL | + 2020-02-01 00:00:12.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:12.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:12.000 | td32727 | 15 | true | NULL | + 2020-02-01 00:00:13.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:13.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:13.000 | td32727 | 15 | true | NULL | + 2020-02-01 00:00:14.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:14.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:14.000 | td32727 | 15 | true | NULL | + 2020-02-01 00:00:15.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:15.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:15.000 | td32727 | 15 | false | 15 | + 2020-02-01 00:00:16.000 | td32727 | 5 | true | NULL | + 2020-02-01 00:00:16.000 | td32727 | 10 | true | NULL | + 2020-02-01 00:00:16.000 | td32727 | 15 | true | NULL | + +taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (next) order by irowts, c2; + irowts | table_name | c_c2 | isfilled | intp_c1 | +============================================================================================== + 2020-02-01 00:00:04.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:04.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:04.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:05.000 | td32727 | 5 | false | 5 | + 2020-02-01 00:00:05.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:05.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:06.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:06.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:07.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:07.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:08.000 | 
td32727 | 10 | true | 10 | + 2020-02-01 00:00:08.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:09.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:09.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:10.000 | td32727 | 10 | false | 10 | + 2020-02-01 00:00:10.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:11.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:12.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:13.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:14.000 | td32727 | 15 | true | 15 | + 2020-02-01 00:00:15.000 | td32727 | 15 | false | 15 | + +taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (prev) order by irowts, c2; + irowts | table_name | c_c2 | isfilled | intp_c1 | +============================================================================================== + 2020-02-01 00:00:05.000 | td32727 | 5 | false | 5 | + 2020-02-01 00:00:06.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:07.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:08.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:09.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:10.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:10.000 | td32727 | 10 | false | 10 | + 2020-02-01 00:00:11.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:11.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:12.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:12.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:13.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:13.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:14.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:14.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:15.000 | td32727 | 5 | true | 5 | + 2020-02-01 00:00:15.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:15.000 | td32727 | 15 | false | 15 | + 2020-02-01 00:00:16.000 | td32727 | 5 | true | 5 
| + 2020-02-01 00:00:16.000 | td32727 | 10 | true | 10 | + 2020-02-01 00:00:16.000 | td32727 | 15 | true | 15 | + +taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (linear) order by irowts, c2; + irowts | table_name | c_c2 | isfilled | intp_c1 | +============================================================================================== + 2020-02-01 00:00:05.000 | td32727 | 5 | false | 5 | + 2020-02-01 00:00:10.000 | td32727 | 10 | false | 10 | + 2020-02-01 00:00:15.000 | td32727 | 15 | false | 15 | + +taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (value, 1) order by irowts, c2; + irowts | table_name | c_c2 | isfilled | intp_c1 | +============================================================================================== + 2020-02-01 00:00:04.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:04.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:04.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:05.000 | td32727 | 5 | false | 5 | + 2020-02-01 00:00:05.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:05.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:06.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:06.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:06.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:07.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:07.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:07.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:08.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:08.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:08.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:09.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:09.000 | td32727 | 10 | true | 1 | + 
2020-02-01 00:00:09.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:10.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:10.000 | td32727 | 10 | false | 10 | + 2020-02-01 00:00:10.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:11.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:11.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:11.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:12.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:12.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:12.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:13.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:13.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:13.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:14.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:14.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:14.000 | td32727 | 15 | true | 1 | + 2020-02-01 00:00:15.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:15.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:15.000 | td32727 | 15 | false | 15 | + 2020-02-01 00:00:16.000 | td32727 | 5 | true | 1 | + 2020-02-01 00:00:16.000 | td32727 | 10 | true | 1 | + 2020-02-01 00:00:16.000 | td32727 | 15 | true | 1 | + +taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (null) order by irowts, c2, c3; + irowts | table_name | c_c2 | c_c3 | isfilled | intp_c1 | +====================================================================================================================== + 2020-02-01 00:00:04.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:04.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:04.000 | td32727 | 15 | 15 | true | NULL | + 2020-02-01 00:00:05.000 | td32727 | 5 | 5 | false | 5 | + 2020-02-01 00:00:05.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:05.000 | td32727 | 15 | 15 | true | NULL | + 
2020-02-01 00:00:06.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:06.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:06.000 | td32727 | 15 | 15 | true | NULL | + 2020-02-01 00:00:07.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:07.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:07.000 | td32727 | 15 | 15 | true | NULL | + 2020-02-01 00:00:08.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:08.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:08.000 | td32727 | 15 | 15 | true | NULL | + 2020-02-01 00:00:09.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:09.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:09.000 | td32727 | 15 | 15 | true | NULL | + 2020-02-01 00:00:10.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:10.000 | td32727 | 10 | 10 | false | 10 | + 2020-02-01 00:00:10.000 | td32727 | 15 | 15 | true | NULL | + 2020-02-01 00:00:11.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:11.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:11.000 | td32727 | 15 | 15 | true | NULL | + 2020-02-01 00:00:12.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:12.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:12.000 | td32727 | 15 | 15 | true | NULL | + 2020-02-01 00:00:13.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:13.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:13.000 | td32727 | 15 | 15 | true | NULL | + 2020-02-01 00:00:14.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:14.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:14.000 | td32727 | 15 | 15 | true | NULL | + 2020-02-01 00:00:15.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:15.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:15.000 | td32727 | 15 | 15 | false | 15 | + 2020-02-01 00:00:16.000 | td32727 | 5 | 5 | true | NULL | + 2020-02-01 00:00:16.000 | td32727 | 10 | 10 | true | NULL | + 2020-02-01 00:00:16.000 | td32727 | 15 
| 15 | true | NULL | + +taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (next) order by irowts, c2, c3; + irowts | table_name | c_c2 | c_c3 | isfilled | intp_c1 | +====================================================================================================================== + 2020-02-01 00:00:04.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:04.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:04.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:05.000 | td32727 | 5 | 5 | false | 5 | + 2020-02-01 00:00:05.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:05.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:06.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:06.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:07.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:07.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:08.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:08.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:09.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:09.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:10.000 | td32727 | 10 | 10 | false | 10 | + 2020-02-01 00:00:10.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:11.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:12.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:13.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:14.000 | td32727 | 15 | 15 | true | 15 | + 2020-02-01 00:00:15.000 | td32727 | 15 | 15 | false | 15 | + +taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (prev) order by irowts, c2, c3; + 
irowts | table_name | c_c2 | c_c3 | isfilled | intp_c1 | +====================================================================================================================== + 2020-02-01 00:00:05.000 | td32727 | 5 | 5 | false | 5 | + 2020-02-01 00:00:06.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:07.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:08.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:09.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:10.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:10.000 | td32727 | 10 | 10 | false | 10 | + 2020-02-01 00:00:11.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:11.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:12.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:12.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:13.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:13.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:14.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:14.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:15.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:15.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:15.000 | td32727 | 15 | 15 | false | 15 | + 2020-02-01 00:00:16.000 | td32727 | 5 | 5 | true | 5 | + 2020-02-01 00:00:16.000 | td32727 | 10 | 10 | true | 10 | + 2020-02-01 00:00:16.000 | td32727 | 15 | 15 | true | 15 | + +taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (linear) order by irowts, c2, c3; + irowts | table_name | c_c2 | c_c3 | isfilled | intp_c1 | +====================================================================================================================== + 2020-02-01 00:00:05.000 | td32727 | 5 | 5 | false | 5 | + 2020-02-01 00:00:10.000 | td32727 | 10 | 10 | false | 10 | + 2020-02-01 00:00:15.000 
| td32727 | 15 | 15 | false | 15 | + +taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (value, 1) order by irowts, c2, c3; + irowts | table_name | c_c2 | c_c3 | isfilled | intp_c1 | +====================================================================================================================== + 2020-02-01 00:00:04.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:04.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:04.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:05.000 | td32727 | 5 | 5 | false | 5 | + 2020-02-01 00:00:05.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:05.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:06.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:06.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:06.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:07.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:07.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:07.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:08.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:08.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:08.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:09.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:09.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:09.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:10.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:10.000 | td32727 | 10 | 10 | false | 10 | + 2020-02-01 00:00:10.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:11.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:11.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:11.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:12.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:12.000 | td32727 | 10 | 10 | 
true | 1 | + 2020-02-01 00:00:12.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:13.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:13.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:13.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:14.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:14.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:14.000 | td32727 | 15 | 15 | true | 1 | + 2020-02-01 00:00:15.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:15.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:15.000 | td32727 | 15 | 15 | false | 15 | + 2020-02-01 00:00:16.000 | td32727 | 5 | 5 | true | 1 | + 2020-02-01 00:00:16.000 | td32727 | 10 | 10 | true | 1 | + 2020-02-01 00:00:16.000 | td32727 | 15 | 15 | true | 1 | + diff --git a/tests/army/query/function/ans/repeat.csv b/tests/army/query/function/ans/repeat.csv index d8f8b3050fe..c303f164a97 100644 --- a/tests/army/query/function/ans/repeat.csv +++ b/tests/army/query/function/ans/repeat.csv @@ -108,13 +108,13 @@ taos> select repeat(nch1, id) from ts_4893.meters where id > 0 order by ts limit novelnovelnovelnovelnovel | taos> select repeat(var1, id) from ts_4893.meters where id > 0 order by ts limit 5 - repeat(var1, id) | -=================== - person | - novelnovel | - plateplateplate | - 一二三四五六... | - updateupdateu... | + repeat(var1, id) | +================================= + person | + novelnovel | + plateplateplate | + 一二三四五六七八九十一二三... 
| + updateupdateupdateupdateupdate | taos> select repeat('nch1', id) from ts_4893.meters where id > 0 order by ts limit 5 repeat('nch1', id) | @@ -229,32 +229,32 @@ taos> select repeat(var1, 3) from ts_4893.meters order by ts limit 10 plateplateplate | taos> select repeat(name, groupid) from ts_4893.d0 order by ts limit 10 - repeat(name, groupid) | -======================== - lili | - x | - lili | - x | - lili | - taos | - haha | - taos | - taos | - haha | + repeat(name, groupid) | +================================= + lili | + x | + lili | + x | + lili | + taos | + haha | + taos | + taos | + haha | taos> select repeat(name, groupid) from ts_4893.meters order by ts limit 10 - repeat(name, groupid) | -======================== - lili | - x | - lili | - x | - lili | - taos | - haha | - taos | - taos | - haha | + repeat(name, groupid) | +================================= + lili | + x | + lili | + x | + lili | + taos | + haha | + taos | + taos | + haha | taos> select repeat(nch1, groupid) from ts_4893.d0 order by ts limit 10 repeat(nch1, groupid) | @@ -355,9 +355,9 @@ taos> select repeat('你好', 2) 你好你好 | taos> select repeat('abc', length('abc')) - repeat('abc', length('abc')) | -=============================== - abcabcabc | + repeat('abc', length('abc')) | +================================= + abcabcabc | taos> select repeat(concat('A', 'B', 'C'), 3) repeat(concat('A', 'B', 'C'), 3) | diff --git a/tests/army/query/function/in/interp.in b/tests/army/query/function/in/interp.in new file mode 100644 index 00000000000..4825ab46b16 --- /dev/null +++ b/tests/army/query/function/in/interp.in @@ -0,0 +1,15 @@ +select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (null) order by irowts; +select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname range('2020-02-01 
00:00:04', '2020-02-01 00:00:16') every(1s) fill (next) order by irowts; +select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (prev) order by irowts; +select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (linear) order by irowts; +select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (value, 1) order by irowts; +select _irowts as irowts ,tbname as table_name, c2 as c_c2, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (null) order by irowts, c2; +select _irowts as irowts ,tbname as table_name, c2 as c_c2, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (next) order by irowts, c2; +select _irowts as irowts ,tbname as table_name, c2 as c_c2, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (prev) order by irowts, c2; +select _irowts as irowts ,tbname as table_name, c2 as c_c2, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (linear) order by irowts, c2; +select _irowts as irowts ,tbname as table_name, c2 as c_c2, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (value, 1) order by irowts, c2; +select _irowts as irowts 
,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (null) order by irowts, c2, c3; +select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (next) order by irowts, c2, c3; +select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (prev) order by irowts, c2, c3; +select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (linear) order by irowts, c2, c3; +select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (value, 1) order by irowts, c2, c3; diff --git a/tests/army/query/function/test_function.py b/tests/army/query/function/test_function.py index bf7cf492908..d54460804a9 100644 --- a/tests/army/query/function/test_function.py +++ b/tests/army/query/function/test_function.py @@ -296,7 +296,7 @@ def test_min(self): def test_error(self): tdSql.error("select * from (select to_iso8601(ts, timezone()), timezone() from ts_4893.meters \ - order by ts desc) limit 1000;", expectErrInfo="Not supported timzone format") # TS-5340 + order by ts desc) limit 1000;", expectErrInfo="Invalid parameter data type : to_iso8601") # TS-5340 def run(self): tdLog.debug(f"start to excute {__file__}") diff --git a/tests/army/query/function/test_interp.py 
b/tests/army/query/function/test_interp.py new file mode 100644 index 00000000000..f903e7be734 --- /dev/null +++ b/tests/army/query/function/test_interp.py @@ -0,0 +1,72 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from frame import etool +from frame.etool import * +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame.common import * + +class TDTestCase(TBase): + updatecfgDict = { + "keepColumnName": "1", + "ttlChangeOnWrite": "1", + "querySmaOptimize": "1", + "slowLogScope": "none", + "queryBufferSize": 10240 + } + + def insert_data(self): + tdLog.printNoPrefix("==========step1:create table") + + tdSql.execute("create database test keep 36500") + tdSql.execute("use test") + tdSql.execute( + f'''create table if not exists test.td32727 + (ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10), c9 tinyint unsigned, c10 smallint unsigned, c11 int unsigned, c12 bigint unsigned) + ''' + ) + + tdLog.printNoPrefix("==========step2:insert data") + + tdSql.execute(f"insert into test.td32727 values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar', 5, 5, 5, 5)") + tdSql.execute(f"insert into test.td32727 values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar', 10, 10, 10, 10)") + tdSql.execute(f"insert into test.td32727 values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar', 15, 15, 15, 15)") + + + def 
test_normal_query_new(self, testCase): + # read sql from .sql file and execute + tdLog.info("test normal query.") + self.sqlFile = etool.curFile(__file__, f"in/{testCase}.in") + self.ansFile = etool.curFile(__file__, f"ans/{testCase}.csv") + + tdCom.compare_testcase_result(self.sqlFile, self.ansFile, testCase) + + def test_interp(self): + self.test_normal_query_new("interp") + + def run(self): + tdLog.debug(f"start to excute {__file__}") + + self.insert_data() + + # math function + self.test_interp() + + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/army/query/function/test_percentile.py b/tests/army/query/function/test_percentile.py new file mode 100644 index 00000000000..004cad54c99 --- /dev/null +++ b/tests/army/query/function/test_percentile.py @@ -0,0 +1,194 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from frame import etool +from frame.etool import * +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame.common import * + +class TDTestCase(TBase): + updatecfgDict = { + "keepColumnName": "1", + "ttlChangeOnWrite": "1", + "querySmaOptimize": "1", + "slowLogScope": "none", + "queryBufferSize": 10240 + } + + def insert_data(self): + tdLog.printNoPrefix("==========step1:create table") + + tdSql.execute(f'create database if not exists td32506') + tdSql.execute(f'use td32506') + tdSql.execute(f'CREATE STABLE fs_table (ts TIMESTAMP, speed INT, color VARCHAR(16), tinyint_type_col_name TINYINT, smallint_type_col_name SMALLINT, bigint_type_col_name BIGINT, ' + f'utinyint_type_col_name TINYINT, usmallint_type_col_name SMALLINT, uint_type_col_name INT, ubigint_type_col_name BIGINT, float_type_col_name FLOAT, ' + f'double_type_col_name DOUBLE, bool_type_col_name BOOL, nchar_type_col_name NCHAR(16), varchar_type_col_name VARCHAR(16), ' + f'varbinary_type_col_name VARBINARY(16),geometry_type_col_name GEOMETRY(32)) TAGS (b VARCHAR(200), f FLOAT, tinyint_type_tag_name TINYINT, ' + f'smallint_type_tag_name SMALLINT, int_type_tag_name INT, bigint_type_tag_name BIGINT, utinyint_type_tag_name TINYINT, ' + f'usmallint_type_tag_name SMALLINT UNSIGNED, uint_type_tag_name INT UNSIGNED, ubigint_type_tag_name BIGINT, double_type_tag_name DOUBLE, ' + f'bool_type_tag_name BOOL, nchar_type_tag_name NCHAR(16), varchar_type_tag_name VARCHAR(16), varbinary_type_tag_name VARBINARY(64), ' + f'geometry_type_tag_name GEOMETRY(32), extratag INT)') + tdSql.execute(f'CREATE TABLE reg_table_159 USING fs_table ' + f'(b, f, tinyint_type_tag_name, 
smallint_type_tag_name, int_type_tag_name, bigint_type_tag_name, ' + f'utinyint_type_tag_name, usmallint_type_tag_name, uint_type_tag_name, ubigint_type_tag_name, ' + f'double_type_tag_name, bool_type_tag_name, nchar_type_tag_name, varchar_type_tag_name, varbinary_type_tag_name) ' + f'TAGS ("fgiaaopuphardlom", -3.302167e+38, 40, 18667, 1116729408, -6426992149481917950, 55, 4674, 1756351183, ' + f'7228005179153159914, -3.428740e+307, false, "emvhqjcixroitxiw", "fixwxdovhhbizqdm", "\x786565787775656D6F667A666A646463")') + + tdLog.printNoPrefix("==========step2:insert data") + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,double_type_tag_name,bigint_type_tag_name,usmallint_type_tag_name,varchar_type_tag_name,geometry_type_tag_name,int_type_tag_name,varbinary_type_tag_name,ubigint_type_tag_name,smallint_type_tag_name,utinyint_type_tag_name,uint_type_tag_name,f,tinyint_type_tag_name,bool_type_tag_name,b) TAGS("emvhqjcixroitxiw", -3.4287401579952453e+307, -6426992149481917950, 4674, "fixwxdovhhbizqdm", "point(1.0 1.0)", 1116729408, "xeexwuemofzfjddc", 7228005179153159914, 18667, 55, 1756351183, -3.302167385734522e+38, 40, False, "fgiaaopuphardlom") (ts,varchar_type_col_name,uint_type_col_name,speed,smallint_type_col_name,nchar_type_col_name,ubigint_type_col_name,varbinary_type_col_name,float_type_col_name,bigint_type_col_name,double_type_col_name,geometry_type_col_name,color,bool_type_col_name,usmallint_type_col_name,utinyint_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 00:49:27", "jvudhjbmixxuubhl", 1327384783, 215895363, 16025, "llosyvhgzqpixdru", -3772449087838215561, "jvludkxlqobiigip", -2.978105332100778e+37, -5559805599911459602, -4.028726372555818e+307, "point(1.0 1.0)", "bdencejzdarqaeef", True, 19468, 35, -30);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,bool_type_tag_name) TAGS(1653079398, True) 
(ts,bigint_type_col_name,ubigint_type_col_name,float_type_col_name,color,double_type_col_name,bool_type_col_name,smallint_type_col_name,uint_type_col_name) VALUES ("2016-12-16 00:58:36", 1083391961316260438, 3613986442426750782, -1.0453149756996617e+38, "tvaiakmmcxzbepra", -1.4689107839018581e+308, True, -18675, 138061020);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,bigint_type_tag_name,ubigint_type_tag_name,int_type_tag_name,varchar_type_tag_name,b,f,smallint_type_tag_name,tinyint_type_tag_name,bool_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,nchar_type_tag_name,usmallint_type_tag_name,geometry_type_tag_name,varbinary_type_tag_name) TAGS(-7.061730755263099e+307, 2107452935481758830, 2258834966577471147, -952230254, "vhnwshrapagnalqu", "levvmtztgprzatat", 6.737169619795772e+37, 8872, 2, True, 1075287886, -60, "mpdmcvwntqfusvhm", 36270, "point(1.0 1.0)", "ctnxatxiaymaekvj") (ts) VALUES ("2016-12-16 01:23:36");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,f,usmallint_type_tag_name,smallint_type_tag_name,double_type_tag_name) TAGS("point(1.0 1.0)", -8.285007090644336e+37, 50936, -28943, 3.4910133200480327e+307) (ts,tinyint_type_col_name,geometry_type_col_name,ubigint_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 01:37:37", 20, "point(1.0 1.0)", -8279051334405446366, -11586);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varbinary_type_tag_name,f,ubigint_type_tag_name,nchar_type_tag_name,uint_type_tag_name,int_type_tag_name,utinyint_type_tag_name,smallint_type_tag_name) TAGS("qbwmnupomqkllhdf", -1.731243531476578e+38, -3044376275988311477, "onwtdayawxuoayuh", 3923375490, -122362890, -50, -25842) (ts,tinyint_type_col_name,uint_type_col_name,bigint_type_col_name,varbinary_type_col_name,color) VALUES ("2016-12-16 01:47:01", 38, -912910938, -7421282029380796738, "zqqrmdatsixdjwmv", "qensugigfedpokag");') + 
tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,nchar_type_tag_name,bigint_type_tag_name,b,tinyint_type_tag_name,smallint_type_tag_name) TAGS(3.0006598369272955e+38, "qgmecuzexdlbjoen", 2548336842131148813, "kvulvyjmnsujbygx", 54, 12017) (ts,ubigint_type_col_name,double_type_col_name,geometry_type_col_name,color,bool_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 02:41:15", -8037368176580035774, 1.8094617142119061e+307, "point(1.0 1.0)", "uriutisredzfnels", False, -50);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,usmallint_type_tag_name,uint_type_tag_name,geometry_type_tag_name) TAGS(-18163, 36635, 3511596470, "point(1.0 1.0)") (ts,utinyint_type_col_name,varchar_type_col_name,uint_type_col_name,bigint_type_col_name,bool_type_col_name,varbinary_type_col_name,ubigint_type_col_name,smallint_type_col_name,geometry_type_col_name,float_type_col_name,color,tinyint_type_col_name,usmallint_type_col_name,speed,double_type_col_name) VALUES ("2016-12-16 02:41:32", 53, "izpzycfgyyljiafe", 748493885, 1182091325231355552, True, "wirugruhofvuzvfq", -6622830592002864757, 2403, "point(1.0 1.0)", -2.0533976172415304e+38, "ypyrkfwkzsvykvbq", -98, 7975, 1661529650, 1.4776735328584477e+308);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name,varchar_type_tag_name,bool_type_tag_name,uint_type_tag_name,ubigint_type_tag_name,double_type_tag_name,f) TAGS(58, "mxcovlujiaipbrxe", False, 3023725445, -8300199672256037241, 3.493291803670194e+307, -2.0809024953240414e+38) (ts,utinyint_type_col_name,varbinary_type_col_name,bigint_type_col_name,speed,color,uint_type_col_name,usmallint_type_col_name,bool_type_col_name,geometry_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 03:00:08", -47, "achpeufmryizqrmv", -3195561950072510641, -674332102, "ocyfdyeztjbdajyj", 1620966159, 3256, True, "point(1.0 1.0)", "ujaddbbtrpfhirhk");') + tdSql.execute(f'INSERT 
INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,bool_type_tag_name,b,smallint_type_tag_name,geometry_type_tag_name,double_type_tag_name) TAGS(22844, False, "mtfrbktpnjmdaazm", 26985, "point(1.0 1.0)", 1.6179238326982066e+308) (ts) VALUES ("2016-12-16 03:02:29");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,f,uint_type_tag_name,smallint_type_tag_name,tinyint_type_tag_name) TAGS("point(1.0 1.0)", -3.0320850510538104e+38, 3560125456, 11119, 100) (ts,varbinary_type_col_name,double_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 03:04:06", "sujwqvsfertzzcuk", -1.4040026870544444e+308, 2475978411998036438);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varbinary_type_tag_name,usmallint_type_tag_name) TAGS("orgrtrdbgldlyogq", 28231) (ts,varchar_type_col_name,double_type_col_name,bool_type_col_name,usmallint_type_col_name,geometry_type_col_name,bigint_type_col_name,color,speed,uint_type_col_name,ubigint_type_col_name) VALUES ("2016-12-16 03:04:22", "xdwjwlgxgpkaqnnb", 9.723011865690129e+307, True, -13258, "point(1.0 1.0)", 7895225088661281332, "srsjscgbnyrhrpmo", -1908698583, -777280648, -562221736344996425);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name) TAGS(-86) (ts,nchar_type_col_name) VALUES ("2016-12-16 03:12:18", "vcjkutzmjnmwreep");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,tinyint_type_tag_name) TAGS(-6.142189587223866e+37, -107) (ts,uint_type_col_name,usmallint_type_col_name,bigint_type_col_name,geometry_type_col_name,double_type_col_name,nchar_type_col_name,utinyint_type_col_name,speed,tinyint_type_col_name,ubigint_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 03:16:08", -1334666808, 16860, 3563429449831894323, "point(1.0 1.0)", -6.706661740752272e+307, "vmfhrazqvgrsyjbi", 106, -2118429478, -24, -5510629554223761040, "hcpvgxxsqivxahrs");') + 
tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,varbinary_type_tag_name,f,b,tinyint_type_tag_name,utinyint_type_tag_name) TAGS("point(1.0 1.0)", "ycquvwnecivxvdkq", 1.0805517348297708e+38, "qvdfdhvjokfwimfb", -16, -28) (ts,color,varbinary_type_col_name) VALUES ("2016-12-16 03:24:44", "khyzooeyfjsndqbl", "mxeaotkheqyjkwfe");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,double_type_tag_name,nchar_type_tag_name,tinyint_type_tag_name,bigint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,uint_type_tag_name) TAGS(-2.6250101215841932e+38, -1.4224845739873728e+308, "vitarwbjdtoaouwk", -21, 2835005149249208489, False, -2853745842969962537, 2818492304) (ts,float_type_col_name,varbinary_type_col_name,speed,utinyint_type_col_name,double_type_col_name,uint_type_col_name) VALUES ("2016-12-16 03:33:59", 4.143242721974893e+37, "tkcfmjxczscjjbfw", -126722956, 32, 1.5620105176463347e+308, -403334517);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,bool_type_tag_name,usmallint_type_tag_name,varbinary_type_tag_name,uint_type_tag_name) TAGS(1.1219685887309601e+38, True, 38172, "yjkrksyfhkqpxulw", 3797495177) (ts,double_type_col_name,ubigint_type_col_name,speed,nchar_type_col_name,smallint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 03:37:00", 4.3397415421427257e+307, 2658580196646742125, 1848731552, "dauismisbvpldvbh", -8915, -10205);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,nchar_type_tag_name,f,bool_type_tag_name,varchar_type_tag_name,bigint_type_tag_name,int_type_tag_name,usmallint_type_tag_name,varbinary_type_tag_name,smallint_type_tag_name,ubigint_type_tag_name,geometry_type_tag_name,b,utinyint_type_tag_name) TAGS(1.8933544224270275e+307, "xeuwzufexkviruji", -1.0719023172814157e+38, False, "etafmdbercjranpw", 8783154341984945645, 1620912647, 35459, "amdmlvxpympytkxq", 27136, 
4977907901152915273, "point(1.0 1.0)", "sndpqcboosvhuzuq", -27) (ts) VALUES ("2016-12-16 03:38:29");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,f,int_type_tag_name,varbinary_type_tag_name,usmallint_type_tag_name,varchar_type_tag_name,smallint_type_tag_name,b,geometry_type_tag_name,uint_type_tag_name) TAGS(-1374459770575094448, 2.693293747482498e+38, 650971033, "pmviadhwiouunffs", 44961, "nxvgxzcwabujtppn", 17943, "pcctjatkdlbevdrn", "point(1.0 1.0)", 1568392342) (ts,utinyint_type_col_name,varbinary_type_col_name,nchar_type_col_name,float_type_col_name) VALUES ("2016-12-16 03:50:53", 37, "tqdxypikpbpopiid", "evjbmpbxsscpssei", 2.890872457201385e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,nchar_type_tag_name,uint_type_tag_name,varbinary_type_tag_name,varchar_type_tag_name,b,utinyint_type_tag_name,bool_type_tag_name,geometry_type_tag_name,bigint_type_tag_name,ubigint_type_tag_name,tinyint_type_tag_name,usmallint_type_tag_name,int_type_tag_name,smallint_type_tag_name) TAGS(3.1967188798436023e+307, "dlqosxrxmrdlexvu", 490088010, "abfyijmjhtypgmjo", "vrjjkvfezfqjsjxn", "jguyfszmyqtoaiuy", 15, False, "point(1.0 1.0)", 7656873837277040486, -2879397104283241297, 79, 1548, 474297665, 27763) (ts,usmallint_type_col_name,tinyint_type_col_name,float_type_col_name,ubigint_type_col_name,varchar_type_col_name,speed,uint_type_col_name,geometry_type_col_name,double_type_col_name,utinyint_type_col_name,color,varbinary_type_col_name) VALUES ("2016-12-16 04:02:41", -22725, 121, -1.7363002143786742e+38, -5956529834331945772, "fdebpzbdywcxygxq", -1037829517, -344092943, "point(1.0 1.0)", 1.687974407803454e+308, -15, "njduhepaglkeckdd", "svrtpquvktqimuab");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(f,smallint_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,int_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name,varbinary_type_tag_name,bigint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,uint_type_tag_name) TAGS(2.0855939441502307e+38, 31096, "point(1.0 1.0)", 1835, -1486483188, -63, -123, "dhfkbddofbppzizu", -8035802647163979457, True, -7483826213921795749, 795995100) (ts,geometry_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 04:10:21", "point(1.0 1.0)", 1969408246531214471);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,tinyint_type_tag_name,ubigint_type_tag_name,utinyint_type_tag_name,varbinary_type_tag_name,int_type_tag_name,double_type_tag_name,nchar_type_tag_name,b,f,geometry_type_tag_name,smallint_type_tag_name) TAGS(25593, 43, 7818398761945137262, -55, "vpwzpopkwmijjyqx", 826644318, -8.222056376363248e+307, "jjxxlxujrdkmkqez", "hkzvxvlmluxfhiao", 9.217736393675792e+37, "point(1.0 1.0)", 13064) (ts,float_type_col_name,geometry_type_col_name,nchar_type_col_name,tinyint_type_col_name,usmallint_type_col_name,ubigint_type_col_name,smallint_type_col_name,bool_type_col_name,utinyint_type_col_name,varchar_type_col_name,double_type_col_name,uint_type_col_name,bigint_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 04:12:51", -1.1155033313780145e+38, "point(1.0 1.0)", "vwwljlxnuieozvfo", 5, -21906, 166234169597926020, -26423, True, -124, "johlaoepdvtqhimi", 1.6733911742274567e+308, 2100308092, 5485398075914940612, "goqmambtlodgwgxq");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,bigint_type_tag_name,tinyint_type_tag_name,int_type_tag_name,double_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,varchar_type_tag_name,b,uint_type_tag_name,f) TAGS("point(1.0 1.0)", -303959115421074377, 55, 58404549, 1.1317582542682038e+308, 8228, False, "bctnlrzfozjhicdk", "tgtjnkhoxjrqmnmu", 
2346764198, -1.431762906013641e+38) (ts,float_type_col_name,speed,bigint_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 04:13:42", 9.989635187087936e+37, -1325033823, 6439771353766292866, -8002);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name) TAGS("zsmwyzfqmozmyniw") (ts,bool_type_col_name,speed,varbinary_type_col_name,uint_type_col_name,varchar_type_col_name,geometry_type_col_name,double_type_col_name,utinyint_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 04:21:19", True, -1595645133, "fcmpqbuxdduvfwrq", -1256972424, "doijzwclrxdbzwwq", "point(1.0 1.0)", 6.027483417919201e+307, 107, 5847005732723402961);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,smallint_type_tag_name,varchar_type_tag_name,usmallint_type_tag_name,b,geometry_type_tag_name,uint_type_tag_name,ubigint_type_tag_name) TAGS(1.9234061768725975e+306, 27143, "dlsvyehikpxuzggw", 18465, "xhwupqgazckmiobv", "point(1.0 1.0)", 1009963833, 2202148978430704618) (ts,nchar_type_col_name,color,tinyint_type_col_name,ubigint_type_col_name) VALUES ("2016-12-16 04:25:17", "qshpnutukcvqxtaj", "zryqatfkrrhxravq", -123, 5614613512735569542);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,bigint_type_tag_name,uint_type_tag_name,ubigint_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,double_type_tag_name,varchar_type_tag_name,bool_type_tag_name,nchar_type_tag_name,tinyint_type_tag_name,int_type_tag_name,varbinary_type_tag_name,b,f,utinyint_type_tag_name) TAGS(-25502, -4694006038921375834, 4273176734, -7092260276769281916, "point(1.0 1.0)", 3016, 1.6354692626546982e+308, "hjbzklszxdzdoxqn", False, "dexhfkdiumjzdbtu", 55, 1628959, "caeogfirfkzuqrgm", "kklniqcfjgtnpaat", 1.2381441388115072e+38, 34) 
(ts,smallint_type_col_name,float_type_col_name,uint_type_col_name,nchar_type_col_name,geometry_type_col_name,varchar_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 04:30:12", -15424, -9.958486706164754e+37, 1543951155, "uonnnwqaxjnzcktw", "point(1.0 1.0)", "sbnhooyzwvvvgdsb", "iguafwamsgzdtcid");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name,bigint_type_tag_name,nchar_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,b,smallint_type_tag_name,f,ubigint_type_tag_name,int_type_tag_name,uint_type_tag_name,varchar_type_tag_name) TAGS(125, 7128483423223572360, "wknmbdmxriwytuzs", "point(1.0 1.0)", 23719, "cehohszybqisbcsr", 29052, -1.2266502524670797e+38, -5190174475276867332, -1054122176, 3316814729, "zeympzallufrrwwk") (ts,float_type_col_name) VALUES ("2016-12-16 04:42:07", -3.0086436232306867e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,smallint_type_tag_name) TAGS(24922, 19120) (ts,ubigint_type_col_name,varbinary_type_col_name,bool_type_col_name,bigint_type_col_name,usmallint_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 04:44:37", 2119708740431033246, "tfscgthihrecwcca", True, 3858755530043058209, 16911, 49);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bool_type_tag_name,geometry_type_tag_name,varchar_type_tag_name,int_type_tag_name,nchar_type_tag_name,uint_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name,smallint_type_tag_name,double_type_tag_name,usmallint_type_tag_name) TAGS(False, "point(1.0 1.0)", "lgomrzeuveghuavk", 698597980, "fibwfbupmoxwxwhu", 2299571086, 88, 26, -784, 5.836820071314546e+307, 44181) 
(ts,color,nchar_type_col_name,ubigint_type_col_name,float_type_col_name,utinyint_type_col_name,usmallint_type_col_name,geometry_type_col_name,varchar_type_col_name,tinyint_type_col_name,double_type_col_name,speed,uint_type_col_name,bigint_type_col_name,varbinary_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 04:48:53", "fdrpbbuwolrgigwu", "ovkkzehagcozavag", -859824747452770489, 2.668460352935784e+38, -36, -4146, "point(1.0 1.0)", "zpnfvkdabqhvvugr", 122, -1.6659109789494941e+308, -339715491, -744820813, 4922622614018944659, "drckbshqwqdgjywk", 26922);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varchar_type_tag_name,nchar_type_tag_name,bool_type_tag_name,varbinary_type_tag_name,usmallint_type_tag_name,geometry_type_tag_name,utinyint_type_tag_name,b,bigint_type_tag_name,uint_type_tag_name,tinyint_type_tag_name,double_type_tag_name) TAGS("fnudpzattpadznjh", "curlvpnayxqyhgnj", True, "qujeqlinronfmnra", 26904, "point(1.0 1.0)", 103, "rmdyvbxvfscrklat", 1772757837581587144, 3755231946, -28, 1.392881389803201e+308) (ts) VALUES ("2016-12-16 04:49:43");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,smallint_type_tag_name,usmallint_type_tag_name,f,uint_type_tag_name,b,varbinary_type_tag_name,bigint_type_tag_name,utinyint_type_tag_name) TAGS("point(1.0 1.0)", 23766, 34510, -9.391278476138455e+37, 1424312679, "usmzlgendwxwjzkd", "gfsuserzmdovezbh", -3630795755071364737, 33) (ts,ubigint_type_col_name,speed,bool_type_col_name,smallint_type_col_name,geometry_type_col_name,varbinary_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 05:08:42", 9043564226598764196, -1058070689, True, -27689, "point(1.0 1.0)", "jkcbtfqphclecaiw", -2968);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,bigint_type_tag_name,b) TAGS("dtpxqkpvzwqhqtlf", 8497835948198860590, "labufkbmgoaihoke") 
(ts,speed,uint_type_col_name,ubigint_type_col_name,utinyint_type_col_name,usmallint_type_col_name,varchar_type_col_name,color,varbinary_type_col_name,tinyint_type_col_name,smallint_type_col_name,nchar_type_col_name,bigint_type_col_name,float_type_col_name,bool_type_col_name,double_type_col_name) VALUES ("2016-12-16 05:32:32", -1915664223, 1413499274, -5989223773653851128, -116, 26921, "ownicmwaptorttec", "ygzvhoyputddyowk", "wrkogplcbpemkxdn", -33, 18817, "clrhqokhsspzbxgc", -859502208102825417, -1.5128753403587181e+38, True, -1.3508466034943649e+308);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,varbinary_type_tag_name,int_type_tag_name,b,tinyint_type_tag_name,double_type_tag_name,ubigint_type_tag_name,bigint_type_tag_name,uint_type_tag_name) TAGS("point(1.0 1.0)", "uvvthorbwmsnfjoz", 1030009117, "tyysttboblgpkypz", -69, 1.23842724503325e+308, 9192485125431744596, 29000826379644817, 3538861351) (ts) VALUES ("2016-12-16 05:41:58");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,bool_type_tag_name,varchar_type_tag_name,utinyint_type_tag_name,ubigint_type_tag_name,tinyint_type_tag_name,double_type_tag_name,f,varbinary_type_tag_name,b,uint_type_tag_name,bigint_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,smallint_type_tag_name) TAGS(859129059, True, "uxbtbeicqswhoomz", -62, 3734511001108851286, 34, -1.5815188393505786e+308, 4.745631034394835e+37, "qoxpifitbedupend", "ppnqdxusgmlszoqw", 1004378621, 3705733919632701068, "point(1.0 1.0)", 38489, 29461) (ts,tinyint_type_col_name,usmallint_type_col_name,varchar_type_col_name,color,geometry_type_col_name,float_type_col_name,bool_type_col_name,speed,double_type_col_name,bigint_type_col_name,varbinary_type_col_name,uint_type_col_name) VALUES ("2016-12-16 06:08:50", 76, 12466, "nujbmtudnrleyvdy", "bgzmcovdcpghdlzi", "point(1.0 1.0)", -1.7192977879525407e+38, False, -910281174, 1.5781580952881898e+308, 
-3488933529074665133, "tfrgpkdcgqesmvbt", -1938221877);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,varchar_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name,f,double_type_tag_name,bigint_type_tag_name,int_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,smallint_type_tag_name,geometry_type_tag_name,b,ubigint_type_tag_name,varbinary_type_tag_name) TAGS("zfsuqkilwplvrebi", "ccqtsosvtnlbnkig", 103, 78, 1.3362517686508279e+38, -9.563002829076875e+307, -6627606384484374568, 1126761657, 27844, True, 30861, "point(1.0 1.0)", "hmnpllitztvocynw", 8180438212890952155, "qfekhbflxpldbddz") (ts,varchar_type_col_name,geometry_type_col_name,uint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 06:12:48", "kavbtvnlqfmeyfgh", "point(1.0 1.0)", 86018588, 3835);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,bigint_type_tag_name,utinyint_type_tag_name,varbinary_type_tag_name,varchar_type_tag_name,geometry_type_tag_name,int_type_tag_name) TAGS("rfnbxcuzeizhglks", 7283103569704428551, 94, "dudcfczmlnwyrkvp", "geivplrozxtocipa", "point(1.0 1.0)", -1224429691) (ts,float_type_col_name,varbinary_type_col_name,double_type_col_name,tinyint_type_col_name,uint_type_col_name) VALUES ("2016-12-16 06:26:38", -1.3097826665873164e+37, "xwfgtqoddvxyhcre", 1.5229330656778251e+308, 118, 1385681942);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,nchar_type_tag_name,bigint_type_tag_name,utinyint_type_tag_name) TAGS(9169294385094804151, "rdibiqdizmdrkfrf", 3962954747786936314, 54) (ts,geometry_type_col_name,varbinary_type_col_name,utinyint_type_col_name,smallint_type_col_name,float_type_col_name,tinyint_type_col_name,bigint_type_col_name,speed) VALUES ("2016-12-16 06:32:37", "point(1.0 1.0)", "ciymbzfmbfqqsxkv", -76, 32317, -9.09960741209682e+37, 77, 3789368028357457617, -432822270);') + tdSql.execute(f'INSERT INTO 
td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,f,tinyint_type_tag_name,varchar_type_tag_name,bool_type_tag_name,utinyint_type_tag_name,bigint_type_tag_name,smallint_type_tag_name,int_type_tag_name,b,geometry_type_tag_name,nchar_type_tag_name,usmallint_type_tag_name,varbinary_type_tag_name) TAGS(-1.2944016628561449e+308, 7.983554080975201e+37, 53, "fcbmysknvqvehfiz", True, -6, -8922838578018838847, -20254, 1722340321, "wrrcjsdfwrihjnqy", "point(1.0 1.0)", "yflqloxmpchzkdts", 57539, "kvvzxzerqnmnzxwk") (ts,nchar_type_col_name,tinyint_type_col_name,ubigint_type_col_name,geometry_type_col_name,usmallint_type_col_name,double_type_col_name,varchar_type_col_name,uint_type_col_name,bool_type_col_name,speed,bigint_type_col_name,varbinary_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 06:34:00", "mrzcfbzniotxzbdi", 58, 7707860227701048488, "point(1.0 1.0)", 31402, -5.6095749182351896e+306, "ibzewxdivctecfot", -1652235605, False, -933263235, 384543174410180162, "woixarpkcucglfey", 124);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,tinyint_type_tag_name,nchar_type_tag_name,bool_type_tag_name,bigint_type_tag_name,geometry_type_tag_name,uint_type_tag_name,varchar_type_tag_name,int_type_tag_name,varbinary_type_tag_name,utinyint_type_tag_name) TAGS(-5.930942323411467e+307, -121, "ckbowkjukztkjqoi", True, -2618046353021226711, "point(1.0 1.0)", 2166418040, "yxhuoimpnuqhlhgw", -1659085223, "zeyylgulwwdnwsii", -3) (ts,bigint_type_col_name) VALUES ("2016-12-16 06:34:14", -4941469222634542296);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,b,tinyint_type_tag_name,varbinary_type_tag_name,smallint_type_tag_name,double_type_tag_name,nchar_type_tag_name,bool_type_tag_name,utinyint_type_tag_name,f,uint_type_tag_name) TAGS(5489251136620324287, "igrbjdomcwywiekb", -25, "xhhaerxfxfobwcvn", 14001, -1.796912338313077e+307, "umtnvilvgmbxbxsq", True, 43, 
-1.8528500716419152e+38, 1497513541) (ts) VALUES ("2016-12-16 06:35:34");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,ubigint_type_tag_name,f,b,bool_type_tag_name,geometry_type_tag_name,utinyint_type_tag_name,nchar_type_tag_name,smallint_type_tag_name,int_type_tag_name,usmallint_type_tag_name,varchar_type_tag_name) TAGS(2431406493780788447, 4861403703027821860, 1.99259675649246e+38, "yzdneqzibyuwrypn", True, "point(1.0 1.0)", -127, "llytlpdymtuetvfm", -25050, -543736632, 11798, "dgznnsdqgrnrmazq") (ts,bool_type_col_name) VALUES ("2016-12-16 06:36:04", False);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,tinyint_type_tag_name,double_type_tag_name,varbinary_type_tag_name,varchar_type_tag_name,geometry_type_tag_name) TAGS(-5334, -79, 1.2315866993517746e+308, "mywxqnmbjqsxdsff", "dahqwtcwipwwbdye", "point(1.0 1.0)") (ts,uint_type_col_name,bigint_type_col_name,geometry_type_col_name,varbinary_type_col_name,tinyint_type_col_name,speed,bool_type_col_name,color,ubigint_type_col_name,float_type_col_name) VALUES ("2016-12-16 07:08:57", -703937341, 508746925742892883, "point(1.0 1.0)", "yymznbjazhklgjxb", 12, 1587775618, True, "ozzyhsagmrpiyrjq", 5401400824616407615, -1.7440186980309764e+37);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,varchar_type_tag_name,tinyint_type_tag_name,bigint_type_tag_name,bool_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,ubigint_type_tag_name,b,utinyint_type_tag_name,f,smallint_type_tag_name) TAGS(-7.999887918520793e+307, "qwbmvcvwhlkxwwld", 21, 468530456171689843, True, "point(1.0 1.0)", 10723, 4059334937310534108, "wgqsiptkcwhvmjwe", 99, -5.310912588143673e+37, -17363) 
(ts,float_type_col_name,geometry_type_col_name,smallint_type_col_name,varbinary_type_col_name,uint_type_col_name,speed,nchar_type_col_name,color,ubigint_type_col_name,bigint_type_col_name,usmallint_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 07:14:40", 2.8869331003756176e+38, "point(1.0 1.0)", -29659, "tbhqqyiqgtgckyyg", -471326270, 944696481, "ysahvyevqkwulqqv", "lzhzxslfmdzwsxpf", -9113216317948681955, 3545622045223678570, -25303, -80);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name) TAGS("auxtouxzloojhqtf") (ts,varbinary_type_col_name,nchar_type_col_name,double_type_col_name,color) VALUES ("2016-12-16 07:15:30", "ddluzbitxrtbkjbj", "ljgnuemqzlbotqwi", -5.352451641324967e+307, "suwegzsujiqfyvqt");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,geometry_type_tag_name,utinyint_type_tag_name,double_type_tag_name,nchar_type_tag_name,uint_type_tag_name,bigint_type_tag_name,int_type_tag_name,varbinary_type_tag_name,smallint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,varchar_type_tag_name,usmallint_type_tag_name,tinyint_type_tag_name,f) TAGS("pizyvqqjzxhkofpm", "point(1.0 1.0)", -22, 1.5081448005935855e+308, "ufwxcfxfunjhxxzm", 282211280, 4861090276718143202, -2091585085, "lbxpmtsvtpucnlzy", -30781, False, 124484168740646, "yjuefabvljbxrgsx", 33815, -72, -1.128908264468181e+38) (ts,bigint_type_col_name,ubigint_type_col_name,bool_type_col_name,varchar_type_col_name,double_type_col_name,smallint_type_col_name,tinyint_type_col_name,uint_type_col_name,speed,geometry_type_col_name) VALUES ("2016-12-16 07:17:15", 1001689612302252183, -105401984359440882, False, "wbbktwsethudmtja", -4.557626601447439e+307, 21541, -98, -2080051974, -1861023150, "point(1.0 1.0)");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(uint_type_tag_name,varchar_type_tag_name,varbinary_type_tag_name,int_type_tag_name,bigint_type_tag_name,geometry_type_tag_name) 
TAGS(2320968032, "dhquczttiqbnfyjr", "cixjeekgvvixcbud", -484475030, -4789196290580581091, "point(1.0 1.0)") (ts,bigint_type_col_name,varbinary_type_col_name,nchar_type_col_name,double_type_col_name,speed,color,varchar_type_col_name,bool_type_col_name,uint_type_col_name,ubigint_type_col_name,smallint_type_col_name,utinyint_type_col_name,float_type_col_name) VALUES ("2016-12-16 07:18:05", -1206873661752154164, "pjkbldrvpglbnulx", "dztdpobuyksgdhgt", -1.1951640769154985e+308, 542907991, "ekubcyxbjiciigss", "xnxufupmzhskjmpq", False, 267042413, 3084280261011428030, 26039, 31, 2.539995004940438e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,uint_type_tag_name,varbinary_type_tag_name,bool_type_tag_name,tinyint_type_tag_name,nchar_type_tag_name,int_type_tag_name,double_type_tag_name) TAGS(-4414306501424620178, 4231079052, "mthgdnjdhnjraffo", False, -106, "hwcjdivkccrlukjw", 1762002945, -1.0781154156730552e+307) (ts,float_type_col_name,varbinary_type_col_name,usmallint_type_col_name,double_type_col_name,speed,varchar_type_col_name,nchar_type_col_name,utinyint_type_col_name,bigint_type_col_name,bool_type_col_name,geometry_type_col_name,uint_type_col_name,smallint_type_col_name,ubigint_type_col_name,color,tinyint_type_col_name) VALUES ("2016-12-16 07:24:51", -8.579854162437403e+37, "izylbbcyhuxvmohl", -11832, -1.238446106977585e+308, 1414832325, "nukonaftikjqqbdj", "cykvszvrxguiajwg", -28, -4542731759437382057, False, "point(1.0 1.0)", -250287824, -22672, 2059749290085408427, "wvojjnanujjbrjbk", 37);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,uint_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,bigint_type_tag_name,geometry_type_tag_name,varchar_type_tag_name,b,nchar_type_tag_name,int_type_tag_name) TAGS(-87, 3054590850, 10240, False, 7471097903390818886, "point(1.0 1.0)", "gvmesmuwkboibbwm", "qnkzvaetvvsoliqw", "tsaezwjdobewrsgg", 718941504) 
(ts,uint_type_col_name,bool_type_col_name,color,varchar_type_col_name,speed,utinyint_type_col_name,tinyint_type_col_name,bigint_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 07:47:31", -1418250282, True, "cxyfgdrwrdryoxdv", "urqrvjnzpghqycjl", 1139755242, -53, -40, -3750115493851220318, "yvystiojfnkeinwv");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name) TAGS(11662) (ts,float_type_col_name,smallint_type_col_name,nchar_type_col_name,bigint_type_col_name,bool_type_col_name,tinyint_type_col_name,uint_type_col_name,speed,varchar_type_col_name,varbinary_type_col_name,usmallint_type_col_name,color,double_type_col_name,utinyint_type_col_name,geometry_type_col_name) VALUES ("2016-12-16 07:47:42", 1.5652814126254586e+38, 31965, "jzglkmapiqvmkcgt", -456718528158805320, True, 13, 1550395191, -468850482, "qsuieurxcdhljsyg", "pimzexqqdusasasq", -25133, "heaqwaulngjummmj", 1.4472853493781175e+307, -100, "point(1.0 1.0)");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,tinyint_type_tag_name,b,varbinary_type_tag_name,int_type_tag_name,bool_type_tag_name,bigint_type_tag_name,usmallint_type_tag_name,varchar_type_tag_name,utinyint_type_tag_name,uint_type_tag_name,ubigint_type_tag_name) TAGS("point(1.0 1.0)", 71, "egjrjdgnsdelmvoi", "nnvaiqrgsmsgulsf", -1764160281, False, -3170988760692140923, 59567, "vvwezjglauxztbkw", -28, 3876664873, 2077268665321637137) (ts,geometry_type_col_name,ubigint_type_col_name,utinyint_type_col_name,speed,varbinary_type_col_name,float_type_col_name,bigint_type_col_name,color) VALUES ("2016-12-16 07:53:43", "point(1.0 1.0)", -8518449815289332525, -90, -1716688898, "llphooyquheusfis", 1.8971999924428717e+38, 6265618875544853883, "mhnrsjknurfeejin");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(utinyint_type_tag_name,int_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,uint_type_tag_name,bigint_type_tag_name,varbinary_type_tag_name,geometry_type_tag_name,f,double_type_tag_name,smallint_type_tag_name,nchar_type_tag_name,varchar_type_tag_name,ubigint_type_tag_name) TAGS(-107, -1135898492, 7359, True, 2054944069, -5204892746217048371, "dtbkgxohffiiqvex", "point(1.0 1.0)", -1.6139634795645272e+38, -3.4474223981039393e+307, -4519, "geosetpzbiyvrxsl", "owahukwcrwnazqmg", -6817825300706607165) (ts,nchar_type_col_name) VALUES ("2016-12-16 07:57:23", "yfefrbnkzlnvszku");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,varbinary_type_tag_name,utinyint_type_tag_name,tinyint_type_tag_name,geometry_type_tag_name,uint_type_tag_name) TAGS("gujkmscmzrumlzgr", "kyjkxjmmiujedyoc", -63, -30, "point(1.0 1.0)", 3040843900) (ts,varbinary_type_col_name,uint_type_col_name,usmallint_type_col_name,tinyint_type_col_name,bigint_type_col_name,nchar_type_col_name,float_type_col_name,bool_type_col_name,utinyint_type_col_name,speed,varchar_type_col_name,smallint_type_col_name,ubigint_type_col_name,color) VALUES ("2016-12-16 08:00:58", "tzthqrttxnhexwie", -1719097053, 12664, 92, 8111449057800298881, "ckhlhqkuqbbkkqbc", -1.2378307380230564e+38, True, -77, 832149914, "zhaatkireunhqjmr", 10552, -1885112753329295315, "etiwbiqspffpftuy");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,uint_type_tag_name,usmallint_type_tag_name,int_type_tag_name,bool_type_tag_name,tinyint_type_tag_name,smallint_type_tag_name,f,b,nchar_type_tag_name,varbinary_type_tag_name,double_type_tag_name,geometry_type_tag_name,ubigint_type_tag_name,varchar_type_tag_name,utinyint_type_tag_name) TAGS(6936877712832826111, 379134044, 5584, -873981273, True, -36, 23332, 3.0590465783887826e+38, "doyrcpkmqdmkvisu", "ebrzlgpzjdhwgbkg", "favnzcgrgbqhjykc", -5.409211271125824e+306, "point(1.0 1.0)", 7020490078397766201, 
"dfmaignpkotpzfzp", 0) (ts,utinyint_type_col_name,color,tinyint_type_col_name,usmallint_type_col_name,speed,float_type_col_name,smallint_type_col_name,varbinary_type_col_name,uint_type_col_name,bool_type_col_name,double_type_col_name) VALUES ("2016-12-16 08:08:05", 100, "embwiippvzztzfsx", -29, 30679, 398157649, -2.8271578721561e+38, 16481, "eapyuftmoemhlous", 1346563710, False, 1.2527960914103297e+308);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,varchar_type_tag_name,f,int_type_tag_name) TAGS(14445, "frbgwoxdfgvomhij", 3.112685005605257e+38, 919563343) (ts,smallint_type_col_name,uint_type_col_name,nchar_type_col_name,color,tinyint_type_col_name,utinyint_type_col_name,double_type_col_name,float_type_col_name,bigint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 08:10:32", 30534, 287062448, "radqyprkqzicegcm", "digmvwqxesuzblxo", -45, 57, 4.2367818689200055e+307, -1.2630859264197575e+38, 5777026179863424498, -18041);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name,geometry_type_tag_name,b) TAGS(-125, "point(1.0 1.0)", "sknwbldiaotpguoh") (ts,geometry_type_col_name,speed,bool_type_col_name,uint_type_col_name,nchar_type_col_name,tinyint_type_col_name,smallint_type_col_name,bigint_type_col_name,usmallint_type_col_name,varchar_type_col_name,float_type_col_name,varbinary_type_col_name,double_type_col_name) VALUES ("2016-12-16 08:21:15", "point(1.0 1.0)", -533077180, True, 125980213, "rzpvyyiafbdgsrlt", -18, 516, 1836336637894618829, -32398, "qyolcbkiphumcyni", -1.7088547654566307e+38, "uhfuttyjdsiwfpge", 9.045738084106129e+307);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,varchar_type_tag_name) TAGS("eibgfpzivbkqipxh", "bajctkjmycezvfjy") (ts,uint_type_col_name,nchar_type_col_name,geometry_type_col_name,speed,varbinary_type_col_name,varchar_type_col_name,ubigint_type_col_name,usmallint_type_col_name) 
VALUES ("2016-12-16 08:27:17", 182114551, "bqoiqgcelwawaxoh", "point(1.0 1.0)", -2048616567, "ngeyahwjhojgfrji", "ajndkftrjrhebhkh", 7449846270987100591, 21014);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,double_type_tag_name) TAGS(-4625726455522573231, 2.8073063831603936e+307) (ts,float_type_col_name,bigint_type_col_name,uint_type_col_name,usmallint_type_col_name,color,varchar_type_col_name,tinyint_type_col_name,nchar_type_col_name,bool_type_col_name,varbinary_type_col_name,geometry_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 08:28:17", 2.389718468668489e+38, 232575111591325522, 1038517632, 22294, "gkuqyywzvitscnmf", "pzotmkbymjkajqxh", 84, "zwaqqoiitsaldvxd", False, "lutscwlzfncvtbzh", "point(1.0 1.0)", 104);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,varchar_type_tag_name,utinyint_type_tag_name,geometry_type_tag_name,smallint_type_tag_name,usmallint_type_tag_name,int_type_tag_name,b,uint_type_tag_name,f,varbinary_type_tag_name,nchar_type_tag_name,double_type_tag_name,tinyint_type_tag_name,bool_type_tag_name,bigint_type_tag_name) TAGS(3789358482002489003, "cbbfcxtbficxjubk", 87, "point(1.0 1.0)", 16290, 65141, 1107108066, "dbvdcpsxhwxkfcyz", 1995515016, 2.7409223337049425e+38, "qngmplwikoseyylt", "tgzwjjwlcizxxwnr", -1.6889546962063194e+308, -114, False, -3482122891531832648) (ts,nchar_type_col_name,tinyint_type_col_name,geometry_type_col_name,double_type_col_name,color,smallint_type_col_name,speed,usmallint_type_col_name,ubigint_type_col_name,varbinary_type_col_name,bigint_type_col_name,varchar_type_col_name,utinyint_type_col_name,float_type_col_name,bool_type_col_name,uint_type_col_name) VALUES ("2016-12-16 08:28:56", "ofdxnlygqcwvsdcj", -29, "point(1.0 1.0)", -1.8130271598650515e+307, "nefvavoukqtdzkgt", -17717, 1471102951, -22978, 6641200189088106550, "tzjbozbjlczkgwxa", -6094740512181599472, "etogqzsgcqipiztv", 83, 
3.26367598847478e+38, False, -748583096);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,uint_type_tag_name,nchar_type_tag_name,bool_type_tag_name,double_type_tag_name,int_type_tag_name,tinyint_type_tag_name) TAGS(-3.3072135298943572e+38, 1159943179, "kswqkzvltgixlmfs", True, -4.306722847726777e+307, 232669674, 110) (ts,double_type_col_name,smallint_type_col_name,utinyint_type_col_name,bool_type_col_name,tinyint_type_col_name,color,float_type_col_name,bigint_type_col_name,uint_type_col_name,geometry_type_col_name,ubigint_type_col_name,usmallint_type_col_name,nchar_type_col_name,varbinary_type_col_name,varchar_type_col_name) VALUES ("2016-12-16 08:32:49", 6.47061313170663e+307, 10319, -39, False, 11, "zrdnjfphcwpeinqc", -1.9983066068141309e+37, -7934490781005501591, 254843018, "point(1.0 1.0)", 3715382894868674189, 15127, "czgcvwrtlnhrftfw", "mmsahbdykxugoszh", "oarqmzgnlqfgoetb");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varchar_type_tag_name,usmallint_type_tag_name,nchar_type_tag_name,double_type_tag_name,bigint_type_tag_name,b,f) TAGS("eqpvdmknfwrubsuz", 25062, "viftdmeaoemnzlte", -8.206973826119586e+307, -3370842340933526422, "laotfjfckrruwnso", 1.6151693420601788e+37) (ts,ubigint_type_col_name,usmallint_type_col_name,bool_type_col_name,varchar_type_col_name,uint_type_col_name,color,float_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 08:39:16", 3961511905025407799, 17678, False, "donavljkknjvmamp", 1568179614, "dgtldyeuhhgqsrhz", 4.974589437659843e+37, 4642);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,varbinary_type_tag_name,utinyint_type_tag_name,smallint_type_tag_name,int_type_tag_name,geometry_type_tag_name,ubigint_type_tag_name,nchar_type_tag_name,double_type_tag_name,tinyint_type_tag_name,usmallint_type_tag_name,bigint_type_tag_name,uint_type_tag_name,bool_type_tag_name) TAGS("hsbqzsfvkllngzft", "oshuiqqteddxsxhg", 12, -6023, -1854137419, 
"point(1.0 1.0)", -7820787410908786398, "ktlafrcpckbnasok", -1.1140330045511274e+308, -121, 30336, -5235830165957712064, 1885913417, True) (ts,bigint_type_col_name,varbinary_type_col_name,utinyint_type_col_name,usmallint_type_col_name,tinyint_type_col_name,color) VALUES ("2016-12-16 08:47:05", 1101333002744137125, "ztteslebezjpmdic", 9, -16849, -109, "otxajtxgosmycgwf");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,tinyint_type_tag_name,usmallint_type_tag_name,geometry_type_tag_name,varbinary_type_tag_name) TAGS(12811, 115, 11564, "point(1.0 1.0)", "fvllsyeldqjvxwvh") (ts,nchar_type_col_name,color,float_type_col_name,ubigint_type_col_name,tinyint_type_col_name,geometry_type_col_name,double_type_col_name,varbinary_type_col_name,bigint_type_col_name,uint_type_col_name) VALUES ("2016-12-16 08:52:23", "kzlebaiflssmhrez", "rmgygwigqpcedjfd", -1.0528922331364947e+38, 2053899259005385975, -4, "point(1.0 1.0)", 1.2949613471672877e+307, "xpnwhkobylhldlja", -2081901725973275838, 1084220081);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bool_type_tag_name,int_type_tag_name,tinyint_type_tag_name,uint_type_tag_name,bigint_type_tag_name,geometry_type_tag_name,b,double_type_tag_name,nchar_type_tag_name,smallint_type_tag_name,f,varchar_type_tag_name,ubigint_type_tag_name) TAGS(False, 81017693, -59, 3106871640, 6594200175307481979, "point(1.0 1.0)", "cxlcaxjqbottjwbt", 1.0810790118515493e+307, "mzpfnozkhzccymcy", -23268, -2.5710992840734416e+38, "bzcemqcjmfensoam", -6857344180628167939) (ts,varchar_type_col_name,tinyint_type_col_name,speed,bool_type_col_name,color,nchar_type_col_name,float_type_col_name,bigint_type_col_name,double_type_col_name,utinyint_type_col_name,ubigint_type_col_name,usmallint_type_col_name,geometry_type_col_name,uint_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 08:52:40", "goxjebyqxffhytny", 42, 956707019, False, "mwhbccpdjxrnlfmj", "hsswrkdrgvqzrqvc", 
-3.1923783000520714e+37, 6439233177980554597, 1.5462567361109987e+308, -104, 5670859655382432947, -13629, "point(1.0 1.0)", -291035521, -28198);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,tinyint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,geometry_type_tag_name,varchar_type_tag_name,smallint_type_tag_name,f,varbinary_type_tag_name,nchar_type_tag_name,double_type_tag_name,bigint_type_tag_name) TAGS(-1951062887, -15, False, 8992786638768246075, "point(1.0 1.0)", "zkbchpvyaborestv", 14319, -2.9661092483352556e+38, "sgmkfslmxakjfqna", "hujharmlkobjdcnn", -1.3558306099409006e+308, 6268021739102999331) (ts) VALUES ("2016-12-16 09:57:37");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,f,b,bigint_type_tag_name,smallint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name) TAGS(1359, 3.0441073077150407e+38, "hdozqqjsswenceob", 4323504650099562462, -468, False, 2127413600006157512) (ts) VALUES ("2016-12-16 09:57:50");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,double_type_tag_name,tinyint_type_tag_name,geometry_type_tag_name,ubigint_type_tag_name,usmallint_type_tag_name,uint_type_tag_name) TAGS("jrfnpvmonxehrxqh", 5.481850803776123e+307, -72, "point(1.0 1.0)", -6843236102647158686, 12864, 3757105281) (ts) VALUES ("2016-12-16 10:04:07");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,nchar_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,tinyint_type_tag_name,f,ubigint_type_tag_name,int_type_tag_name,double_type_tag_name,varchar_type_tag_name,varbinary_type_tag_name,bigint_type_tag_name,geometry_type_tag_name) TAGS(38, "gdjxupnqalfsczwf", 6556, True, 16, 2.3326392387567076e+38, -3052440068987461367, 1379207868, 9.408456780510208e+307, "yfoiebrvvxepulkf", "gyldwqexpqwtymqb", -8612949720672024630, "point(1.0 1.0)") 
(ts,nchar_type_col_name,tinyint_type_col_name,uint_type_col_name,varchar_type_col_name,float_type_col_name,smallint_type_col_name,speed) VALUES ("2016-12-16 10:07:23", "xhzhyldsilkilnhj", -111, 29265820, "hfayzyyeielxuajo", -1.9673771091182076e+38, -16871, -687056858);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,f,ubigint_type_tag_name,double_type_tag_name) TAGS("ojupiiqggvxiyyeg", 1.828613068144597e+38, -3546696963839193043, 1.063690235483227e+308) (ts,bigint_type_col_name,geometry_type_col_name,color,speed,tinyint_type_col_name,usmallint_type_col_name,double_type_col_name,utinyint_type_col_name,ubigint_type_col_name,uint_type_col_name,float_type_col_name,bool_type_col_name,varbinary_type_col_name,smallint_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 10:08:10", -8093382424883326599, "point(1.0 1.0)", "cqihlukuoejlpfry", 979506951, 21, -22385, -1.5427105800265006e+308, 27, -8297437264602062727, -1746384009, 2.252643841383272e+38, True, "lyyysgtqcxnmrtup", -12771, "lfzvhkaxvgdtprxe");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,tinyint_type_tag_name,smallint_type_tag_name) TAGS(-3588987228330855806, -26, 21365) (ts,tinyint_type_col_name) VALUES ("2016-12-16 10:09:18", -32);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(uint_type_tag_name,nchar_type_tag_name,double_type_tag_name,utinyint_type_tag_name) TAGS(245950801, "yvkkiivzhbqnnmsk", -1.658703905525468e+308, 28) (ts,smallint_type_col_name) VALUES ("2016-12-16 10:13:19", 26054);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(uint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,varchar_type_tag_name,double_type_tag_name,geometry_type_tag_name,varbinary_type_tag_name,b,utinyint_type_tag_name) TAGS(1694485723, False, -474604515817697594, "rtgthkbkwlbutbgv", 2.3887324912266175e+307, "point(1.0 1.0)", "gopcjcoagmnblahg", "jwnrdxiishgvobmd", 48) 
(ts,nchar_type_col_name,double_type_col_name,bigint_type_col_name,uint_type_col_name,smallint_type_col_name,tinyint_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 10:19:33", "xeinqeysquavrsyz", -8.949042643309358e+305, -5607735618216794531, -1459791787, -19075, -55, -127);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,uint_type_tag_name,utinyint_type_tag_name,ubigint_type_tag_name,smallint_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,nchar_type_tag_name,varchar_type_tag_name,bool_type_tag_name,b,tinyint_type_tag_name,extratag,varbinary_type_tag_name) TAGS(-2.4047245829078376e+38, 2561944294, 9, -8008616984432962570, -13634, "point(1.0 1.0)", 40692, "wnbqrneelpvqwlja", "jvqvobbxkeelzunr", True, "fmdtxmzdhmejwdtg", 34, 935844453, "pejduzquhxddarph") (ts,varbinary_type_col_name,varchar_type_col_name,double_type_col_name,bool_type_col_name,nchar_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 11:27:47", "sgusymsmfcwjnpcy", "npntezujzhtxahzl", -2.8300806976493463e+307, True, "vozigylgtwuwqyxd", -29259);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,geometry_type_tag_name) TAGS(2, "point(1.0 1.0)") (ts,tinyint_type_col_name,color,double_type_col_name,smallint_type_col_name,utinyint_type_col_name,float_type_col_name,varbinary_type_col_name,bool_type_col_name,speed,nchar_type_col_name,ubigint_type_col_name,uint_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 11:38:41", 65, "xbkejkdziselkysy", -1.3741827310590848e+308, 31519, -36, 5.743820075715186e+37, "gnqhbldkghybjgsu", True, 1371169555, "xhfnbxeeaaniflfh", -479840166843776187, 1922629809, -8139122071465810064);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(int_type_tag_name,usmallint_type_tag_name,geometry_type_tag_name,nchar_type_tag_name,varbinary_type_tag_name,smallint_type_tag_name,f,uint_type_tag_name,bigint_type_tag_name,double_type_tag_name,utinyint_type_tag_name,tinyint_type_tag_name,extratag,bool_type_tag_name) TAGS(-270631341, 46930, "point(1.0 1.0)", "gnbznssgvnzwrwxw", "cesmnjhqapqjvrja", 23088, -3.3164207581144432e+38, 1700967625, -1030316662035835968, 1.2011743206213233e+308, -92, 121, 367729172, True) (ts,color,utinyint_type_col_name,usmallint_type_col_name,tinyint_type_col_name,bigint_type_col_name,ubigint_type_col_name,nchar_type_col_name,varchar_type_col_name,speed,double_type_col_name,smallint_type_col_name,float_type_col_name) VALUES ("2016-12-16 11:42:49", "tldfcpsuvkwvvcxt", 27, 23508, 56, 7459772708834616479, 4287277222975526137, "irlpvwywkyfduafy", "fthdcsvpfasrorxm", 2014476868, 6.679126531840444e+307, 24227, 7.882713831816527e+36);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varbinary_type_tag_name) TAGS("ouvptxbwrpffphkp") (ts,color,double_type_col_name,float_type_col_name,nchar_type_col_name,bigint_type_col_name,varchar_type_col_name,ubigint_type_col_name,bool_type_col_name,tinyint_type_col_name,uint_type_col_name,geometry_type_col_name,speed,usmallint_type_col_name,smallint_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 12:34:44", "wnyagxmugtlzloqs", 2.5224403678140916e+307, -2.9593116477640043e+38, "wpatbvwqhdetaoko", 7131704094598121396, "cenguwerksdlwthz", -3084446013548702169, False, 121, -53423625, "point(1.0 1.0)", 1601558648, 23715, -10339, 45);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,varbinary_type_tag_name,int_type_tag_name,varchar_type_tag_name,bool_type_tag_name,smallint_type_tag_name,utinyint_type_tag_name,double_type_tag_name,nchar_type_tag_name,tinyint_type_tag_name,uint_type_tag_name,ubigint_type_tag_name) TAGS("ebiacunkvnicuhlq", "fqqxcyidlzmeaqqn", 1865880042, 
"fqyixbagtpigrvkm", True, -15973, 52, -1.2233588646642921e+308, "kjegzlhpztxcjjjt", 49, 3661613052, -3064287245566411855) (ts,tinyint_type_col_name,speed,usmallint_type_col_name,uint_type_col_name,bool_type_col_name,double_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 12:35:29", -54, -1777243683, -5292, -240610904, False, -1.4017848527201252e+308, "diubdgptppxueerk");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,tinyint_type_tag_name,varbinary_type_tag_name,int_type_tag_name,utinyint_type_tag_name,varchar_type_tag_name,b,double_type_tag_name,smallint_type_tag_name,uint_type_tag_name) TAGS(7218832446436978154, 34, "jqvhmixowianctmg", -169644439, -111, "yulvetumzeimvdew", "bsbqhjdbykaxhkah", -4.3917327843321925e+307, -12324, 3993659508) (ts,geometry_type_col_name,usmallint_type_col_name,color,tinyint_type_col_name,smallint_type_col_name,varchar_type_col_name,varbinary_type_col_name,double_type_col_name,utinyint_type_col_name,nchar_type_col_name,bigint_type_col_name,bool_type_col_name,uint_type_col_name,ubigint_type_col_name,float_type_col_name,speed) VALUES ("2016-12-16 12:55:41", "point(1.0 1.0)", 631, "nmddklgtjxrugcig", -88, -8868, "miajgamwlraattqs", "kffhsedlnafobisv", 1.4423313487130354e+308, -116, "kxoycofwhmsaeohq", -1136894476255564565, True, -939744415, -6653064581027995564, -1.9832809127199404e+38, -745235635);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,varchar_type_tag_name,utinyint_type_tag_name,nchar_type_tag_name,double_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,geometry_type_tag_name,uint_type_tag_name,smallint_type_tag_name,bigint_type_tag_name,b,varbinary_type_tag_name,int_type_tag_name,extratag,tinyint_type_tag_name) TAGS(6.311858336249026e+37, "jgyxmmpsdxqtdwrb", 109, "ziwccnmvbdvxjqvt", -8.093949887150045e+307, 50753, True, "point(1.0 1.0)", 1312277084, -22696, -6774059684436011400, "cfvnaoytrwsqfgik", "scoopbahexobzuzi", 
260884614, 2013977267, -98) (ts,geometry_type_col_name,bigint_type_col_name,float_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 12:58:13", "point(1.0 1.0)", 222826911129858937, 1.4161786490709517e+38, "pwyyqxegncyrgybp");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(extratag,tinyint_type_tag_name,bool_type_tag_name,utinyint_type_tag_name,int_type_tag_name,varbinary_type_tag_name,uint_type_tag_name,ubigint_type_tag_name,smallint_type_tag_name) TAGS(-350969142, -27, True, 95, -2014323156, "ytaraprsrowqgtya", 2187064438, 7657067162517860811, -11739) (ts,ubigint_type_col_name,speed,float_type_col_name,geometry_type_col_name,utinyint_type_col_name,smallint_type_col_name,uint_type_col_name,usmallint_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 12:58:20", 1695470437837518753, -248107853, -2.308796864773065e+38, "point(1.0 1.0)", 96, 27392, -317204398, -28969, 27);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,varchar_type_tag_name,extratag,geometry_type_tag_name,f,bool_type_tag_name,double_type_tag_name,uint_type_tag_name,ubigint_type_tag_name,smallint_type_tag_name,bigint_type_tag_name,nchar_type_tag_name,utinyint_type_tag_name,b,varbinary_type_tag_name,int_type_tag_name) TAGS(9315, "zcptltyqaoeubpam", -411172808, "point(1.0 1.0)", -1.274336621267979e+38, True, -1.574639717196322e+308, 3207202020, 8037317073978039576, 14431, 1953592773870071856, "qwzkcwunsuoqokru", -83, "crpiwmvdmhhxtnon", "rzvcgilgehavrorx", 1770221516) (ts,float_type_col_name,color,uint_type_col_name,speed,varchar_type_col_name,bigint_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 12:58:25", 2.203270740971935e+38, "gclrvxcyluoaqkul", -1757420543, -1667540040, "fhcuwffqunwujhnu", -6952698129894047939, 31135);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(extratag,geometry_type_tag_name,smallint_type_tag_name,bigint_type_tag_name,int_type_tag_name,b,bool_type_tag_name,varchar_type_tag_name,varbinary_type_tag_name,tinyint_type_tag_name,ubigint_type_tag_name,nchar_type_tag_name,usmallint_type_tag_name,utinyint_type_tag_name) TAGS(928439163, "point(1.0 1.0)", -5510, -6111546345505423465, 1619259430, "zqnekxkjgjysmuiz", False, "uzacypoysstbvzjr", "rzhyionncrfhslvk", -95, 2084839207513824909, "xbakjryihqmzoyxb", 1124, -51) (ts) VALUES ("2016-12-16 13:10:00");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,f,tinyint_type_tag_name,smallint_type_tag_name,varbinary_type_tag_name,uint_type_tag_name,utinyint_type_tag_name) TAGS(-7654723116287337126, 7.0413764481621105e+37, 28, 14707, "ohmuqnoqockmmvyr", 503417135, -1) (ts,varchar_type_col_name,color,geometry_type_col_name,usmallint_type_col_name,float_type_col_name,uint_type_col_name,ubigint_type_col_name,bigint_type_col_name,nchar_type_col_name,bool_type_col_name) VALUES ("2016-12-16 13:11:00", "vvnkffiwzkwchxku", "ettaavibyrfbkwnd", "point(1.0 1.0)", -14161, -1.8652679722611485e+38, -32936243, -5932829237196271077, 1036650175201440454, "gnkdvttswywmpmqr", False);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,utinyint_type_tag_name,b,extratag) TAGS(-1.6449590526148958e+308, 5, "oyqjdboasjbcgkoa", 769124366) (ts,smallint_type_col_name,utinyint_type_col_name,varbinary_type_col_name,float_type_col_name,bool_type_col_name) VALUES ("2016-12-16 13:12:21", -13042, 36, "ioqlptmacnphnudi", -2.8818733348179174e+38, True);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,geometry_type_tag_name,extratag,usmallint_type_tag_name,varchar_type_tag_name,bigint_type_tag_name,utinyint_type_tag_name,b,tinyint_type_tag_name) TAGS(-3.002723197204508e+38, "point(1.0 1.0)", 673924760, 10423, "zqbdmmvwolrmxtdy", 9067788071941407006, 58, 
"xfzkhhjsdslfocok", -6) (ts,bigint_type_col_name,float_type_col_name,usmallint_type_col_name,bool_type_col_name,uint_type_col_name,color,double_type_col_name) VALUES ("2016-12-16 13:18:58", -8128143152489083329, 3.1863841600401516e+38, -16841, False, -1439274645, "ayaagfubotrfelgd", 3.940540502421231e+307);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,geometry_type_tag_name,uint_type_tag_name,double_type_tag_name,extratag,ubigint_type_tag_name,int_type_tag_name,varchar_type_tag_name,bigint_type_tag_name,varbinary_type_tag_name,usmallint_type_tag_name,nchar_type_tag_name,f) TAGS(72, "point(1.0 1.0)", 664119737, 8.636577697672091e+307, -1956689850, 5442726894170414063, -620118845, "atdkvjdrsslbkqwk", -1730258878203616660, "jetzkpnswyrblizo", 62924, "yubdmiswgttrzlud", -2.6952802203722693e+38) (ts,tinyint_type_col_name,varbinary_type_col_name,bigint_type_col_name,utinyint_type_col_name,nchar_type_col_name,usmallint_type_col_name,geometry_type_col_name,double_type_col_name,color,uint_type_col_name,float_type_col_name,ubigint_type_col_name,varchar_type_col_name,bool_type_col_name,speed) VALUES ("2016-12-16 13:22:31", -101, "pezwjcdegugdqlmj", 2660651858547046010, -106, "xdhkuoxnpbrxzikz", 32513, "point(1.0 1.0)", 1.0237067068787619e+308, "qipropwsssqtdswq", -918947945, -3.2678520762372033e+38, 407015107737783176, "vvigrjnmxmzasgqt", False, -1322323386);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(uint_type_tag_name,varchar_type_tag_name,geometry_type_tag_name,double_type_tag_name,extratag,bool_type_tag_name,usmallint_type_tag_name,tinyint_type_tag_name,f,int_type_tag_name,ubigint_type_tag_name) TAGS(667307468, "lykdtqsuiuxbdejt", "point(1.0 1.0)", -3.233371151222957e+307, 780193507, False, 6454, -104, -1.6348292668856767e+38, -332964092, 6842647149944893582) 
(ts,bigint_type_col_name,nchar_type_col_name,double_type_col_name,smallint_type_col_name,varbinary_type_col_name,bool_type_col_name,speed,ubigint_type_col_name,utinyint_type_col_name,color,geometry_type_col_name,float_type_col_name,tinyint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 13:23:55", -2751819826451411816, "fwuvslqvozjdkybl", -1.4309368487421518e+308, -29059, "ncbtjyvhgdmrtwyb", False, 476112352, 5847161143554335168, 103, "rkxynqftsspdyohe", "point(1.0 1.0)", -2.7303867859897275e+38, 120, -15572);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(extratag,int_type_tag_name) TAGS(1467247951, -27058643) (ts,smallint_type_col_name) VALUES ("2016-12-16 13:28:54", 11705);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,double_type_tag_name,bool_type_tag_name,uint_type_tag_name,varbinary_type_tag_name,f,int_type_tag_name,extratag) TAGS(-7641570078560113172, -1.5720182754543317e+308, False, 3978085195, "otsbskgvmxxpqsuv", 1.761118847494028e+38, -798787056, 1618027135) (ts,tinyint_type_col_name,usmallint_type_col_name,float_type_col_name,varchar_type_col_name,color) VALUES ("2016-12-16 13:30:00", 34, -27037, 8.233875034151866e+37, "klvhnzbiagpaygjr", "azavalppgglodkpt");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,smallint_type_tag_name,nchar_type_tag_name,b,extratag,usmallint_type_tag_name,tinyint_type_tag_name,varchar_type_tag_name,f,varbinary_type_tag_name,geometry_type_tag_name) TAGS(-113, 26916, "hiabnhvkskqdxcft", "rilirkzfvvcsoxvu", 1985911354, 63961, -49, "tkqjhnbsfqlkrmen", 2.4436889435795517e+38, "ljxwjovyuimnwfyn", "point(1.0 1.0)") (ts,float_type_col_name,speed,double_type_col_name,nchar_type_col_name,tinyint_type_col_name,smallint_type_col_name,geometry_type_col_name,utinyint_type_col_name,varchar_type_col_name) VALUES ("2016-12-16 13:32:10", -2.8391080467349558e+38, -334011151, -6.231241486564915e+307, 
"yikfspsjjzvjmiig", 65, 32623, "point(1.0 1.0)", -79, "vqcenjbphvbzuyec");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,bool_type_tag_name,b,nchar_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,varchar_type_tag_name) TAGS(7.628181285293382e+37, False, "xasbwapuqiwnuapm", "wgbkfpitzqilvgam", 1495376777, -22, "cyihglhaaqdhsbrt") (ts,bigint_type_col_name,ubigint_type_col_name,color) VALUES ("2016-12-16 13:33:30", 6511707951588009422, 2723033375589066235, "gxikhegxccxagvge");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,smallint_type_tag_name,bigint_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,nchar_type_tag_name,geometry_type_tag_name,uint_type_tag_name,varchar_type_tag_name) TAGS(106, 142, 1896023978688432555, 64760, True, "kbfjnghakyfiaydm", "point(1.0 1.0)", 1202839796, "hmpxrvvhazkoxktl") (ts,color,double_type_col_name,geometry_type_col_name,float_type_col_name,ubigint_type_col_name,varbinary_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 13:34:57", "rnlnepdxpkshnoon", -9.753180446674174e+307, "point(1.0 1.0)", 7.136745784580883e+37, -5844625605864726019, "cjivuqrqtdkhbiwa", 2274288423212647405);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,double_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name,geometry_type_tag_name) TAGS("nebcguavuaezyyde", 1.2787498546480036e+308, -102, -1, "point(1.0 1.0)") (ts,bool_type_col_name,geometry_type_col_name,utinyint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 13:37:33", True, "point(1.0 1.0)", 60, 3057);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,bigint_type_tag_name,utinyint_type_tag_name,double_type_tag_name,usmallint_type_tag_name,int_type_tag_name,b) TAGS(-3.3009424548590525e+38, 7360584785531994343, 20, 1.426103197404768e+308, 47478, -1878972741, "nscbvzswvnmgwucm") 
(ts,smallint_type_col_name,geometry_type_col_name,utinyint_type_col_name,bool_type_col_name,uint_type_col_name) VALUES ("2016-12-16 13:37:39", -27464, "point(1.0 1.0)", 112, True, 1250310797);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,double_type_tag_name,smallint_type_tag_name,usmallint_type_tag_name,varbinary_type_tag_name) TAGS(-1589972917066394459, 8.923822004171481e+307, 15758, 5488, "ruuxldxnhfwxpstv") (ts,uint_type_col_name,color,bool_type_col_name,ubigint_type_col_name,smallint_type_col_name,varchar_type_col_name,varbinary_type_col_name,float_type_col_name,usmallint_type_col_name,double_type_col_name,nchar_type_col_name,geometry_type_col_name) VALUES ("2016-12-16 13:44:22", 963762484, "wlcggiilxpahshwd", True, 3728201935390266604, -2468, "edlyrnopkadknbig", "odaximfsofanwsiq", 1.2948243726798647e+38, 22342, 1.6685972624971541e+308, "ebxjnldwzyxugscc", "point(1.0 1.0)");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name,usmallint_type_tag_name,smallint_type_tag_name,geometry_type_tag_name,bigint_type_tag_name,uint_type_tag_name,b,int_type_tag_name,ubigint_type_tag_name,utinyint_type_tag_name,nchar_type_tag_name) TAGS(-88, 51955, 29505, "point(1.0 1.0)", -6962148594230058647, 2678324900, "kygvkaanownlmuny", -1846762302, -785520152137980973, 49, "oxptoddkmvlzukes") (ts,tinyint_type_col_name,varchar_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 13:49:22", 31, "nicxhhisdvunqmyq", 8945862635913752172);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,b,varchar_type_tag_name,int_type_tag_name,bigint_type_tag_name) TAGS(23202, "imrskuasopukglor", "bvizzkkpezerxhqu", 317353908, 754217902580887681) (ts,geometry_type_col_name) VALUES ("2016-12-16 13:53:17", "point(1.0 1.0)");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(int_type_tag_name,tinyint_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,double_type_tag_name,ubigint_type_tag_name,bigint_type_tag_name,varbinary_type_tag_name,bool_type_tag_name,usmallint_type_tag_name,b,extratag,f,varchar_type_tag_name,nchar_type_tag_name,geometry_type_tag_name,smallint_type_tag_name) TAGS(-1011316156, 122, 3910130646, 43, 8.512388268116912e+307, 1669456457022538791, 2195079875842603602, "erjqjbgtlgvbwwja", True, 42205, "wovnwgvyflnrbkjr", 1404754906, 2.2368721201787224e+38, "yjisfzjurskvjdck", "vmjcbkwgfouvenyq", "point(1.0 1.0)", -6290) (ts,bool_type_col_name,ubigint_type_col_name,speed,tinyint_type_col_name,usmallint_type_col_name,utinyint_type_col_name,nchar_type_col_name,double_type_col_name,varbinary_type_col_name,smallint_type_col_name,uint_type_col_name,bigint_type_col_name,geometry_type_col_name,varchar_type_col_name) VALUES ("2016-12-16 13:55:15", False, -8830864698607791783, 200778707, 80, -12482, -108, "qlzsnerbfpxtrhsu", 7.918325930212065e+307, "bkeuitzjnwepwsoz", -24175, 1796806861, 8456809260387320755, "point(1.0 1.0)", "bfhekhbyzfnbfvbc");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,varbinary_type_tag_name,varchar_type_tag_name,b,nchar_type_tag_name,extratag,double_type_tag_name,smallint_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,tinyint_type_tag_name,bigint_type_tag_name,bool_type_tag_name,f,usmallint_type_tag_name,ubigint_type_tag_name,geometry_type_tag_name) TAGS(-109100963, "fkktjbsjdjozysqb", "kvckvuuvjkhrwvum", "svohobpldtonpiqs", "bgrvrzilrbtkzaim", 1058428584, 7.196238081264505e+306, 1884, 2991872141, -109, 117, 1510579019662137511, False, 1.2245033771852769e+38, 19364, -4972876200109100193, "point(1.0 1.0)") (ts,nchar_type_col_name,utinyint_type_col_name,varbinary_type_col_name,bigint_type_col_name,geometry_type_col_name,ubigint_type_col_name,smallint_type_col_name,bool_type_col_name,speed,color,uint_type_col_name) VALUES 
("2016-12-16 13:58:17", "fhofoqbrebyjlnkk", 78, "jbttouyukxktabti", 715764060196867324, "point(1.0 1.0)", -3965830703103714728, 6591, True, -2043004502, "ipjtaiujqrieidut", -1209525645);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varbinary_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,bool_type_tag_name,smallint_type_tag_name,f,bigint_type_tag_name) TAGS("rqnninzykiejsiaq", 2744710939, 125, True, -3120, 2.294739380866984e+38, 3223998974927684831) (ts,nchar_type_col_name,smallint_type_col_name,bigint_type_col_name,geometry_type_col_name,color,ubigint_type_col_name,float_type_col_name,varchar_type_col_name,double_type_col_name,varbinary_type_col_name,speed,utinyint_type_col_name) VALUES ("2016-12-16 13:58:26", "dpuonivwjikfanqj", 28149, -8363753204688150611, "point(1.0 1.0)", "deqhimtcneshajxu", -7721659794023728322, 3.1823124307599837e+38, "lmykqmofvaeejays", 1.3450730693544848e+308, "cpvchycerzkkriye", -935361460, -32);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,varbinary_type_tag_name,b) TAGS("gzexbtkchcrciore", "xzsaapvhaldvnsee", "luhcuujwpvsqcydj") (ts,varbinary_type_col_name,uint_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 14:17:37", "vegoocllfotccxme", 1106951626, 22765);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,nchar_type_tag_name,b,varchar_type_tag_name,bigint_type_tag_name,tinyint_type_tag_name) TAGS(16570, "silgmziwpdxxoyek", "yqvguhekzckruinc", "aajlxcknjrbyhkha", -8045441217037238680, 107) (ts,varchar_type_col_name,float_type_col_name,speed,utinyint_type_col_name,bool_type_col_name,color) VALUES ("2016-12-16 14:19:46", "heftclalnprpfawp", 6.0294286136178425e+37, 302462364, -8, False, "zdadyskvxrgyaqsm");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(smallint_type_tag_name,varbinary_type_tag_name,tinyint_type_tag_name,uint_type_tag_name,nchar_type_tag_name) TAGS(10191, "lhxvlfkfbtrhsjkt", 124, 77770857, "dmsjeojxddyuxink") (ts,tinyint_type_col_name,speed,uint_type_col_name,bool_type_col_name,varbinary_type_col_name,double_type_col_name,color) VALUES ("2016-12-16 14:32:40", 21, 379371801, 608762533, False, "rjgbwgnnkkemctro", 1.4188655662190125e+308, "sbwlxrlpgnkmvadl");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bool_type_tag_name,int_type_tag_name) TAGS(False, 2060765790) (ts) VALUES ("2016-12-16 14:59:59");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,b) TAGS("ieyyywpqpqushibb", "dkvjhgnakkgqzijs") (ts) VALUES ("2016-12-16 15:16:49");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,varchar_type_tag_name,usmallint_type_tag_name,tinyint_type_tag_name,double_type_tag_name,bigint_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,int_type_tag_name,bool_type_tag_name,extratag,b,geometry_type_tag_name,f) TAGS("mgatrnymxrqqmdyx", "oljnaeurozmvfqhp", 48759, 38, 3.403325685451397e+307, -6460116231033920128, 1576382718, 104, -603843450, True, 309025710, "tcbaxjfsqakxpvji", "point(1.0 1.0)", 2.8892847575707817e+38) (ts,usmallint_type_col_name,ubigint_type_col_name,nchar_type_col_name,uint_type_col_name,double_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 15:20:44", -17861, 7575197484024879527, "txwlduparjczcbnx", -2111059045, 4.196810591621617e+307, "asimawbydzjggkce");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,bigint_type_tag_name,double_type_tag_name) TAGS(58633, -7506966660525849170, 6.430296151375471e+307) 
(ts,smallint_type_col_name,color,bigint_type_col_name,geometry_type_col_name,speed,nchar_type_col_name,float_type_col_name,double_type_col_name,varchar_type_col_name,usmallint_type_col_name,uint_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 15:51:23", -16594, "twlhrkfaqrwnjumx", -6411319415230721292, "point(1.0 1.0)", 1147845389, "dkuebijphjfmniyc", -2.276532994058693e+38, -4.871085226210322e+307, "ttundubdmhuwtaaa", -29548, -424312121, "zgvfwozyxijapwho");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(uint_type_tag_name,int_type_tag_name) TAGS(4147856182, 710788816) (ts,uint_type_col_name,double_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 16:03:05", -391718933, 9.11108254173214e+306, "mtfdzaczermguiog");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varbinary_type_tag_name,int_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,bool_type_tag_name,nchar_type_tag_name,bigint_type_tag_name,varchar_type_tag_name,extratag,b,smallint_type_tag_name,ubigint_type_tag_name,usmallint_type_tag_name) TAGS("abhddtiwgwsawqif", -326574137, 3092694575, 117, True, "elqkqieyrmvxdfal", 513325224677025259, "coxihpudvcdmvavx", 949926470, "jtbtsxdafkcutiyq", 18084, -3755127971745692605, 15049) (ts,color,uint_type_col_name,bigint_type_col_name,ubigint_type_col_name,bool_type_col_name,geometry_type_col_name,tinyint_type_col_name,varchar_type_col_name,speed,usmallint_type_col_name,varbinary_type_col_name,double_type_col_name,smallint_type_col_name,nchar_type_col_name,float_type_col_name) VALUES ("2016-12-16 16:10:12", "geiqxfxzxvcisxvq", 404296035, 6018544511539939806, -2723124593683508517, False, "point(1.0 1.0)", -35, "vpgmwoviojtaksyc", -1590408139, -21520, "yivgshgarquqsmkx", -5.091863294679578e+307, -6162, "hwxnlpshligbgdwr", 2.8760534412729593e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(geometry_type_tag_name,f,nchar_type_tag_name,double_type_tag_name,ubigint_type_tag_name,extratag,b,varbinary_type_tag_name) TAGS("point(1.0 1.0)", -8.890969508566344e+37, "olgmpbkzjdyaaxkw", 1.1670001553372841e+308, -6483556380230416046, -1489326043, "kavalxtywfupgxae", "nimnqvcrgffqrnry") (ts,geometry_type_col_name,double_type_col_name,varchar_type_col_name,tinyint_type_col_name,nchar_type_col_name,bool_type_col_name,float_type_col_name,utinyint_type_col_name,usmallint_type_col_name,bigint_type_col_name,speed,uint_type_col_name,smallint_type_col_name,color) VALUES ("2016-12-16 16:31:17", "point(1.0 1.0)", -1.6228106351276176e+308, "mrbiqastjhshphqy", -31, "sldcopievsrcpmdn", True, 1.2788940507924604e+38, -53, 1934, -93937490102293875, 251909968, -1180421292, 2009, "fvcuvlqkjdivmbhf");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,varchar_type_tag_name,uint_type_tag_name,geometry_type_tag_name,extratag,bigint_type_tag_name,double_type_tag_name,usmallint_type_tag_name,int_type_tag_name,varbinary_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,f) TAGS("hcwwkenozisgewdo", "ftzlnzgbqcyttqkz", 557595811, "point(1.0 1.0)", -1874674248, -2833874801024332821, 1.106348478672925e+308, 10205, -377441795, "nlqfaejgqdmixvco", True, -8116329742748132558, 2.0010693036315465e+37) (ts,varbinary_type_col_name,smallint_type_col_name,geometry_type_col_name,tinyint_type_col_name,usmallint_type_col_name,color,nchar_type_col_name,bigint_type_col_name,utinyint_type_col_name,bool_type_col_name,ubigint_type_col_name,speed,double_type_col_name,float_type_col_name,varchar_type_col_name) VALUES ("2016-12-16 16:59:27", "kjukvijrgrplxrqt", 15581, "point(1.0 1.0)", 94, 25505, "btgdgtpeqlczgeua", "sstxtgzlyuxqmpla", -8232897057478421459, -38, False, 1902106109546699459, -2035112348, -6.609585770864842e+307, -5.473155208537856e+37, "wkymamybnhbkbwod");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(b,extratag,varchar_type_tag_name,double_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name,geometry_type_tag_name,varbinary_type_tag_name,bool_type_tag_name) TAGS("obtjobsephyqxyky", 1926001254, "scxtotlkrgfzsqej", 8.631070766476075e+307, 116, -122, "point(1.0 1.0)", "venokzvoaqfytamy", False) (ts,tinyint_type_col_name,varchar_type_col_name,utinyint_type_col_name,geometry_type_col_name,varbinary_type_col_name,ubigint_type_col_name,nchar_type_col_name,bool_type_col_name,double_type_col_name,bigint_type_col_name,float_type_col_name,smallint_type_col_name,uint_type_col_name) VALUES ("2016-12-16 17:09:37", 38, "wihymoxehtmockaj", 119, "point(1.0 1.0)", "lamqyqtuicaumtur", 228827014834750467, "wjeizsgkbechyzya", True, -4.942776530531653e+307, -7607643892481897470, -8.442862379478159e+36, -19143, -2117225342);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,varchar_type_tag_name,f,smallint_type_tag_name,tinyint_type_tag_name,double_type_tag_name,varbinary_type_tag_name,extratag,nchar_type_tag_name,bool_type_tag_name,bigint_type_tag_name,usmallint_type_tag_name,geometry_type_tag_name) TAGS(-103, "rlxmlphalsiwfxkb", -3.256319143371585e+38, -23823, -50, -4.530638219225517e+307, "ycpjvlrnkgdcxbxl", -1782343000, "eibzodmwkyfectbm", False, -3662763902289367996, 11348, "point(1.0 1.0)") (ts,color,bool_type_col_name,uint_type_col_name) VALUES ("2016-12-16 17:13:24", "bpczdwxzdhdlacff", True, 853512564);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,geometry_type_tag_name,double_type_tag_name,usmallint_type_tag_name,b,uint_type_tag_name,varbinary_type_tag_name,varchar_type_tag_name,bool_type_tag_name,bigint_type_tag_name,extratag,smallint_type_tag_name,int_type_tag_name,tinyint_type_tag_name,nchar_type_tag_name) TAGS(2.8559644361518244e+38, "point(1.0 1.0)", -8.660488863866903e+307, 51580, "mfdzkxvqcmvlwcab", 1370077655, "gsocrcrzmrbagyvu", "gwtxcyfmdzlvijkk", False, 
4749716129372507648, -1343610171, 3648, 67398117, 55, "onuoalimpzxphmrb") (ts,varchar_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 17:19:38", "lbdzxuiflnytuwcm", "qyiblgcvjolvnfxd");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bool_type_tag_name,utinyint_type_tag_name,varchar_type_tag_name,b,nchar_type_tag_name,double_type_tag_name,uint_type_tag_name,varbinary_type_tag_name,bigint_type_tag_name,ubigint_type_tag_name,f,int_type_tag_name,usmallint_type_tag_name,smallint_type_tag_name,geometry_type_tag_name) TAGS(True, -128, "ghxlfvebdngntlsy", "gyshejexhflthjyt", "csoufooqhjixarep", -3.560079945924916e+307, 3796289973, "xrwzaykbqpnpowzg", -5254959553491612400, 6959026343484215526, -2.426597157147208e+38, 1953534210, 56530, -27622, "point(1.0 1.0)") (ts,nchar_type_col_name,varbinary_type_col_name,varchar_type_col_name,uint_type_col_name,speed,ubigint_type_col_name,double_type_col_name,usmallint_type_col_name,bool_type_col_name,bigint_type_col_name,tinyint_type_col_name,color,float_type_col_name,geometry_type_col_name,smallint_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 17:28:43", "bxarvuznezbovivi", "ljmaaewnxbwquqbv", "vmttcfyjpofkfxdz", -160343511, -1153710436, -6078325270497694207, -1.6708621643614452e+308, -18667, False, -5283782545471513173, 34, "xmbhflvmvnjsjukp", 1.7453583209432804e+38, "point(1.0 1.0)", 21766, 96);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varchar_type_tag_name,ubigint_type_tag_name,bool_type_tag_name,nchar_type_tag_name,varbinary_type_tag_name,int_type_tag_name,b,utinyint_type_tag_name) TAGS("xzxnwavsfkqexqst", 6248701313661488236, False, "tfzyiigcdgdviljn", "pyaziteptbnjwetv", 1550386893, "hvgiajgmbrfjbhzv", -50) (ts,uint_type_col_name,ubigint_type_col_name,utinyint_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 17:29:35", -2140028488, 9126620526143931960, 48, "vzlvftkxtgejntwx");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(int_type_tag_name,f,varchar_type_tag_name) TAGS(962788507, 1.835407119522404e+38, "dcugcviilgfzpjup") (ts,usmallint_type_col_name,smallint_type_col_name,bigint_type_col_name,geometry_type_col_name,ubigint_type_col_name,speed,nchar_type_col_name,double_type_col_name,float_type_col_name) VALUES ("2016-12-16 17:33:20", 2254, 28827, -8316728271968247646, "point(1.0 1.0)", 6630573292051824500, -1180210336, "bvbksqzbwgmpufzu", 2.591615616226201e+307, 1.8393414054114685e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,b,f,double_type_tag_name,ubigint_type_tag_name,int_type_tag_name,varbinary_type_tag_name,bool_type_tag_name,smallint_type_tag_name,tinyint_type_tag_name,uint_type_tag_name,varchar_type_tag_name,usmallint_type_tag_name,nchar_type_tag_name,geometry_type_tag_name,extratag) TAGS(-1652523720828862491, "rshcsvsqtejnnyzt", -2.8033588099999885e+38, 1.1267740923697902e+308, 6134475623749100063, 275423891, "symfelluxyjghegv", True, 28636, 15, 2850861607, "dluujqzooplynbpi", 11747, "xkepcycandqyqhng", "point(1.0 1.0)", 28000589) (ts,float_type_col_name) VALUES ("2016-12-16 17:35:20", -2.768468628882595e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,utinyint_type_tag_name,b,varbinary_type_tag_name,varchar_type_tag_name,smallint_type_tag_name,nchar_type_tag_name,tinyint_type_tag_name,bigint_type_tag_name) TAGS(40614, -17, "xwadkvwyxpirlkuv", "tfgddotpqkdjbtbh", "weehvoxpqcioyipx", -25537, "xtrykrvmibasrfpk", 64, 2611368116480765912) (ts,color,geometry_type_col_name,usmallint_type_col_name,float_type_col_name,smallint_type_col_name,bigint_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 17:37:51", "jdyzckzuwxieaecb", "point(1.0 1.0)", -27205, -1.711911078586609e+38, 31375, -6123067329853056480, -90);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,usmallint_type_tag_name) 
TAGS(1819937892824962656, 8171) (ts,ubigint_type_col_name,float_type_col_name,bool_type_col_name,uint_type_col_name,color,double_type_col_name) VALUES ("2016-12-16 17:43:42", -4609243792627697209, 2.006334657982245e+38, False, 1603774652, "czkpwgrrldvfefpq", -5.301467469348041e+307);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,b,usmallint_type_tag_name,int_type_tag_name,uint_type_tag_name,double_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name) TAGS(1581454524159189903, "aauchczywgtewzec", 10564, -1624200982, 1778830750, 1.007077574465233e+308, -117, -8) (ts,double_type_col_name,color,nchar_type_col_name,utinyint_type_col_name,bigint_type_col_name,uint_type_col_name,tinyint_type_col_name,varchar_type_col_name) VALUES ("2016-12-16 17:47:52", 3.178641188947165e+307, "plewvtmwmncovgwi", "afkkkizxcmpvjrof", 97, -4082154569696236500, 785036327, 36, "tnjnpkqduaiplcbr");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name) TAGS(-9259) (ts,ubigint_type_col_name,utinyint_type_col_name,varbinary_type_col_name,bool_type_col_name,varchar_type_col_name,nchar_type_col_name,geometry_type_col_name,usmallint_type_col_name,speed) VALUES ("2016-12-16 17:50:22", 6575849309295898287, -110, "ebiistxhnphfulox", False, "djlqhenlzryusbkd", "ltldluztvsgqjeku", "point(1.0 1.0)", 8278, 1651340708);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,nchar_type_tag_name,geometry_type_tag_name,smallint_type_tag_name,utinyint_type_tag_name,extratag) TAGS(1.0764964917532312e+308, "ykuhegoyikhmcgre", "point(1.0 1.0)", 9094, 74, 1323341877) (ts,varbinary_type_col_name) VALUES ("2016-12-16 17:51:12", "nafbgufapnohskpj");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,double_type_tag_name) TAGS(28583, 1.953720446700205e+307) 
(ts,nchar_type_col_name,varbinary_type_col_name,utinyint_type_col_name,geometry_type_col_name,varchar_type_col_name,speed) VALUES ("2016-12-16 18:11:59", "qwqpnycixypxdgkx", "sqdczulupdskltin", -2, "point(1.0 1.0)", "gbqmwfvaobmucwws", -1295442484);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,nchar_type_tag_name,bigint_type_tag_name,tinyint_type_tag_name,geometry_type_tag_name,f,int_type_tag_name,double_type_tag_name,varchar_type_tag_name,extratag,bool_type_tag_name,smallint_type_tag_name,b,usmallint_type_tag_name) TAGS(-1899740088608098071, 3000382785, -90, "lyiqgjrtcmygsulu", 3674173029313761846, 24, "point(1.0 1.0)", -2.0614150673404532e+38, -83384014, 8.266327304822514e+307, "hmriafyufcvjdmxp", -1458878109, True, 2751, "ldrlpabgmvlmmvpg", 14211) (ts,color,ubigint_type_col_name,uint_type_col_name,smallint_type_col_name,varbinary_type_col_name,speed,tinyint_type_col_name,double_type_col_name,float_type_col_name,varchar_type_col_name,usmallint_type_col_name,geometry_type_col_name,bool_type_col_name) VALUES ("2016-12-16 18:14:29", "hlfayalksfvnguaw", 7446576754933889227, -200330992, 28598, "bqqqhufmncpjhsuv", 218558271, -85, -9.741398605743586e+307, 7.376357821892906e+37, "tpvxybkpxgmhowjr", 25868, "point(1.0 1.0)", False);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,nchar_type_tag_name,usmallint_type_tag_name,uint_type_tag_name,varchar_type_tag_name,b,int_type_tag_name,utinyint_type_tag_name,varbinary_type_tag_name,double_type_tag_name,bigint_type_tag_name,smallint_type_tag_name,f,ubigint_type_tag_name,bool_type_tag_name) TAGS("point(1.0 1.0)", "sjzthkwujsqenwih", 48703, 4087817577, "xmqagtnxequrrylr", "ijtbyhilxioxzuah", -352262545, 1, "sckpgzmooxqjheoe", 1.1198965017615236e+308, 6916146561643120258, 32531, -1.6419733589465474e+38, -4935693961629680589, False) 
(ts,nchar_type_col_name,geometry_type_col_name,double_type_col_name,varbinary_type_col_name,varchar_type_col_name,bigint_type_col_name,smallint_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 18:16:09", "jssdvwbxvsbmilue", "point(1.0 1.0)", 6.391478347681934e+307, "zdtetttfsnleoyet", "ppoptsqilikhsino", -840467639559539794, -31830, -103);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,smallint_type_tag_name,bigint_type_tag_name,int_type_tag_name,utinyint_type_tag_name,f,uint_type_tag_name,tinyint_type_tag_name) TAGS("point(1.0 1.0)", 24091, 2578286530814614745, 1008675772, -8, 7.362335563770809e+37, 1819796668, 44) (ts,utinyint_type_col_name,bigint_type_col_name,tinyint_type_col_name,ubigint_type_col_name,smallint_type_col_name,varbinary_type_col_name,float_type_col_name,speed,varchar_type_col_name,bool_type_col_name,usmallint_type_col_name,color,geometry_type_col_name) VALUES ("2016-12-16 18:16:59", 93, -2924293332667446092, 18, -1926645562871095772, 25986, "xjlujcacaynnnmub", -9.456838052905681e+37, 529848418, "nxkirdhlqqloitkl", True, 4620, "acujdfhatzkugcrk", "point(1.0 1.0)");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name,usmallint_type_tag_name,b,f,varbinary_type_tag_name,bigint_type_tag_name,smallint_type_tag_name,geometry_type_tag_name,varchar_type_tag_name,extratag,utinyint_type_tag_name,uint_type_tag_name,ubigint_type_tag_name,double_type_tag_name,bool_type_tag_name,int_type_tag_name) TAGS(-98, 2946, "qgvkaujyrikpmgxp", 3.2784411370027565e+38, "dspxtsyqnqdyciab", -794888918858663169, -1378, "point(1.0 1.0)", "diaerlydilfkqveu", -1591170700, -9, 88084273, -8326438069324931384, -6.790019844508353e+307, False, 1326851510) 
(ts,speed,double_type_col_name,geometry_type_col_name,ubigint_type_col_name,bool_type_col_name,utinyint_type_col_name,bigint_type_col_name,usmallint_type_col_name,color,smallint_type_col_name,varchar_type_col_name,nchar_type_col_name,float_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 18:17:51", -1374792144, -1.149850424042433e+308, "point(1.0 1.0)", 8907011909602006795, False, 104, 1588514034331429385, -18984, "namrvnonxcrzziic", -26616, "ueddmjfyoclvjwfx", "mkkexxinfededdfr", -2.403953587086065e+38, "kadxrwlomevmukfr");') + + def test_percentile(self): + tdSql.error('SELECT SPREAD(bigint_type_tag_name),LEASTSQUARES(bigint_type_tag_name, 9, 2),COUNT(bigint_type_tag_name),HYPERLOGLOG(bigint_type_tag_name),PERCENTILE(bigint_type_tag_name,89,27,20,19,87,99,26,17,45),AVG(bigint_type_tag_name), HYPERLOGLOG(tinyint_type_tag_name),AVG(tinyint_type_tag_name),SPREAD(tinyint_type_tag_name),STDDEV(tinyint_type_tag_name),SUM(tinyint_type_tag_name),COUNT(tinyint_type_tag_name),APERCENTILE(tinyint_type_tag_name, 1, "t-digest"),LEASTSQUARES(tinyint_type_tag_name, 8, 1), STDDEV(usmallint_type_tag_name), COUNT(nchar_type_tag_name),HYPERLOGLOG(nchar_type_tag_name), PERCENTILE(double_type_col_name,6,23,52,24,1,53,95,51,38),HYPERLOGLOG(double_type_col_name),SPREAD(double_type_col_name) FROM td32506.reg_table_159 STATE_WINDOW(1);') + + def run(self): + tdLog.debug(f"start to excute {__file__}") + + self.insert_data() + + # math function + self.test_percentile() + + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/army/query/function/test_resinfo.py b/tests/army/query/function/test_resinfo.py index 51d51f3ce16..5a59ed45cd0 100644 --- a/tests/army/query/function/test_resinfo.py +++ b/tests/army/query/function/test_resinfo.py @@ -26,7 +26,7 @@ from frame.caseBase import * from frame import * -initial_hash_resinfoInt = "e739cde34b98f13dd9ad696d18f060cc" 
+initial_hash_resinfoInt = "fbfd69d6f0aa6e015a7b5475b33ee8c8" initial_hash_resinfo = "172d04aa7af0d8cd2e4d9df284079958" class TDTestCase(TBase): @@ -43,6 +43,7 @@ def testFileChanged(self): resinfoIntFile = etool.curFile(__file__, "../../../../source/libs/function/inc/functionResInfoInt.h") resinfoFile = etool.curFile(__file__, "../../../../include/libs/function/functionResInfo.h") current_hash = self.get_file_hash(resinfoIntFile) + tdLog.info(current_hash) if current_hash != initial_hash_resinfoInt: tdLog.exit(f"{resinfoIntFile} has been modified.") else: diff --git a/tests/army/query/test_case_when.py b/tests/army/query/test_case_when.py new file mode 100644 index 00000000000..e7729141839 --- /dev/null +++ b/tests/army/query/test_case_when.py @@ -0,0 +1,375 @@ +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame import * +from frame.eos import * +from datetime import datetime, timedelta + + +class TDTestCase(TBase): + """Verify the case...when... 
expression in the query statement + """ + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.stable_schema = { + "columns": { + "ts": "timestamp", + "c_null": "int", + "c_bool": "bool", + "c_tinyint": "tinyint", + "c_smallint": "smallint", + "c_int": "int", + "c_bigint": "bigint", + "c_float": "float", + "c_double": "double", + "c_varchar": "varchar(16)", + "c_timestamp": "timestamp", + "c_nchar": "nchar(16)", + "c_utinyint": "tinyint unsigned", + "c_usmallint": "smallint unsigned", + "c_uint": "int unsigned", + "c_ubigint": "bigint unsigned", + "c_varbinary": "varbinary(16)", + "c_geometry": "geometry(32)" + }, + "tags": { + "t_null": "int", + "t_bool": "bool", + "t_tinyint": "tinyint", + "t_smallint": "smallint", + "t_int": "int", + "t_bigint": "bigint", + "t_float": "float", + "t_double": "double", + "t_varchar": "varchar(16)", + "t_timestamp": "timestamp", + "t_nchar": "nchar(16)", + "t_utinyint": "tinyint unsigned", + "t_usmallint": "smallint unsigned", + "t_uint": "int unsigned", + "t_ubigint": "bigint unsigned", + "t_varbinary": "varbinary(16)", + "t_geometry": "geometry(32)" + } + } + + def prepare_data(self): + # create database + tdSql.execute("create database test_case_when;") + tdSql.execute("use test_case_when;") + # create stable + columns = ",".join([f"{k} {v}" for k, v in self.stable_schema["columns"].items()]) + tags = ",".join([f"{k} {v}" for k, v in self.stable_schema["tags"].items()]) + st_sql = f"create stable st1 ({columns}) tags ({tags});" + tdSql.execute(st_sql) + st_sql_json_tag = f"create stable st2 ({columns}) tags (t json);" + tdSql.execute(st_sql_json_tag) + # create child table + tdSql.execute("create table ct1 using st1 tags(NULL, True, 1, 1, 1, 1, 1.1, 1.11, 'aaaaaaaa', '2021-09-01 00:00:00.000', 'aaaaaaaa', 1, 1, 1, 1, \"0x06\",'POINT(1 1)');") + tdSql.execute("""create table ct2 using st2 tags('{"name": "test", 
"location": "beijing"}');""") + # insert data + ct1_data = [ + """'2024-10-01 00:00:00.000', NULL, True, 2, 2, 2, 2, 2.2, 2.22, 'bbbbbbbb', '2021-09-01 00:00:00.000', 'bbbbbbbb', 2, 2, 2, 2, "0x07",'POINT(2 2)'""", + """'2024-10-01 00:00:01.000', NULL, False, 3, 3, 3, 3, 3.3, 3.33, 'cccccccc', '2021-09-01 00:00:00.000', 'cccccccc', 3, 3, 3, 3, "0x08",'POINT(3 3)'""", + """'2024-10-01 00:00:02.000', NULL, True, 4, 4, 4, 4, 4.4, 4.44, 'dddddddd', '2021-09-01 00:00:00.000', 'dddddddd', 4, 4, 4, 4, "0x09",'POINT(4 4)'""", + """'2024-10-01 00:00:03.000', NULL, False, 5, 5, 5, 5, 5.5, 5.55, 'eeeeeeee', '2021-09-01 00:00:00.000', 'eeeeeeee', 5, 5, 5, 5, "0x0A",'POINT(5 5)'""", + """'2024-10-01 00:00:04.000', NULL, True, 6, 6, 6, 6, 6.6, 6.66, 'ffffffff', '2021-09-01 00:00:00.000', 'ffffffff', 6, 6, 6, 6, "0x0B",'POINT(6 6)'""", + """'2024-10-01 00:00:05.000', NULL, False, 7, 7, 7, 7, 7.7, 7.77, 'gggggggg', '2021-09-01 00:00:00.000', 'gggggggg', 7, 7, 7, 7, "0x0C",'POINT(7 7)'""", + """'2024-10-01 00:00:06.000', NULL, True, 8, 8, 8, 8, 8.8, 8.88, 'hhhhhhhh', '2021-09-01 00:00:00.000', 'hhhhhhhh', 8, 8, 8, 8, "0x0D",'POINT(8 8)'""", + """'2024-10-01 00:00:07.000', NULL, False, 9, 9, 9, 9, 9.9, 9.99, 'iiiiiiii', '2021-09-01 00:00:00.000', 'iiiiiiii', 9, 9, 9, 9, "0x0E",'POINT(9 9)'""", + """'2024-10-01 00:00:08.000', NULL, True, 10, 10, 10, 10, 10.10, 10.1010, 'jjjjjjjj', '2021-09-01 00:00:00.000', 'jjjjjjjj', 10, 10, 10, 10, "0x0F",'POINT(10 10)'""", + """'2024-10-01 00:00:09.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL""" + ] + ct1_insert_sql = "insert into ct1 values(%s);" % "),(".join(ct1_data) + tdSql.execute(ct1_insert_sql) + ct2_data = [ + """'2024-10-01 00:00:00.000', NULL, True, 2, 2, 2, 2, 2.2, 2.22, 'bbbbbbbb', '2021-09-01 00:00:00.000', 'bbbbbbbb', 2, 2, 2, 2, "0x07",'POINT(2 2)'""", + """'2024-10-01 00:00:01.000', NULL, False, 3, 3, 3, 3, 3.3, 3.33, 'cccccccc', '2021-09-01 00:00:00.000', 'cccccccc', 
3, 3, 3, 3, "0x08",'POINT(3 3)'""", + """'2024-10-01 00:00:02.000', NULL, True, 4, 4, 4, 4, 4.4, 4.44, 'dddddddd', '2021-09-01 00:00:00.000', 'dddddddd', 4, 4, 4, 4, "0x09",'POINT(4 4)'""", + """'2024-10-01 00:00:03.000', NULL, False, 5, 5, 5, 5, 5.5, 5.55, 'eeeeeeee', '2021-09-01 00:00:00.000', 'eeeeeeee', 5, 5, 5, 5, "0x0A",'POINT(5 5)'""", + """'2024-10-01 00:00:04.000', NULL, True, 6, 6, 6, 6, 6.6, 6.66, 'ffffffff', '2021-09-01 00:00:00.000', 'ffffffff', 6, 6, 6, 6, "0x0B",'POINT(6 6)'""", + """'2024-10-01 00:00:05.000', NULL, False, 7, 7, 7, 7, 7.7, 7.77, 'gggggggg', '2021-09-01 00:00:00.000', 'gggggggg', 7, 7, 7, 7, "0x0C",'POINT(7 7)'""", + """'2024-10-01 00:00:06.000', NULL, True, 8, 8, 8, 8, 8.8, 8.88, 'hhhhhhhh', '2021-09-01 00:00:00.000', 'hhhhhhhh', 8, 8, 8, 8, "0x0D",'POINT(8 8)'""", + """'2024-10-01 00:00:07.000', NULL, False, 9, 9, 9, 9, 9.9, 9.99, 'iiiiiiii', '2021-09-01 00:00:00.000', 'iiiiiiii', 9, 9, 9, 9, "0x0E",'POINT(9 9)'""", + """'2024-10-01 00:00:08.000', NULL, True, 10, 10, 10, 10, 10.10, 10.1010, 'jjjjjjjj', '2021-09-01 00:00:00.000', 'jjjjjjjj', 10, 10, 10, 10, "0x0F",'POINT(10 10)'""", + """'2024-10-01 00:00:09.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL""" + ] + ct2_insert_sql = "insert into ct2 values(%s);" % "),(".join(ct2_data) + tdSql.execute(ct2_insert_sql) + + def test_case_when_statements(self): + tdSql.execute("use test_case_when;") + tdSql.query("select case when c_null is null then c_null else t_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_null is not null then c_null else t_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_bool is null then c_bool else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when 
c_bool is not null then c_bool else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_tinyint is null then c_tinyint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_tinyint is not null then c_tinyint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_smallint is null then c_smallint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_smallint is not null then c_smallint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_int is null then c_int else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_int is not null then c_int else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_bigint is null then c_bigint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_bigint is not null then c_bigint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_float is null then c_float else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_float is not null then c_float else c_null end from st1;") + assert(tdSql.checkRows(10) and 
tdSql.res == [('2.200000',), ('3.300000',), ('4.400000',), ('5.500000',), ('6.600000',), ('7.700000',), ('8.800000',), ('9.900000',), ('10.100000',), (None,)]) + + tdSql.query("select case when c_double is null then c_double else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_double is not null then c_double else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('2.220000',), ('3.330000',), ('4.440000',), ('5.550000',), ('6.660000',), ('7.770000',), ('8.880000',), ('9.990000',), ('10.101000',), (None,)]) + + tdSql.query("select case when c_varchar is null then c_varchar else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_varchar is not null then c_varchar else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)]) + + tdSql.query("select case when c_nchar is null then c_nchar else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_nchar is not null then c_nchar else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)]) + + tdSql.query("select case when c_utinyint is null then c_utinyint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_utinyint is not null then c_utinyint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_usmallint is null then 
c_usmallint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_usmallint is not null then c_usmallint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_uint is null then c_uint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_uint is not null then c_uint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_ubigint is null then c_ubigint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_ubigint is not null then c_ubigint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('2',), ('3',), ('4',), ('5',), ('6',), ('7',), ('8',), ('9',), ('10',), (None,)]) + + tdSql.error("select case when c_varbinary is null then c_varbinary else c_null end from st1;") + tdSql.error("select case when c_varbinary is not null then c_varbinary else c_null end from st1;") + + tdSql.query("select case when c_null is null then NULL else c_bool end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_null is not null then NULL else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(True,), (False,), (True,), (False,), (True,), (False,), (True,), (False,), (True,), (None,)]) + + tdSql.query("select case when c_bool=true then NULL else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(None,), (False,), (None,), (False,), (None,), (False,), (None,), (False,), (None,), (None,)]) + + tdSql.query("select case when c_bool!=true then NULL else 
c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(True,), (None,), (True,), (None,), (True,), (None,), (True,), (None,), (True,), (None,)]) + + tdSql.query("select case when c_tinyint=2 then c_tinyint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_tinyint!=2 then c_tinyint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_smallint=2 then c_smallint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_smallint!=2 then c_smallint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_int=2 then c_int else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_int!=2 then c_int else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_bigint=2 then c_bigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_bigint!=2 then c_bigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_float=2.2 then c_float else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res[1:] == [(0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (None,)]) + + tdSql.query("select case when 
c_float!=2.2 then c_float else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res[0] == (1.0,)) + + tdSql.query("select case when c_double=2.22 then c_double else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2.22,), (0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (None,)]) + + tdSql.query("select case when c_double!=2.2 then c_double else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2.22,), (3.33,), (4.44,), (5.55,), (6.66,), (7.77,), (8.88,), (9.99,), (10.101,), (None,)]) + + tdSql.query("select case when c_varchar='bbbbbbbb' then c_varchar else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), (None,)]) + + tdSql.query("select case when c_varchar!='bbbbbbbb' then c_varchar else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('true',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)]) + + tdSql.query("select case when c_timestamp='2021-09-01 00:00:00.000' then c_timestamp else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (None,)]) + + tdSql.query("select case when c_timestamp!='2021-09-01 00:00:00.000' then c_timestamp else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_nchar='bbbbbbbb' then c_nchar else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), (None,)]) + + tdSql.query("select case when c_nchar!='bbbbbbbb' then c_nchar 
else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('true',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)]) + + tdSql.query("select case when c_utinyint=2 then c_utinyint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_utinyint!=2 then c_utinyint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_usmallint=2 then c_usmallint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_usmallint!=2 then c_usmallint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_uint=2 then c_uint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_uint!=2 then c_uint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_ubigint=2 then c_ubigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_ubigint!=2 then c_ubigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_ubigint=2 then c_ubigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), 
(1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_ubigint!=2 then c_ubigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.error("select case when c_varbinary='\x30783037' then c_varbinary else c_bool end from st1;") + tdSql.error("select case when c_varbinary!='\x30783037' then c_varbinary else c_bool end from st1;") + + tdSql.query("select case when c_null is null then NULL else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_null is not null then NULL else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_bool=true then false else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(0,), (3,), (0,), (5,), (0,), (7,), (0,), (9,), (0,), (None,)]) + + tdSql.query("select case when c_bool!=true then false else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (4,), (0,), (6,), (0,), (8,), (0,), (10,), (None,)]) + + tdSql.query("select case when c_smallint=2 then c_smallint else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_smallint!=2 then c_smallint else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_int=2 then c_smallint else c_int end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_int!=2 then c_smallint else c_int end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), 
(9,), (10,), (None,)]) + + tdSql.query("select case when c_float=2.2 then 387897 else 'test message' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('387897',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',)]) + + tdSql.query("select case when c_double=2.22 then 387897 else 'test message' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('387897',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',)]) + + tdSql.query("select case when c_varchar='cccccccc' then 'test' when c_varchar='bbbbbbbb' then 'bbbb' else 'test message' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('bbbb',), ('test',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',)]) + + tdSql.query("select case when ts='2024-10-01 00:00:04.000' then 456646546 when ts>'2024-10-01 00:00:04.000' then 'after today' else 'before today or unknow date' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('before today or unknow date',), ('before today or unknow date',), ('before today or unknow date',), ('before today or unknow date',), ('456646546',), ('after today',), ('after today',), ('after today',), ('after today',), ('after today',)]) + + tdSql.error("select case when c_geometry is null then c_geometry else c_null end from st1;") + tdSql.error("select case when c_geometry is not null then c_geometry else c_null end from st1;") + tdSql.error("select case when c_geometry='POINT(2 2)' then c_geometry else c_bool end from st1;") + tdSql.error("select case when c_geometry!='POINT(2 2)' then c_geometry else c_bool end from st1;") + + tdSql.error("select case when t is null then t else c_null end from st2;") + 
tdSql.error("select case when t is not null then t else c_null end from st2;") + tdSql.error("select case when t->'location'='beijing' then t->'location' else c_bool end from st2;") + tdSql.error("select case when t->'location'!='beijing' then t->'location' else c_bool end from st1;") + + tdSql.query("select case when c_float!=2.2 then 387897 else 'test message' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('test message',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('test message',)]) + + tdSql.query("select case when c_double!=2.22 then 387897 else 'test message' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('test message',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('test message',)]) + + tdSql.query("select case c_tinyint when 2 then -2147483648 when 3 then 'three' else '4294967295' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('-2147483648',), ('three',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',)]) + + tdSql.query("select case c_float when 2.2 then 9.2233720e+18 when 3.3 then -9.2233720e+18 else 'aa' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('9223372000000000000.000000',), ('-9223372000000000000.000000',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',)]) + + tdSql.query("select case t1.c_int when 2 then 'run' when t1.c_int is null then 'other' else t2.c_varchar end from st1 t1, st2 t2 where t1.ts=t2.ts;") + print(tdSql.res) + assert(tdSql.checkRows(10) and tdSql.res == [('run',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)]) + + tdSql.query("select avg(case when c_tinyint>=2 then c_tinyint else c_null end) from st1;") + assert(tdSql.checkRows(1) and tdSql.res == 
[(6.0,)]) + + tdSql.query("select sum(case when c_tinyint>=2 then c_tinyint else c_null end) from st1;") + assert(tdSql.checkRows(1) and tdSql.res == [(54,)]) + + tdSql.query("select first(case when c_int >=2 then 'abc' else 0 end) from st1;") + assert(tdSql.checkRows(1) and tdSql.res == [('abc',)]) + + tdSql.query("select last(case when c_int >=2 then c_int else 0 end) from st1;") + assert(tdSql.checkRows(1) and tdSql.res == [(0,)]) + + def run(self): + self.prepare_data() + self.test_case_when_statements() + + def stop(self): + tdSql.execute("drop database test_case_when;") + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/army/storage/blob/ablob.py b/tests/army/storage/blob/ablob.py index fae492a3dff..d3e00f3424d 100644 --- a/tests/army/storage/blob/ablob.py +++ b/tests/army/storage/blob/ablob.py @@ -31,8 +31,6 @@ class TDTestCase(TBase): - index = eutil.cpuRand(20) + 1 - bucketName = f"ci-bucket{index}" updatecfgDict = { "supportVnodes":"1000", 's3EndPoint': 'https://.blob.core.windows.net', @@ -44,7 +42,6 @@ class TDTestCase(TBase): 's3MigrateEnabled': '1' } - tdLog.info(f"assign bucketName is {bucketName}\n") maxFileSize = (128 + 10) * 1014 * 1024 # add 10M buffer def insertData(self): @@ -152,13 +149,13 @@ def checkCreateDb(self, keepLocal, chunkSize, compact): if keepLocal is not None: kw1 = f"s3_keeplocal {keepLocal}" if chunkSize is not None: - kw2 = f"s3_chunksize {chunkSize}" + kw2 = f"s3_chunkpages {chunkSize}" if compact is not None: kw3 = f"s3_compact {compact}" sql = f" create database db1 vgroups 1 duration 1h {kw1} {kw2} {kw3}" tdSql.execute(sql, show=True) - #sql = f"select name,s3_keeplocal,s3_chunksize,s3_compact from information_schema.ins_databases where name='db1';" + #sql = f"select name,s3_keeplocal,s3_chunkpages,s3_compact from information_schema.ins_databases where name='db1';" sql = f"select * from 
information_schema.ins_databases where name='db1';" tdSql.query(sql) # 29 30 31 -> chunksize keeplocal compact @@ -172,15 +169,32 @@ def checkCreateDb(self, keepLocal, chunkSize, compact): sql = "drop database db1" tdSql.execute(sql) + def checkDefault(self, keepLocal, chunkSize, compact): + sql = f" create database db1 vgroups 1" + tdSql.execute(sql, show=True) + #sql = f"select name,s3_keeplocal,s3_chunkpages,s3_compact from information_schema.ins_databases where name='db1';" + sql = f"select * from information_schema.ins_databases where name='db1';" + tdSql.query(sql) + # 29 30 31 -> chunksize keeplocal compact + if chunkSize is not None: + tdSql.checkData(0, 29, chunkSize) + if keepLocal is not None: + keepLocalm = keepLocal * 24 * 60 + tdSql.checkData(0, 30, f"{keepLocalm}m") + if compact is not None: + tdSql.checkData(0, 31, compact) + sql = "drop database db1" + tdSql.execute(sql) + def checkExcept(self): # errors sqls = [ f"create database db2 s3_keeplocal -1", f"create database db2 s3_keeplocal 0", f"create database db2 s3_keeplocal 365001", - f"create database db2 s3_chunksize -1", - f"create database db2 s3_chunksize 0", - f"create database db2 s3_chunksize 900000000", + f"create database db2 s3_chunkpages -1", + f"create database db2 s3_chunkpages 0", + f"create database db2 s3_chunkpages 900000000", f"create database db2 s3_compact -1", f"create database db2 s3_compact 100", f"create database db2 duration 1d s3_keeplocal 1d" @@ -226,16 +240,7 @@ def checkBasic(self): # except self.checkExcept() - - # - def preDb(self, vgroups): - cnt = int(time.time())%2 + 1 - for i in range(cnt): - vg = eutil.cpuRand(9) + 1 - sql = f"create database predb vgroups {vg}" - tdSql.execute(sql, show=True) - sql = "drop database predb" - tdSql.execute(sql, show=True) + self.checkDefault(365, 131072, 1) # history def insertHistory(self): @@ -287,9 +292,6 @@ def run(self): if eos.isArm64Cpu(): tdLog.success(f"{__file__} arm64 ignore executed") else: - - self.preDb(10) - # 
insert data self.insertData() @@ -311,7 +313,6 @@ def run(self): # check insert correct again self.checkInsertCorrect() - # check stream correct and drop stream #self.checkStreamCorrect() @@ -321,7 +322,7 @@ def run(self): # insert history disorder data self.insertHistory() - # checkBasic + # check db params self.checkBasic() #self.checkInsertCorrect() @@ -335,10 +336,8 @@ def run(self): # drop database and free s3 file self.dropDb() - tdLog.success(f"{__file__} successfully executed") - tdCases.addLinux(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/army/storage/blob/perf.json b/tests/army/storage/blob/perf.json new file mode 100644 index 00000000000..002515873ea --- /dev/null +++ b/tests/army/storage/blob/perf.json @@ -0,0 +1,67 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "num_of_records_per_req": 4000, + "prepared_rand": 500, + "thread_count": 4, + "create_table_thread_count": 1, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 3, + "replica": 3, + "duration":"10d", + "s3_keeplocal":"30d", + "s3_chunkpages":"131072", + "tsdb_pagesize":"1", + "s3_compact":"1", + "wal_retention_size":"1", + "wal_retention_period":"1", + "flush_each_batch":"no", + "keep": "3650d" + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 500, + "insert_rows": 200000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "timestamp_step": 100, + "start_timestamp": 1600000000000, + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc" }, + { "type": "double", "name": "dc"}, + { "type": "tinyint", "name": "ti"}, + { "type": "smallint", "name": "si" }, + { "type": "int", "name": "ic" ,"max": 1,"min": 1}, + { "type": "bigint", "name": "bi" }, + { "type": "utinyint", "name": "uti"}, + { 
"type": "usmallint", "name": "usi"}, + { "type": "uint", "name": "ui" }, + { "type": "ubigint", "name": "ubi"}, + { "type": "binary", "name": "bin", "len": 50}, + { "type": "nchar", "name": "nch", "len": 100} + ], + "tags": [ + {"type": "tinyint", "name": "groupid","max": 10,"min": 1}, + {"name": "location","type": "binary", "len": 16, "values": + ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} + diff --git a/tests/army/storage/blob/s3Basic.json b/tests/army/storage/blob/s3Basic.json index ee341b20961..2b911a989ff 100644 --- a/tests/army/storage/blob/s3Basic.json +++ b/tests/army/storage/blob/s3Basic.json @@ -20,7 +20,7 @@ "replica": 1, "duration":"10d", "s3_keeplocal":"30d", - "s3_chunksize":"131072", + "s3_chunkpages":"131072", "tsdb_pagesize":"1", "s3_compact":"1", "wal_retention_size":"1", diff --git a/tests/army/storage/blob/s3Basic1.json b/tests/army/storage/blob/s3Basic1.json index 02be308443c..087f89edec4 100644 --- a/tests/army/storage/blob/s3Basic1.json +++ b/tests/army/storage/blob/s3Basic1.json @@ -20,7 +20,7 @@ "replica": 1, "duration":"10d", "s3_keeplocal":"30d", - "s3_chunksize":"131072", + "s3_chunkpages":"131072", "tsdb_pagesize":"1", "s3_compact":"1", "wal_retention_size":"1", diff --git a/tests/army/storage/s3/s3Basic.json b/tests/army/storage/s3/s3Basic.json index ee341b20961..2b911a989ff 100644 --- a/tests/army/storage/s3/s3Basic.json +++ b/tests/army/storage/s3/s3Basic.json @@ -20,7 +20,7 @@ "replica": 1, "duration":"10d", "s3_keeplocal":"30d", - "s3_chunksize":"131072", + "s3_chunkpages":"131072", "tsdb_pagesize":"1", "s3_compact":"1", "wal_retention_size":"1", diff --git a/tests/army/storage/s3/s3Basic.py b/tests/army/storage/s3/s3Basic.py index bc55fe6f5cc..273a6129e1c 100644 --- a/tests/army/storage/s3/s3Basic.py +++ b/tests/army/storage/s3/s3Basic.py @@ -168,13 +168,13 @@ def checkCreateDb(self, keepLocal, chunkSize, 
compact): if keepLocal is not None: kw1 = f"s3_keeplocal {keepLocal}" if chunkSize is not None: - kw2 = f"s3_chunksize {chunkSize}" + kw2 = f"s3_chunkpages {chunkSize}" if compact is not None: kw3 = f"s3_compact {compact}" sql = f" create database db1 vgroups 1 duration 1h {kw1} {kw2} {kw3}" tdSql.execute(sql, show=True) - #sql = f"select name,s3_keeplocal,s3_chunksize,s3_compact from information_schema.ins_databases where name='db1';" + #sql = f"select name,s3_keeplocal,s3_chunkpages,s3_compact from information_schema.ins_databases where name='db1';" sql = f"select * from information_schema.ins_databases where name='db1';" tdSql.query(sql) # 29 30 31 -> chunksize keeplocal compact @@ -194,9 +194,9 @@ def checkExcept(self): f"create database db2 s3_keeplocal -1", f"create database db2 s3_keeplocal 0", f"create database db2 s3_keeplocal 365001", - f"create database db2 s3_chunksize -1", - f"create database db2 s3_chunksize 0", - f"create database db2 s3_chunksize 900000000", + f"create database db2 s3_chunkpages -1", + f"create database db2 s3_chunkpages 0", + f"create database db2 s3_chunkpages 900000000", f"create database db2 s3_compact -1", f"create database db2 s3_compact 100", f"create database db2 duration 1d s3_keeplocal 1d" diff --git a/tests/army/storage/s3/s3Basic1.json b/tests/army/storage/s3/s3Basic1.json index 02be308443c..087f89edec4 100644 --- a/tests/army/storage/s3/s3Basic1.json +++ b/tests/army/storage/s3/s3Basic1.json @@ -20,7 +20,7 @@ "replica": 1, "duration":"10d", "s3_keeplocal":"30d", - "s3_chunksize":"131072", + "s3_chunkpages":"131072", "tsdb_pagesize":"1", "s3_compact":"1", "wal_retention_size":"1", diff --git a/tests/army/storage/s3/s3azure.py b/tests/army/storage/s3/s3azure.py index 43857cb7ca5..e0226b0aa46 100644 --- a/tests/army/storage/s3/s3azure.py +++ b/tests/army/storage/s3/s3azure.py @@ -202,13 +202,13 @@ def checkCreateDb(self, keepLocal, chunkSize, compact): if keepLocal is not None: kw1 = f"s3_keeplocal {keepLocal}" if 
chunkSize is not None: - kw2 = f"s3_chunksize {chunkSize}" + kw2 = f"s3_chunkpages {chunkSize}" if compact is not None: kw3 = f"s3_compact {compact}" sql = f" create database db1 vgroups 1 duration 1h {kw1} {kw2} {kw3}" tdSql.execute(sql, show=True) - # sql = f"select name,s3_keeplocal,s3_chunksize,s3_compact from information_schema.ins_databases where name='db1';" + # sql = f"select name,s3_keeplocal,s3_chunkpages,s3_compact from information_schema.ins_databases where name='db1';" sql = f"select * from information_schema.ins_databases where name='db1';" tdSql.query(sql) # 29 30 31 -> chunksize keeplocal compact @@ -228,9 +228,9 @@ def checkExcept(self): f"create database db2 s3_keeplocal -1", f"create database db2 s3_keeplocal 0", f"create database db2 s3_keeplocal 365001", - f"create database db2 s3_chunksize -1", - f"create database db2 s3_chunksize 0", - f"create database db2 s3_chunksize 900000000", + f"create database db2 s3_chunkpages -1", + f"create database db2 s3_chunkpages 0", + f"create database db2 s3_chunkpages 900000000", f"create database db2 s3_compact -1", f"create database db2 s3_compact 100", f"create database db2 duration 1d s3_keeplocal 1d" diff --git a/tests/army/tmq/drop_lost_comsumers.py b/tests/army/tmq/drop_lost_comsumers.py new file mode 100644 index 00000000000..a5e8140c4aa --- /dev/null +++ b/tests/army/tmq/drop_lost_comsumers.py @@ -0,0 +1,337 @@ +import time +import os +import threading +import datetime +from taos.tmq import Consumer +from taos.error import TmqError + +from frame.log import tdLog +from frame.cases import tdCases +from frame.sql import tdSql +from frame.caseBase import * +from frame import etool +from frame.common import tdCom + + +class TaosConsumer: + # TODO: Move this class to tq.py and remove it from here + def __init__(self): + self.sub_once = True + self.once_consumer_rows = 0 + self.sub_log = False + self.safe_counter = ThreadSafeCounter() + + def log_info(self, message): + if self.sub_log: + 
tdLog.info(message) + + #TODO merge sub_consumer and sub_consumer_once + def sub_consumer(self, consumer, group_id, topic_name): + group_id = int(group_id) + if group_id < 100: + try: + consumer.subscribe([topic_name]) + except TmqError: + tdLog.exit(f"subscribe error") + nrows = 0 + while True: + start = datetime.datetime.now() + tdLog.info(f"time:{start},consumer:{group_id}, start to consume") + message = consumer.poll(timeout=10.0) + + if message: + message_offset = message.offset() + # topic = message.topic() + # database = message.database() + + for block in message: + addrows = block.nrows() + nrows += block.nrows() + ncols = block.ncols() + # values = block.fetchall + end = datetime.datetime.now() + elapsed_time = end - start + tdLog.info( + f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time}," + f"consumer_nrows:{nrows},consumer_addrows:{addrows}," + f"consumer_ncols:{ncols},offset:{id}" + ) + consumer.commit() + tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") + # consumer.unsubscribe() + # consumer.close() + + def set_conf( + self, + td_connect_ip="localhost", + group_id=1, + client_id="test_consumer_py", + enable_auto_commit="false", + auto_commit_interval_ms="1000", + auto_offset_reset="earliest", + msg_with_table_name="true", + session_timeout_ms=10000, + max_poll_interval_ms=180000, + experimental_snapshot_enable="false", + ): + conf = { + # auth options + # consume options + "td.connect.ip": f"{td_connect_ip}", + "group.id": f"{group_id}", + "client.id": f"{client_id}", + "enable.auto.commit": f"{enable_auto_commit}", + "auto.commit.interval.ms": f"{auto_commit_interval_ms}", + "auto.offset.reset": f"{auto_offset_reset}", + "msg.with.table.name": f"{msg_with_table_name}", + "session.timeout.ms": f"{session_timeout_ms}", + "max.poll.interval.ms": f"{max_poll_interval_ms}", + "experimental.snapshot.enable": f"{experimental_snapshot_enable}", + } + return conf + + def sub_consumer_once(self, consumer, group_id, topic_name, 
stop_event): + group_id = int(group_id) + if group_id < 100: + consumer.subscribe([topic_name]) + nrows = 0 + consumer_nrows = 0 + count = 0 + while not stop_event.is_set(): + start = datetime.datetime.now() + # self.log_info( + # f"time:{start},consumer:{group_id}, start to consume,consumer_nrows:{consumer_nrows}" + # ) + message = None + if consumer_nrows < self.once_consumer_rows: + message = consumer.poll(timeout=1.0) + elif consumer_nrows >= self.once_consumer_rows: + if count == 0: + # when break the loop, the consumer will be closed, so we need to continue to keep consumer alive util the stop_event is set + tdLog.info("stop consumer when consumer all rows") + count += 1 + # tdLog.info("stop consumer when consumer all rows") + else: + continue + if message: + message_offset = message.offset() + # topic = message.topic() + # database = message.database() + for block in message: + addrows = block.nrows() + nrows += block.nrows() + self.safe_counter.rows(block.nrows()) + ncols = block.ncols() + # values = block.fetchall + end = datetime.datetime.now() + elapsed_time = end - start + + # self.log_info( + # f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{message_offset}" + # ) + self.log_info( + f"consumer:{group_id},consumer_nrows:{nrows},counter.counter:{self.safe_counter.counter},counter.get():{self.safe_counter.get()}" + ) + + # consumer.commit() + consumer_nrows = nrows + + tdLog.info("Consumer subscription thread is stopping.") + + def taosc_consumer(self, conf: list, topic_name: str, stop_event: threading.Event): + try: + tdLog.info(conf) + tdLog.info("start to config consumer") + consumer = Consumer(conf) + tdLog.info("start to subscribe") + group_id = int(conf["group.id"]) + tdLog.info(f"{consumer},{group_id}") + if self.sub_once: + self.sub_consumer_once(consumer, group_id, topic_name, stop_event) + else: + self.sub_consumer(consumer, group_id, topic_name) + # 
only consumer once + except Exception as e: + tdLog.exit(f"{e}") + + # consumer.close() + + +class ThreadSafeCounter: + def __init__(self): + self.counter = 0 + self.lock = threading.Lock() + + def rows(self, rows): + with self.lock: + self.counter += rows + + def get(self): + with self.lock: + return self.counter + + +class TDTestCase: + # updatecfgDict = {'debugFlag': 135, 'asynclog': 0} + def __init__(self): + # db parameter + self.table_number = 1000 + self.rows_per_table = 1000 + # consumer parameter + self.consumer_groups_num = 2 + self.session_timeout_ms = 180000 + self.max_poll_interval_ms = 180000 + # case consumer parameter + self.consumer_rows_per_thread = self.table_number * self.rows_per_table + self.consumer_all_rows = ( + self.consumer_rows_per_thread * self.consumer_groups_num + ) + self.topic_name = "select_d1" + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), logSql) + self.consumer_instance = TaosConsumer() + # tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def caseDescription(self): + """ + drop_lost_consmuers: + 1. verifying that the boundary and valid values of session_timeout_ms are in effect + 2. verifying that the boundary and valid values of max_poll_interval_ms are in effect + 3. 
verifying that consumer will be closed when the session_timeout_ms and max_poll_interval_ms is expired + """ + return + + def check_consumer(self, count, rows, stop_event=None): + time.sleep(count) + try: + tdLog.info( + f"wait timeout count:{count} and check consumer status whether is closed" + ) + for _ in range(2): + tdSql.query("show consumers") + anser_rows = tdSql.getRows() + if anser_rows == rows: + break + time.sleep(1) + tdLog.info( + f"wait for {count} seconds to check that consumers number is {anser_rows}" + ) + if anser_rows != rows: + if stop_event: + stop_event.set() + tdLog.exit(f"consumer number is {anser_rows } but not expected {rows}") + except Exception as e: + tdLog.exit(f"{e},check consumer error") + + def drop_session_timeout_consmuers(self): + tdSql.execute(f"drop topic if exists {self.topic_name};") + tdSql.execute("use db_sub") + tdSql.execute(f"create topic {self.topic_name} as select * from db_sub.meters;") + + # start consumer and config some parameters + os.system( + f"nohup python3 ./tmq/per_consumer.py -c {self.consumer_groups_num} -s {self.session_timeout_ms} -p {self.max_poll_interval_ms} -t {self.topic_name} > consumer.log &" + ) + # wait 5s for consuming data + time.sleep(5) + # kill consumer to simulate session_timeout_ms + tdLog.info("kill per_consumer.py") + tdCom.kill_signal_process( + signal=9, processor_name=r"python3\s*./tmq/per_consumer.py" + ) + self.check_consumer(int(self.session_timeout_ms / 1000), 0) + tdSql.execute(f"drop topic if exists {self.topic_name};") + os.system("rm -rf consumer.log") + + def drop_max_poll_timeout_consmuers(self): + tdSql.execute(f"drop topic if exists {self.topic_name};") + tdSql.execute("use db_sub") + tdSql.execute(f"create topic {self.topic_name} as select * from db_sub.meters;") + + threads = [] + self.safe_counter = ThreadSafeCounter() + self.consumer_instance.safe_counter = self.safe_counter + stop_event = threading.Event() + self.consumer_instance.once_consumer_rows = 
self.consumer_rows_per_thread + tdLog.info(f"consumer_rows:{self.consumer_instance.once_consumer_rows}") + self.consumer_instance.sub_once = True + for group_id in range(self.consumer_groups_num): + conf = self.consumer_instance.set_conf( + group_id=group_id, + session_timeout_ms=self.session_timeout_ms, + max_poll_interval_ms=self.max_poll_interval_ms, + ) + threads.append( + threading.Thread( + target=self.consumer_instance.taosc_consumer, + args=(conf, self.topic_name, stop_event), + ) + ) + for tr in threads: + tr.start() + + while True: + if self.safe_counter.counter < self.consumer_all_rows: + # control print log frequency + time.sleep(1) + tdLog.info( + f"consumer_all_rows:{self.consumer_all_rows},counter.get():{self.safe_counter.counter}" + ) + elif self.safe_counter.counter == self.consumer_all_rows: + # adding 5s is for heartbeat check + self.check_consumer(int(self.max_poll_interval_ms / 1000 ) + 5, 0, stop_event) + stop_event.set() + break + + time.sleep(1) + tdSql.execute(f"drop topic if exists {self.topic_name};") + + def case_session_timeout(self): + """ + TEST CASE: verifying that the boundary and valid values of session_timeout_ms are in effect + """ + + tdLog.info("start to test session_timeout_ms=12s") + # test session_timeout_ms=12s + self.session_timeout_ms = 12000 + self.max_poll_interval_ms = 180000 + # self.set_session_timeout = int(self.session_timeout_ms / 1000) + self.drop_session_timeout_consmuers() + tdLog.info("stop to test session_timeout_ms=12s and done ") + + def case_max_poll_timeout(self): + """ + TEST CASE: verifying that the boundary and valid values of max_poll_interval_ms are in effect + """ + tdLog.info("start to test max_poll_interval_ms=20s") + # test max_poll_interval_ms=20s + self.session_timeout_ms = 300000 + self.max_poll_interval_ms = 20000 + self.drop_max_poll_timeout_consmuers() + tdLog.info("stop to test max_poll_interval_ms=20s and done ") + + def run(self): + """ + Run the test cases for session timeout and max 
poll timeout. + """ + vgroups = 4 + etool.benchMark( + command=f"-d db_sub -t {self.table_number} -n {self.rows_per_table} -v {vgroups} -a {self.replicaVar} -y" + ) + # test case start here + self.topic_name = "select_d1" + # self.case_session_timeout() + self.case_max_poll_timeout() + + def stop(self): + """ + Closes the taos connection and logs the success message. + """ + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/army/tmq/per_consumer.py b/tests/army/tmq/per_consumer.py new file mode 100644 index 00000000000..b8f409d7109 --- /dev/null +++ b/tests/army/tmq/per_consumer.py @@ -0,0 +1,182 @@ +import os +import taos +import sys +from datetime import datetime +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +from frame.log import tdLog +import subprocess +from multiprocessing import Process +import threading +from taos.tmq import Consumer +import click + +# TDDO +# 1. 
using tmq common class to replace the function, file drop_lost_consumers.py has the same function + +try: + conn = taos.connect() +except Exception as e: + tdLog.info(str(e)) + + +@click.command() +@click.option( + "-c", + "--consumer-groups-num", + "consumer_group_num", + default=1, + help="Number of consumer group.", +) +@click.option( + "-s", + "--session-timeout-ms", + "session_timeout_ms", + default=60000, + help="session timeout:ms", +) +@click.option( + "-p", + "--max-poll-interval-ms", + "max_poll_interval_ms", + default=180000, + help="max poll interval timeout:ms", +) +@click.option( + "-t", + "--topic-name", + "topic_name", + default="select_d1", + help="topic name", +) +def test_timeout_sub(consumer_group_num, session_timeout_ms, max_poll_interval_ms, topic_name): + threads = [] + tdLog.info(f"consumer_group_num:{consumer_group_num}, session_timeout_ms:{session_timeout_ms}, max_poll_interval_ms:{max_poll_interval_ms}") + for id in range(consumer_group_num): + conf = set_conf( + group_id=id, + session_timeout_ms=session_timeout_ms, + max_poll_interval_ms=max_poll_interval_ms, + ) + tdLog.info(f"conf:{conf}") + threads.append(threading.Thread(target=taosc_consumer, args=(conf,topic_name))) + for tr in threads: + tr.start() + for tr in threads: + tr.join() + + +def sub_consumer(consumer, group_id, topic_name): + group_id = int(group_id) + if group_id < 100: + try: + consumer.subscribe([topic_name]) + except Exception as e: + tdLog.info(f"subscribe error") + exit(1) + + nrows = 0 + while True: + start = datetime.now() + tdLog.info(f"time:{start},consumer:{group_id}, start to consume") + message = consumer.poll(timeout=10.0) + + if message: + id = message.offset() + topic = message.topic() + database = message.database() + + for block in message: + addrows = block.nrows() + nrows += block.nrows() + ncols = block.ncols() + values = block.fetchall + end = datetime.now() + elapsed_time = end - start + tdLog.info( + f"time:{end},consumer:{group_id}, elapsed 
time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}" + ) + consumer.commit() + tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") + # consumer.unsubscribe() + # consumer.close() + + +def sub_consumer_once(consumer, group_id, topic_name): + group_id = int(group_id) + if group_id < 100: + consumer.subscribe([topic_name]) + nrows = 0 + consumer_nrows = 0 + while True: + start = datetime.now() + tdLog.info(f"time:{start},consumer:{group_id}, start to consume") + # start = datetime.now() + # tdLog.info(f"time:{start},consumer:{group_id}, start to consume") + tdLog.info(f"consumer_nrows:{consumer_nrows}") + if consumer_nrows < 1000000: + message = consumer.poll(timeout=10.0) + else: + tdLog.info(" stop consumer when consumer all rows") + + if message: + id = message.offset() + topic = message.topic() + database = message.database() + + for block in message: + addrows = block.nrows() + nrows += block.nrows() + ncols = block.ncols() + values = block.fetchall + end = datetime.now() + elapsed_time = end - start + # tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}") + consumer.commit() + # tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}") + consumer_nrows = nrows + # consumer.unsubscribe() + # consumer.close() + # break + + +def set_conf( + td_connect_ip="localhost", + group_id=1, + client_id="test_consumer_py", + enable_auto_commit="false", + auto_commit_interval_ms="1000", + auto_offset_reset="earliest", + msg_with_table_name="true", + session_timeout_ms=10000, + max_poll_interval_ms=20000, + experimental_snapshot_enable="false", +): + conf = { + # auth options + # consume options + "td.connect.ip": f"{td_connect_ip}", + "group.id": f"{group_id}", + "client.id": f"{client_id}", + "enable.auto.commit": f"{enable_auto_commit}", + "auto.commit.interval.ms": f"{auto_commit_interval_ms}", + 
"auto.offset.reset": f"{auto_offset_reset}", + "msg.with.table.name": f"{msg_with_table_name}", + "session.timeout.ms": f"{session_timeout_ms}", + "max.poll.interval.ms": f"{max_poll_interval_ms}", + "experimental.snapshot.enable": f"{experimental_snapshot_enable}", + } + return conf + + +def taosc_consumer(conf,topic_name): + consumer = Consumer(conf) + group_id = int(conf["group.id"]) + tdLog.info(f"{consumer},{group_id}") + try: + sub_consumer_once(consumer, group_id, topic_name) + except Exception as e: + tdLog.info(str(e)) + + +if __name__ == "__main__": + test_timeout_sub() diff --git a/tests/ci/count_assert.py b/tests/ci/count_assert.py index aecc57578d0..65a3192aba4 100644 --- a/tests/ci/count_assert.py +++ b/tests/ci/count_assert.py @@ -40,6 +40,7 @@ # List of files to exclude exclude_source_files = [ f"{TD_project_path}/community/source/libs/parser/src/sql.c", + f"{TD_project_path}/community/source/libs/parser/inc/sql.c", f"{TD_project_path}/community/source/util/src/tlog.c", f"{TD_project_path}/community/include/util/tlog.h" ] diff --git a/tests/develop-test/2-query/show_create_db.py b/tests/develop-test/2-query/show_create_db.py index d7d093aa782..9589b6dc6fd 100644 --- a/tests/develop-test/2-query/show_create_db.py +++ b/tests/develop-test/2-query/show_create_db.py @@ -42,17 +42,17 @@ def run(self): tdSql.query('show create database scd;') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd') - tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 5256000m S3_COMPACT 0") + tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 
10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") tdSql.query('show create database scd2;') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd2') - tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 5256000m S3_COMPACT 0") + tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") tdSql.query('show create database scd4') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd4') - tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 5256000m S3_COMPACT 0") + tdSql.checkData(0, 1, "CREATE 
DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") self.restartTaosd(1, dbname='scd') @@ -60,16 +60,16 @@ def run(self): tdSql.query('show create database scd;') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd') - tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 5256000m S3_COMPACT 0") + tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") tdSql.query('show create database scd2;') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd2') - tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 
KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 5256000m S3_COMPACT 0") + tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") tdSql.query('show create database scd4') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd4') - tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 5256000m S3_COMPACT 0") + tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") tdSql.execute('drop database scd') diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 09216add826..151358aec31 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -15,11 +15,14 @@ ,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f 
query/function/test_func_elapsed.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_function.py +,,y,army,./pytest.sh python3 ./test.py -f query/function/test_percentile.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_resinfo.py +,,y,army,./pytest.sh python3 ./test.py -f query/function/test_interp.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/concat.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/cast.py ,,y,army,./pytest.sh python3 ./test.py -f query/test_join.py ,,y,army,./pytest.sh python3 ./test.py -f query/test_compare.py +,,y,army,./pytest.sh python3 ./test.py -f query/test_case_when.py ,,y,army,./pytest.sh python3 ./test.py -f insert/test_column_tag_boundary.py ,,y,army,./pytest.sh python3 ./test.py -f query/fill/fill_desc.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f query/fill/fill_null.py @@ -47,7 +50,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f query/window/base.py ,,y,army,./pytest.sh python3 ./test.py -f query/sys/tb_perf_queries_exist_test.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f query/test_having.py - +,,n,army,python3 ./test.py -f tmq/drop_lost_comsumers.py # # system test # @@ -71,6 +74,8 @@ #,,n,system-test,python3 ./test.py -f 8-stream/vnode_restart.py -N 4 #,,n,system-test,python3 ./test.py -f 8-stream/snode_restart.py -N 4 ,,n,system-test,python3 ./test.py -f 8-stream/snode_restart_with_checkpoint.py -N 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/force_window_close_interp.py +,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/force_window_close_interval.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/pk_error.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/pk_func.py @@ -1083,6 +1088,7 @@ ,,y,script,./test.sh -f tsim/user/privilege_table.sim ,,y,script,./test.sh -f tsim/user/privilege_create_db.sim ,,y,script,./test.sh -f tsim/db/alter_option.sim +,,y,script,./test.sh -f tsim/db/dnodelist.sim # ,,y,script,./test.sh 
-f tsim/db/alter_replica_31.sim ,,y,script,./test.sh -f tsim/db/basic1.sim ,,y,script,./test.sh -f tsim/db/basic2.sim @@ -1317,6 +1323,8 @@ ,,y,script,./test.sh -f tsim/stream/basic2.sim ,,y,script,./test.sh -f tsim/stream/basic3.sim ,,y,script,./test.sh -f tsim/stream/basic4.sim +,,y,script,./test.sh -f tsim/stream/snodeCheck.sim +,,y,script,./test.sh -f tsim/stream/concurrentcheckpt.sim ,,y,script,./test.sh -f tsim/stream/checkpointInterval0.sim ,,y,script,./test.sh -f tsim/stream/checkStreamSTable1.sim ,,y,script,./test.sh -f tsim/stream/checkStreamSTable.sim @@ -1365,10 +1373,38 @@ ,,y,script,./test.sh -f tsim/stream/sliding.sim ,,y,script,./test.sh -f tsim/stream/state0.sim ,,y,script,./test.sh -f tsim/stream/state1.sim -,,y,script,./test.sh -f tsim/stream/streamPrimaryKey0.sim -,,y,script,./test.sh -f tsim/stream/streamPrimaryKey1.sim -,,y,script,./test.sh -f tsim/stream/streamPrimaryKey2.sim -,,y,script,./test.sh -f tsim/stream/streamPrimaryKey3.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpDelete0.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpDelete1.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpDelete2.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpError.sim +,,y,script,./test.sh -f tsim/stream/streamInterpForceWindowClose.sim +,,y,script,./test.sh -f tsim/stream/streamInterpForceWindowClose1.sim +,,y,script,./test.sh -f tsim/stream/streamInterpFwcError.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpHistory.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpHistory1.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpLarge.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpLinear0.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpNext0.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpOther.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpOther1.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpPartitionBy0.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpPrev0.sim +#,,y,script,./test.sh 
-f tsim/stream/streamInterpPrev1.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey0.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey1.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey2.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpPrimaryKey3.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpUpdate.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpUpdate1.sim +#,,y,script,./test.sh -f tsim/stream/streamInterpValue0.sim +#,,y,script,./test.sh -f tsim/stream/streamPrimaryKey0.sim +#,,y,script,./test.sh -f tsim/stream/streamPrimaryKey1.sim +#,,y,script,./test.sh -f tsim/stream/streamPrimaryKey2.sim +#,,y,script,./test.sh -f tsim/stream/streamPrimaryKey3.sim +,,y,script,./test.sh -f tsim/stream/streamTwaError.sim +,,y,script,./test.sh -f tsim/stream/streamTwaFwcFill.sim +,,y,script,./test.sh -f tsim/stream/streamTwaFwcFillPrimaryKey.sim +,,y,script,./test.sh -f tsim/stream/streamTwaFwcIntervalPrimaryKey.sim ,,y,script,./test.sh -f tsim/stream/triggerInterval0.sim ,,y,script,./test.sh -f tsim/stream/triggerSession0.sim ,,y,script,./test.sh -f tsim/stream/udTableAndCol0.sim diff --git a/tests/pytest/auto_crash_gen.py b/tests/pytest/auto_crash_gen.py index 343cbd72c3d..4e4679db6a4 100755 --- a/tests/pytest/auto_crash_gen.py +++ b/tests/pytest/auto_crash_gen.py @@ -6,15 +6,12 @@ # -*- coding: utf-8 -*- import os ,sys import random -import argparse import subprocess -import time -import platform # valgrind mode ? 
valgrind_mode = False -msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" } +msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4: "Invalid read/write", 5: "memory leak", 6: "dead locked"} # formal hostname = socket.gethostname() @@ -112,9 +109,9 @@ def random_args(args_list): # args_list["--connector-type"]=connect_types[random.randint(0,2)] args_list["--connector-type"]= connect_types[0] args_list["--max-dbs"]= random.randint(1,10) - + # dnodes = [1,3] # set single dnodes; - + # args_list["--num-dnodes"]= random.sample(dnodes,1)[0] # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"]) args_list["--debug"]=False @@ -125,7 +122,7 @@ def random_args(args_list): # args_list["--ignore-errors"]=[] ## can add error codes for detail - + args_list["--run-tdengine"]= False args_list["--use-shadow-db"]= False args_list["--dynamic-db-table-names"]= True @@ -162,7 +159,7 @@ def random_args(args_list): if args_list["--larger-data"]: threads = [16,32] else: - threads = [32,64,128,256] + threads = [32,64,128,256] args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug return args_list @@ -176,7 +173,7 @@ def limits(args_list): pass # env is start by test frame , not crash_gen instance - + # elif args_list["--num-replicas"]==0: # print(" make sure num-replicas is at least 1 ") # args_list["--num-replicas"]=1 @@ -186,10 +183,10 @@ def limits(args_list): # elif args_list["--num-replicas"]>1: # if not args_list["--auto-start-service"]: # print("it should be deployed by crash_gen auto-start-service for multi replicas") - + # else: # pass - + return args_list def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): @@ -216,9 +213,9 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): arguments+="" else: arguments+=(k+"="+str(v)+" ") - + if valgrind : - + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 
0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012 '%(crash_gen_path ,arguments) else: @@ -239,7 +236,7 @@ def start_taosd(): start_cmd = 'cd %s && python3 test.py >>/dev/null '%(start_path) os.system(start_cmd) -def get_cmds(args_list): +def get_cmds(args_list): crash_gen_cmd = get_auto_mix_cmds(args_list,valgrind=valgrind_mode) return crash_gen_cmd @@ -276,11 +273,15 @@ def check_status(): os.system("tail -n 50 %s>>%s"%(result_file,exit_status_logs)) core_check = subprocess.Popen('ls -l %s | grep "^-" | wc -l'%core_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + dead_lock_check = subprocess.Popen("grep -i 'dead locked' %s "%result_file, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") if int(core_check.strip().rstrip()) > 0: # it means core files has occured return 3 - + + if dead_lock_check: + return 6 + if "Crash_Gen is now exiting with status code: 1" in run_code: return 1 elif "Crash_Gen is now exiting with status code: 0" in run_code: @@ -293,7 +294,7 @@ def main(): args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[], "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False, - "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , + "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , "--continue-on-exception":False } args = random_args(args_list) @@ -301,24 +302,24 @@ def main(): build_path = get_path() - + if repo =="community": crash_gen_path = build_path[:-5]+"community/tests/pytest/" elif repo =="TDengine": crash_gen_path = build_path[:-5]+"/tests/pytest/" else: pass - + if 
os.path.exists(crash_gen_path+"crash_gen.sh"): print(" make sure crash_gen.sh is ready") else: print( " crash_gen.sh is not exists ") sys.exit(1) - + git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[7:16] - + # crash_cmds = get_cmds() - + crash_cmds = get_cmds(args) # clean run_dir os.system('rm -rf %s'%run_dir ) @@ -329,9 +330,9 @@ def main(): run_crash_gen(crash_cmds) endtime = datetime.datetime.now() status = check_status() - + print("exit status : ", status) - + if status ==4: print('======== crash_gen found memory bugs ========') if status ==5: @@ -344,15 +345,15 @@ def main(): try: cmd = crash_cmds.split('&')[2] if status == 0: - log_dir = "none" + log_dir = "none" else: - log_dir= "/root/pxiao/crash_gen_logs" - + log_dir= "/root/pxiao/crash_gen_logs" + if status == 3: core_dir = "/root/pxiao/crash_gen_logs" else: core_dir = "none" - + text = f''' exit status: {msg_dict[status]} test scope: crash_gen @@ -364,12 +365,12 @@ def main(): log dir: {log_dir} core dir: {core_dir} cmd: {cmd}''' - - send_msg(get_msg(text)) + + send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) - + if __name__ == '__main__': main() diff --git a/tests/pytest/auto_crash_gen_valgrind.py b/tests/pytest/auto_crash_gen_valgrind.py index 29d9d617320..1e0de6ace11 100755 --- a/tests/pytest/auto_crash_gen_valgrind.py +++ b/tests/pytest/auto_crash_gen_valgrind.py @@ -9,15 +9,12 @@ # -*- coding: utf-8 -*- import os ,sys import random -import argparse import subprocess -import time -import platform # valgrind mode ? 
valgrind_mode = True -msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" } +msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4: "Invalid read/write", 5: "memory leak", 6: "dead locked"} # formal hostname = socket.gethostname() @@ -48,6 +45,7 @@ def send_msg(json): 'Content-Type': 'application/json' } + req = requests.post(url=group_url, headers=headers, json=json) inf = req.json() if "StatusCode" in inf and inf["StatusCode"] == 0: @@ -115,9 +113,9 @@ def random_args(args_list): # args_list["--connector-type"]=connect_types[random.randint(0,2)] args_list["--connector-type"]= connect_types[0] args_list["--max-dbs"]= random.randint(1,10) - + # dnodes = [1,3] # set single dnodes; - + # args_list["--num-dnodes"]= random.sample(dnodes,1)[0] # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"]) args_list["--debug"]=False @@ -125,13 +123,13 @@ def random_args(args_list): args_list["--track-memory-leaks"]=False args_list["--max-steps"]=random.randint(200,500) - + threads = [16,32] args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug # args_list["--ignore-errors"]=[] ## can add error codes for detail - + args_list["--run-tdengine"]= False args_list["--use-shadow-db"]= False args_list["--dynamic-db-table-names"]= True @@ -177,7 +175,7 @@ def limits(args_list): pass # env is start by test frame , not crash_gen instance - + # elif args_list["--num-replicas"]==0: # print(" make sure num-replicas is at least 1 ") # args_list["--num-replicas"]=1 @@ -187,10 +185,10 @@ def limits(args_list): # elif args_list["--num-replicas"]>1: # if not args_list["--auto-start-service"]: # print("it should be deployed by crash_gen auto-start-service for multi replicas") - + # else: # pass - + return args_list def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): @@ -217,9 +215,9 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): arguments+="" else: 
arguments+=(k+"="+str(v)+" ") - + if valgrind : - + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012 '%(crash_gen_path ,arguments) else: @@ -228,7 +226,6 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): return crash_gen_cmd - def start_taosd(): build_path = get_path() if repo == "community": @@ -272,7 +269,7 @@ def check_status(): if int(core_check.strip().rstrip()) > 0: # it means core files has occured return 3 - + mem_status = check_memory() if mem_status >0: return mem_status @@ -281,8 +278,8 @@ def check_status(): elif "Crash_Gen is now exiting with status code: 0" in run_code: return 0 else: - return 2 - + return 2 + def check_memory(): @@ -301,34 +298,37 @@ def check_memory(): os.mkdir(back_path) stderr_file = os.path.join(crash_gen_path , "valgrind.err") - + stdout_file = os.path.join(crash_gen_path, 'valgrind.out') + status = 0 grep_res = subprocess.Popen("grep -i 'Invalid read' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: # os.system("cp %s %s"%(stderr_file , back_path)) status = 4 - + grep_res = subprocess.Popen("grep -i 'Invalid write' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: # os.system("cp %s %s"%(stderr_file , back_path)) status = 4 - + grep_res = subprocess.Popen("grep -i 'taosMemoryMalloc' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: + # mem-leak can be also occure when exit normally when dead lock # os.system("cp %s %s"%(stderr_file , back_path)) - status = 5 - + dead_lock_res = subprocess.Popen("grep -i 'dead locked' %s "%stdout_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + status = 6 if dead_lock_res else 5 + return status def main(): args_list = 
{"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[], "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False, - "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , + "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , "--continue-on-exception":False } args = random_args(args_list) @@ -341,17 +341,17 @@ def main(): crash_gen_path = build_path[:-5]+"/tests/pytest/" else: pass - + if os.path.exists(crash_gen_path+"crash_gen.sh"): print(" make sure crash_gen.sh is ready") else: print( " crash_gen.sh is not exists ") sys.exit(1) - + git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[7:16] # crash_cmds = get_cmds() - + crash_cmds = get_cmds(args) # clean run_dir @@ -364,9 +364,9 @@ def main(): endtime = datetime.datetime.now() status = check_status() # back_path = os.path.join(core_path,"valgrind_report") - + print("exit status : ", status) - + if status ==4: print('======== crash_gen found memory bugs ========') if status ==5: @@ -379,15 +379,15 @@ def main(): try: cmd = crash_cmds.split('&')[2] if status == 0: - log_dir = "none" + log_dir = "none" else: - log_dir= "/root/pxiao/crash_gen_logs" - + log_dir= "/root/pxiao/crash_gen_logs" + if status == 3: core_dir = "/root/pxiao/crash_gen_logs" else: core_dir = "none" - + text = f''' exit status: {msg_dict[status]} test scope: crash_gen @@ -399,12 +399,12 @@ def main(): log dir: {log_dir} core dir: {core_dir} cmd: {cmd}''' - + send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) - + if __name__ == '__main__': main() diff --git 
a/tests/pytest/auto_crash_gen_valgrind_cluster.py b/tests/pytest/auto_crash_gen_valgrind_cluster.py index 8546d436de0..22f453e51ef 100755 --- a/tests/pytest/auto_crash_gen_valgrind_cluster.py +++ b/tests/pytest/auto_crash_gen_valgrind_cluster.py @@ -1,23 +1,17 @@ #!/usr/bin/python3 - - +# -*- coding: utf-8 -*- import datetime import os import socket import requests - -# -*- coding: utf-8 -*- import os ,sys import random -import argparse import subprocess -import time -import platform # valgrind mode ? valgrind_mode = True -msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" } +msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4: "Invalid read/write", 5: "memory leak", 6: "dead locked"} # formal hostname = socket.gethostname() @@ -115,9 +109,9 @@ def random_args(args_list): # args_list["--connector-type"]=connect_types[random.randint(0,2)] args_list["--connector-type"]= connect_types[0] args_list["--max-dbs"]= random.randint(1,10) - + # dnodes = [1,3] # set single dnodes; - + # args_list["--num-dnodes"]= random.sample(dnodes,1)[0] # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"]) args_list["--debug"]=False @@ -125,13 +119,12 @@ def random_args(args_list): args_list["--track-memory-leaks"]=False args_list["--max-steps"]=random.randint(200,500) - + threads = [16,32] args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug # args_list["--ignore-errors"]=[] ## can add error codes for detail - args_list["--run-tdengine"]= False args_list["--use-shadow-db"]= False args_list["--dynamic-db-table-names"]= True @@ -177,7 +170,7 @@ def limits(args_list): pass # env is start by test frame , not crash_gen instance - + # elif args_list["--num-replicas"]==0: # print(" make sure num-replicas is at least 1 ") # args_list["--num-replicas"]=1 @@ -187,10 +180,9 @@ def limits(args_list): # elif args_list["--num-replicas"]>1: # if not 
args_list["--auto-start-service"]: # print("it should be deployed by crash_gen auto-start-service for multi replicas") - + # else: # pass - return args_list def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): @@ -217,18 +209,13 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): arguments+="" else: arguments+=(k+"="+str(v)+" ") - + if valgrind : - crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707,0x0203,0x4012 '%(crash_gen_path ,arguments) - else: - crash_gen_cmd = 'cd %s && ./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707,0x0203,0x4012'%(crash_gen_path ,arguments) - return crash_gen_cmd - def start_taosd(): build_path = get_path() if repo == "community": @@ -242,7 +229,7 @@ def start_taosd(): os.system(start_cmd +">>/dev/null") def get_cmds(args_list): - + crash_gen_cmd = get_auto_mix_cmds(args_list,valgrind=valgrind_mode) return crash_gen_cmd @@ -272,7 +259,7 @@ def check_status(): if int(core_check.strip().rstrip()) > 0: # it means core files has occured return 3 - + mem_status = check_memory() if mem_status >0: return mem_status @@ -281,8 +268,7 @@ def check_status(): elif "Crash_Gen is now exiting with status code: 0" in run_code: return 0 else: - return 2 - + return 2 def check_memory(): @@ -301,57 +287,58 @@ def check_memory(): os.mkdir(back_path) stderr_file = os.path.join(crash_gen_path , "valgrind.err") - + stdout_file = os.path.join(crash_gen_path, 'valgrind.out') status = 0 grep_res = subprocess.Popen("grep -i 'Invalid read' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: # os.system("cp %s %s"%(stderr_file , back_path)) status = 4 - + grep_res = subprocess.Popen("grep -i 'Invalid write' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: # os.system("cp %s 
%s"%(stderr_file , back_path)) status = 4 - + grep_res = subprocess.Popen("grep -i 'taosMemoryMalloc' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: + # mem-leak can be also occure when exit normally when dead lock # os.system("cp %s %s"%(stderr_file , back_path)) - status = 5 - + dead_lock_res = subprocess.Popen("grep -i 'dead locked' %s "%stdout_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + status = 6 if dead_lock_res else 5 + return status def main(): - args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[], "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False, - "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , + "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , "--continue-on-exception":False } args = random_args(args_list) args = limits(args) - build_path = get_path() + build_path = get_path() if repo =="community": crash_gen_path = build_path[:-5]+"community/tests/pytest/" elif repo =="TDengine": crash_gen_path = build_path[:-5]+"/tests/pytest/" else: pass - + if os.path.exists(crash_gen_path+"crash_gen.sh"): print(" make sure crash_gen.sh is ready") else: print( " crash_gen.sh is not exists ") sys.exit(1) - + git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[7:16] # crash_cmds = get_cmds() - + crash_cmds = get_cmds(args) # clean run_dir @@ -364,9 +351,9 @@ def main(): endtime = datetime.datetime.now() status = check_status() # back_path = 
os.path.join(core_path,"valgrind_report") - + print("exit status : ", status) - + if status ==4: print('======== crash_gen found memory bugs ========') if status ==5: @@ -379,15 +366,15 @@ def main(): try: cmd = crash_cmds.split('&')[2] if status == 0: - log_dir = "none" + log_dir = "none" else: - log_dir= "/root/pxiao/crash_gen_logs" - + log_dir= "/root/pxiao/crash_gen_logs" + if status == 3: core_dir = "/root/pxiao/crash_gen_logs" else: core_dir = "none" - + text = f''' exit status: {msg_dict[status]} test scope: crash_gen @@ -399,12 +386,11 @@ def main(): log dir: {log_dir} core dir: {core_dir} cmd: {cmd}''' - - send_msg(get_msg(text)) + + send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) - if __name__ == '__main__': main() diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 1dfcf8b5ddc..c12f324fd7b 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -170,6 +170,8 @@ def __init__(self): self.fill_tb_source_select_str = ','.join(self.fill_function_list[0:13]) self.ext_tb_source_select_str = ','.join(self.downsampling_function_list[0:13]) self.stream_case_when_tbname = "tbname" + self.tag_value_str = "" + self.tag_value_list = [] self.update = True self.disorder = True @@ -202,7 +204,7 @@ def __init__(self): self.cast_tag_stb_filter_des_select_elm = "ts, t1, t2, t3, t4, cast(t1 as TINYINT UNSIGNED), t6, t7, t8, t9, t10, cast(t2 as varchar(256)), t12, cast(t3 as bool)" self.tag_count = len(self.tag_filter_des_select_elm.split(",")) self.state_window_range = list() - + self.custom_col_val = 0 self.part_val_list = [1, 2] # def init(self, conn, logSql): @@ -754,10 +756,10 @@ def create_ctable(self, tsql, dbname=None, stbname=None, tag_elm_list=None, coun if len(kwargs) > 0: for param, value in kwargs.items(): ctb_params += f'{param} "{value}" ' - tag_value_list = self.gen_tag_value_list(tag_elm_list) + self.tag_value_list = self.gen_tag_value_list(tag_elm_list) tag_value_str = "" # 
tag_value_str = ", ".join(str(v) for v in self.tag_value_list) - for tag_value in tag_value_list: + for tag_value in self.tag_value_list: if isinstance(tag_value, str): tag_value_str += f'"{tag_value}", ' else: @@ -913,12 +915,13 @@ def create_stream(self, stream_name, des_table, source_sql, trigger_mode=None, w else: stream_options += f" ignore update 0" if not use_except: - tdSql.execute(f'create stream if not exists {stream_name} trigger at_once {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};') + tdSql.execute(f'create stream if not exists {stream_name} trigger at_once {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};',queryTimes=3) time.sleep(self.create_stream_sleep) return None else: return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};' else: + if watermark is None: if trigger_mode == "max_delay": stream_options = f'trigger {trigger_mode} {max_delay}' @@ -938,12 +941,14 @@ def create_stream(self, stream_name, des_table, source_sql, trigger_mode=None, w stream_options += f" ignore update {ignore_update}" else: stream_options += f" ignore update 0" + if not use_except: - tdSql.execute(f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};') + tdSql.execute(f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};',queryTimes=3) time.sleep(self.create_stream_sleep) return None else: return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};' + def pause_stream(self, stream_name, if_exist=True, if_not_exist=False): """pause_stream @@ -1007,7 +1012,7 @@ def check_stream_wal_info(self, wal_info): # If no match was found, or the 
pattern does not match the expected format, return False return False - def check_stream_task_status(self, stream_name, vgroups, stream_timeout=None): + def check_stream_task_status(self, stream_name, vgroups, stream_timeout=0, check_wal_info=True): """check stream status Args: @@ -1043,13 +1048,16 @@ def check_stream_task_status(self, stream_name, vgroups, stream_timeout=None): print(f"result_task_status:{result_task_status},result_task_history:{result_task_history},result_task_alll:{result_task_alll}") if result_task_status_rows == 1 and result_task_status ==[('ready',)] : if result_task_history_rows == 1 and result_task_history == [(None,)] : - for vgroup_num in range(vgroups): - if self.check_stream_wal_info(result_task_alll[vgroup_num][4]) : - check_stream_success += 1 - tdLog.info(f"check stream task list[{check_stream_success}] sucessfully :") - else: - check_stream_success = 0 - break + if check_wal_info: + for vgroup_num in range(vgroups): + if self.check_stream_wal_info(result_task_alll[vgroup_num][4]) : + check_stream_success += 1 + tdLog.info(f"check stream task list[{check_stream_success}] sucessfully :") + else: + check_stream_success = 0 + break + else: + check_stream_success = vgroups if check_stream_success == vgroups: break @@ -1761,6 +1769,7 @@ def check_query_data(self, sql1, sql2, sorted=False, fill_value=None, tag_value_ bool: False if failed """ tdLog.info("checking query data ...") + tdLog.info(f"sq1:{sql1}; sql2:{sql2};") if tag_value_list: dvalue = len(self.tag_type_str.split(',')) - defined_tag_count tdSql.query(sql1) @@ -1796,7 +1805,7 @@ def check_query_data(self, sql1, sql2, sorted=False, fill_value=None, tag_value_ res2 = self.round_handle(res2) if not reverse_check: while res1 != res2: - tdLog.info("query retrying ...") + # tdLog.info("query retrying ...") new_list = list() tdSql.query(sql1) res1 = tdSql.queryResult @@ -2160,4 +2169,4 @@ def dict2toml(in_dict: dict, file:str): -tdCom = TDCom() +tdCom = TDCom() \ No newline at end of 
file diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index bb6f8ff030e..38325302181 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -48,6 +48,7 @@ def __init__(self, path): "telemetryReporting": "0", "tqDebugflag": "135", "stDebugflag":"135", + "safetyCheckLevel":"2" } def getLogDir(self): @@ -149,7 +150,8 @@ def __init__(self, index): "statusInterval": "1", "enableQueryHb": "1", "supportVnodes": "1024", - "telemetryReporting": "0" + "telemetryReporting": "0", + "safetyCheckLevel":"2" } def init(self, path, remoteIP = ""): diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 3bc784063e4..1d3333264ac 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -104,7 +104,7 @@ def prepare(self, dbname="db", drop=True, **kwargs): for k, v in kwargs.items(): s += f" {k} {v}" if "duration" not in kwargs: - s += " duration 300" + s += " duration 100" self.cursor.execute(s) s = f'use {dbname}' self.cursor.execute(s) diff --git a/tests/script/api/makefile b/tests/script/api/makefile index d8a4e192187..9c2bb6be3d5 100644 --- a/tests/script/api/makefile +++ b/tests/script/api/makefile @@ -25,6 +25,7 @@ exe: gcc $(CFLAGS) ./stmt.c -o $(ROOT)stmt $(LFLAGS) gcc $(CFLAGS) ./stmt2.c -o $(ROOT)stmt2 $(LFLAGS) gcc $(CFLAGS) ./stmt2-example.c -o $(ROOT)stmt2-example $(LFLAGS) + gcc $(CFLAGS) ./stmt2-get-fields.c -o $(ROOT)stmt2-get-fields $(LFLAGS) gcc $(CFLAGS) ./stmt2-nohole.c -o $(ROOT)stmt2-nohole $(LFLAGS) gcc $(CFLAGS) ./stmt-crash.c -o $(ROOT)stmt-crash $(LFLAGS) @@ -42,5 +43,6 @@ clean: rm $(ROOT)stmt rm $(ROOT)stmt2 rm $(ROOT)stmt2-example + rm $(ROOT)stmt2-get-fields rm $(ROOT)stmt2-nohole rm $(ROOT)stmt-crash diff --git a/tests/script/api/stmt2-get-fields.c b/tests/script/api/stmt2-get-fields.c new file mode 100644 index 00000000000..befde39f8a6 --- /dev/null +++ b/tests/script/api/stmt2-get-fields.c @@ -0,0 +1,129 @@ +// TAOS standard API example. 
The same syntax as MySQL, but only a subet +// to compile: gcc -o stmt2-get-fields stmt2-get-fields.c -ltaos + +#include +#include +#include +#include "taos.h" + +void getFields(TAOS *taos, const char *sql) { + TAOS_STMT2_OPTION option = {0}; + TAOS_STMT2 *stmt = taos_stmt2_init(taos, &option); + int code = taos_stmt2_prepare(stmt, sql, 0); + if (code != 0) { + printf("failed to execute taos_stmt2_prepare. error:%s\n", taos_stmt2_error(stmt)); + taos_stmt2_close(stmt); + return; + } + int fieldNum = 0; + TAOS_FIELD_STB *pFields = NULL; + code = taos_stmt2_get_stb_fields(stmt, &fieldNum, &pFields); + if (code != 0) { + printf("failed get col,ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_stmt2_error(stmt)); + } else { + printf("col nums:%d\n", fieldNum); + for (int i = 0; i < fieldNum; i++) { + printf("field[%d]: %s, data_type:%d, field_type:%d\n", i, pFields[i].name, pFields[i].type, + pFields[i].field_type); + } + } + printf("====================================\n"); + taos_stmt2_free_stb_fields(stmt, pFields); + taos_stmt2_close(stmt); +} + +void do_query(TAOS *taos, const char *sql) { + TAOS_RES *result = taos_query(taos, sql); + int code = taos_errno(result); + if (code) { + printf("failed to query: %s, reason:%s\n", sql, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); +} + +void do_stmt(TAOS *taos) { + do_query(taos, "drop database if exists db"); + do_query(taos, "create database db"); + do_query(taos, + "create table db.stb (ts timestamp, b binary(10)) tags(t1 " + "int, t2 binary(10))"); + do_query(taos, "CREATE TABLE db.d0 USING db.stb (t1,t2) TAGS (7,'Cali');"); + do_query(taos, "CREATE TABLE db.ntb(nts timestamp, nb binary(10),nvc varchar(16),ni int);"); + + printf("field_type: TAOS_FIELD_COL = 1, TAOS_FIELD_TAG=2, TAOS_FIELD_QUERY=3, TAOS_FIELD_TBNAME=4\n"); + + // case 1 : INSERT INTO db.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?) 
+ // test super table + const char *sql = "insert into db.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; + printf("====================================\n"); + printf("case 1 : %s\n", sql); + getFields(taos, sql); + + // case 2 : INSERT INTO db.d0 VALUES (?,?) + // test child table + sql = "INSERT INTO db.d0(ts,b) VALUES (?,?)"; + printf("case 2 : %s\n", sql); + getFields(taos, sql); + + // case 3 : INSERT INTO db.ntb VALUES(?,?,?,?) + // test normal table + sql = "INSERT INTO db.ntb VALUES(?,?,?,?)"; + printf("case 3 : %s\n", sql); + getFields(taos, sql); + + // case 4 : INSERT INTO db.? using db.stb TAGS(?,?) VALUES(?,?) + // not support this clause + sql = "insert into db.? using db.stb tags(?, ?) values(?,?)"; + printf("case 4 (not support): %s\n", sql); + getFields(taos, sql); + + // case 5 : INSERT INTO db.stb(t1,t2,ts,b) values(?,?,?,?) + // no tbname error + sql = "INSERT INTO db.stb(t1,t2,ts,b) values(?,?,?,?)"; + printf("case 5 (no tbname error): %s\n", sql); + getFields(taos, sql); + + // case 6 : INSERT INTO db.d0 using db.stb values(?,?) + // none para for ctbname + sql = "INSERT INTO db.d0 using db.stb values(?,?)"; + printf("case 6 (no tags error): %s\n", sql); + getFields(taos, sql); + + // case 7 : insert into db.stb(t1,t2,tbname) values(?,?,?) + // no value + sql = "insert into db.stb(t1,t2,tbname) values(?,?,?)"; + printf("case 7 (no PK error): %s\n", sql); + getFields(taos, sql); + + // case 8 : insert into db.stb(ts,b,tbname) values(?,?,?) + // no tag + sql = "insert into db.stb(ts,b,tbname) values(?,?,?)"; + printf("case 8 : %s\n", sql); + getFields(taos, sql); + + // case 9 : insert into db.stb(ts,b,tbname) values(?,?,?,?,?) + // wrong para nums + sql = "insert into db.stb(ts,b,tbname) values(?,?,?,?,?)"; + printf("case 9 (wrong para nums): %s\n", sql); + getFields(taos, sql); + + // case 10 : insert into db.ntb(nts,ni) values(?,?,?,?,?) 
+ // wrong para nums + sql = "insert into db.ntb(nts,ni) values(?,?)"; + printf("case 10 : %s\n", sql); + getFields(taos, sql); +} + +int main() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", "", 0); + if (!taos) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + do_stmt(taos); + taos_close(taos); + taos_cleanup(); +} \ No newline at end of file diff --git a/tests/script/tsim/compress/commitlog.sim b/tests/script/tsim/compress/commitlog.sim index 38899b95ba9..d0380dd5595 100644 --- a/tests/script/tsim/compress/commitlog.sim +++ b/tests/script/tsim/compress/commitlog.sim @@ -20,7 +20,7 @@ sql create table $tb (ts timestamp, b bool, t tinyint, s smallint, i int, big bi $count = 0 while $count < $N $ms = 1591200000000 + $count - sql insert into $tb values( $ms , 1, 0, $count , $count , $count ,'it is a string') + sql insert into $tb values( $ms , 10, 0, $count , $count , $count ,'it is a string') $count = $count + 1 endw @@ -29,6 +29,13 @@ if $rows != $N then return -1 endi +sql flush database $db + +sql select * from $tb +if $rows != $N then + return -1 +endi + print =============== step2 $i = 1 $db = $dbPrefix . $i diff --git a/tests/script/tsim/compress/compressDisable.sim b/tests/script/tsim/compress/compressDisable.sim new file mode 100644 index 00000000000..dac7ef712ae --- /dev/null +++ b/tests/script/tsim/compress/compressDisable.sim @@ -0,0 +1,146 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ============================ dnode1 start +$i = 0 +$dbPrefix = db +$tbPrefix = tb +$db = $dbPrefix . $i +$tb = $tbPrefix . 
$i + +$N = 2000 + +print =============== step1 +sql create database $db +sql use $db +sql create table $tb (ts timestamp, b bool encode 'disabled', t tinyint encode 'disabled', s smallint encode 'disabled', i int encode 'disabled', big bigint encode 'disabled', str binary(256)) + +$count = 0 +while $count < $N + $ms = 1591200000000 + $count + sql insert into $tb values( $ms , 1, 0, $count , $count , $count ,'it is a string') + $count = $count + 1 +endw + +sql select * from $tb +if $rows != $N then + return -1 +endi + +sql flush database $db + +sql select * from $tb +if $rows != $N then + return -1 +endi + +sql alter table $tb modify column ts encode 'disabled' + +$count = 0 +while $count < $N + $ms = 1591200030000 + $count + sql insert into $tb values( $ms , 1, 0, $count , $count , $count ,'it is a string') + $count = $count + 1 +endw + +$M = 4000 +sql select * from $tb +if $rows != $M then + return -1 +endi + +sql flush database $db + +sql select * from $tb +if $rows != $M then + return -1 +endi + + +$stb1 = txx1 +sql create table txx1 (ts timestamp encode 'disabled' compress 'disabled' level 'h', f int compress 'lz4') tags(t int) + +$count = 0 +$subTb1 = txx1_sub1 +$subTb2 = txx1_sub2 + +sql create table $subTb1 using $stb1 tags(1) +sql create table $subTb2 using $stb1 tags(2) + +while $count < $N + $ms = 1591200030000 + $count + sql insert into $subTb1 values( $ms , 1) + + $ms2 = 1591200040000 + $count + sql insert into $subTb2 values( $ms2 , 1) + $count = $count + 1 +endw + +$count = 0 +sql select * from $stb1 +if $rows != $M then + return -1 +endi + +sql flush database $db + +sql select * from $stb1 +if $rows != $M then + return -1 +endi + +$L = 8000 +sql alter table $stb1 modify column ts encode 'delta-i' +sql alter table $stb1 modify column f encode 'disabled' + +while $count < $N + $ms = 1591200050000 + $count + sql insert into $subTb1 values( $ms , 1) + + $ms2 = 1591200060000 + $count + sql insert into $subTb2 values( $ms2 , 1) + $count = $count + 1 +endw 
+ + +sql select * from $stb1 +if $rows != $L then + return -1 +endi + +sql flush database $db + +sql select * from $stb1 +if $rows != $L then + return -1 +endi + +sql alter table $stb1 modify column ts encode 'disabled' + + +$count = 0 +$I = 12000 +while $count < $N + $ms = 1591200070000 + $count + sql insert into $subTb1 values( $ms , 1) + + $ms2 = 1591200080000 + $count + sql insert into $subTb2 values( $ms2 , 1) + $count = $count + 1 +endw + + +sql select * from $stb1 +if $rows != $I then + return -1 +endi + +sql flush database $db + +sql select * from $stb1 +if $rows != $I then + return -1 +endi + diff --git a/tests/script/tsim/db/create_all_options.sim b/tests/script/tsim/db/create_all_options.sim index e402223d93b..4b39829b248 100644 --- a/tests/script/tsim/db/create_all_options.sim +++ b/tests/script/tsim/db/create_all_options.sim @@ -128,6 +128,12 @@ endi if $data21_db != 3000 then # wal_fsync_period return -1 endi +if $data30_db != 525600m then # s3_keeplocal + return -1 +endi +if $data31_db != 1 then # s3_compact + return -1 +endi sql drop database db diff --git a/tests/script/tsim/db/dnodelist.sim b/tests/script/tsim/db/dnodelist.sim new file mode 100644 index 00000000000..08bf08a1fef --- /dev/null +++ b/tests/script/tsim/db/dnodelist.sim @@ -0,0 +1,258 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 +system sh/deploy.sh -n dnode4 -i 4 +system sh/deploy.sh -n dnode5 -i 5 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start +system sh/exec.sh -n dnode5 -s start + +sql connect +sql create dnode $hostname port 7200 +sql create dnode $hostname port 7300 +sql create dnode $hostname port 7400 +sql create dnode $hostname port 7500 + +$x = 0 +step1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! 
+ return -1 + endi +sql select * from information_schema.ins_dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +print ===> $data20 $data21 $data22 $data23 $data24 $data25 +print ===> $data30 $data31 $data32 $data33 $data34 $data35 +print ===> $data40 $data41 $data42 $data43 $data44 $data45 +if $rows != 5 then + return -1 +endi +if $data(1)[4] != ready then + goto step1 +endi +if $data(2)[4] != ready then + goto step1 +endi +if $data(3)[4] != ready then + goto step1 +endi +if $data(4)[4] != ready then + goto step1 +endi +if $data(5)[4] != ready then + goto step1 +endi + +print --- error case + +sql_error create database d1 vgroups 1 dnodes '1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890'; +sql_error create database d1 vgroups 1 dnodes '1 '; +sql_error create database d1 vgroups 1 dnodes ' 1'; +sql_error create database d1 vgroups 1 dnodes '1,'; +sql_error create database d1 vgroups 1 dnodes '1, '; +sql_error create database d1 vgroups 1 dnodes 'a '; +sql_error create database d1 vgroups 1 dnodes '- '; +sql_error create database d1 vgroups 1 dnodes '1,1'; +sql_error create database d1 vgroups 1 dnodes '1, 1'; +sql_error create database d1 vgroups 1 dnodes '1,1234567890'; +sql_error create database d1 vgroups 1 dnodes '1,2,6'; +sql_error create database d1 vgroups 1 dnodes ',1,2'; +sql_error create database d1 vgroups 1 dnodes 'x1,2'; +sql_error create database d1 vgroups 1 dnodes 'c1,ab2'; +sql_error create database d1 vgroups 
1 dnodes '1,1,2'; + +sql_error create database d1 vgroups 1 replica 2 dnodes '1'; +sql_error create database d1 vgroups 1 replica 2 dnodes '1,8'; +sql_error create database d1 vgroups 1 replica 3 dnodes '1'; +sql_error create database d1 vgroups 1 replica 3 dnodes '1,2'; +sql_error create database d1 vgroups 1 replica 3 dnodes '1,2,4,6'; + +print --- replica 1 + +print --- case10 +sql create database d10 vgroups 1 dnodes '1'; +sql show dnodes; +if $data(1)[2] != 1 then + return -1 +endi +sql_error alter database d10 replica 1 dnodes '1,2,3'; +sql drop database d10; + +print --- case11 +sql create database d11 vgroups 1 dnodes '2'; +sql show dnodes; +if $data(2)[2] != 1 then + return -1 +endi +sql drop database d11; + +print --- case12 +sql create database d12 vgroups 2 dnodes '3,4'; +sql show dnodes; +if $data(3)[2] != 1 then + return -1 +endi +if $data(4)[2] != 1 then + return -1 +endi +sql drop database d12; + +print --- case13 +sql create database d13 vgroups 2 dnodes '5'; +sql show dnodes; +if $data(5)[2] != 2 then + return -1 +endi +sql drop database d13; + +print --- case14 +sql create database d14 vgroups 1 dnodes '1,2,5'; +sql drop database d14; + +print --- case15 +sql create database d15 vgroups 2 dnodes '1,4,3'; +sql drop database d15; + +print --- case16 +sql create database d16 vgroups 3 dnodes '1'; +sql show dnodes; +if $data(1)[2] != 3 then + return -1 +endi +sql drop database d16; + +print --- case17 +sql create database d17 vgroups 3 dnodes '1,4'; +sql drop database d17; + +print --- case18 +sql create database d18 vgroups 3 dnodes '1,2,4'; +sql show dnodes; +if $data(1)[2] != 1 then + return -1 +endi +if $data(2)[2] != 1 then + return -1 +endi +if $data(4)[2] != 1 then + return -1 +endi +sql drop database d18; + +print --- replica 2 + +print --- case20 +sql create database d20 replica 2 vgroups 1 dnodes '1,2'; +sql show dnodes; +if $data(1)[2] != 1 then + return -1 +endi +if $data(2)[2] != 1 then + return -1 +endi +sql drop database d20; + +print 
--- case21 +sql create database d21 replica 2 vgroups 3 dnodes '1,2,3'; +sql show dnodes; +if $data(1)[2] != 2 then + return -1 +endi +if $data(2)[2] != 2 then + return -1 +endi +if $data(3)[2] != 2 then + return -1 +endi +sql drop database d21; + +print --- case22 +sql create database d22 replica 2 vgroups 2 dnodes '1,2'; +sql show dnodes; +if $data(1)[2] != 2 then + return -1 +endi +if $data(2)[2] != 2 then + return -1 +endi +sql drop database d22; + +print --- replica 3 + +print --- case30 +sql create database d30 replica 3 vgroups 3 dnodes '1,2,3'; +sql show dnodes; +if $data(1)[2] != 3 then + return -1 +endi +if $data(2)[2] != 3 then + return -1 +endi +if $data(3)[2] != 3 then + return -1 +endi +sql_error alter database d30 replica 1 dnodes '1'; +sql drop database d30; + +print --- case31 +sql create database d31 replica 3 vgroups 2 dnodes '1,2,4'; +sql show dnodes; +if $data(1)[2] != 2 then + return -1 +endi +if $data(2)[2] != 2 then + return -1 +endi +if $data(4)[2] != 2 then + return -1 +endi +sql drop database d31; + +print --- case32 +sql create database d32 replica 3 vgroups 4 dnodes '4,2,3,1'; +sql show dnodes; +if $data(1)[2] != 3 then + return -1 +endi +if $data(2)[2] != 3 then + return -1 +endi +if $data(3)[2] != 3 then + return -1 +endi +if $data(4)[2] != 3 then + return -1 +endi +sql drop database d32; + +print --- case33 +sql create database d33 replica 3 vgroups 5 dnodes '4,2,3,1,5'; +sql show dnodes; +if $data(1)[2] != 3 then + return -1 +endi +if $data(2)[2] != 3 then + return -1 +endi +if $data(3)[2] != 3 then + return -1 +endi +if $data(4)[2] != 3 then + return -1 +endi +if $data(5)[2] != 3 then + return -1 +endi +sql drop database d33; + +return + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT diff --git a/tests/script/tsim/insert/commit-merge0.sim b/tests/script/tsim/insert/commit-merge0.sim index da66560cbd9..78dbb44d61e 100644 --- 
a/tests/script/tsim/insert/commit-merge0.sim +++ b/tests/script/tsim/insert/commit-merge0.sim @@ -4,7 +4,7 @@ system sh/exec.sh -n dnode1 -s start sql connect print =============== create database -sql create database db duration 300 keep 365000d,365000d,365000d +sql create database db duration 120 keep 365000d,365000d,365000d sql select * from information_schema.ins_databases if $rows != 3 then return -1 diff --git a/tests/script/tsim/query/interval-offset.sim b/tests/script/tsim/query/interval-offset.sim index fe3e4c9844b..50b3efdc39e 100644 --- a/tests/script/tsim/query/interval-offset.sim +++ b/tests/script/tsim/query/interval-offset.sim @@ -4,7 +4,7 @@ system sh/exec.sh -n dnode1 -s start sql connect print =============== create database -sql create database d0 duration 300 +sql create database d0 duration 120 sql use d0 print =============== create super table and child table diff --git a/tests/script/tsim/scalar/caseWhen.sim b/tests/script/tsim/scalar/caseWhen.sim index 67c8ac36735..4753e245137 100644 --- a/tests/script/tsim/scalar/caseWhen.sim +++ b/tests/script/tsim/scalar/caseWhen.sim @@ -838,7 +838,7 @@ endi if $data20 != 11 then return -1 endi -if $data30 != 1664176504 then +if $data30 != 1664176504000 then return -1 endi @@ -1130,38 +1130,38 @@ if $data00 != varchar_val then return -1 endi -sql select case when ts > '2022-01-01 00:00:00' then c_varchar else c_geometry end as result from t_test; -if $data00 != varchar_val then - return -1 -endi - -sql select case when ts > '2022-01-01 00:00:00' then c_bool else c_geometry end as result from t_test; -if $data00 != true then +sql select case when 1 then 1234567890987654 else 'abcertyuiojhgfddhjgfcvbn' end; +if $data00 != 1234567890987654 then return -1 endi -sql select case when 0 then tag_id else c_geometry end as result from t_test; -if $data00 != 16842773 then +sql select case when 0 then 1234567890987654 else 'abcertyuiojhgfddhjgfcvbn' end; +if $data00 != abcertyuiojhgfddhjgfcvbn then return -1 endi 
-sql select case when 0 then tag_id else c_nchar end as result from t_test; +sql select case when 0 then 1234567890987654 else c_nchar end from t_test; if $data00 != 涛思数据 then return -1 endi -sql select case when 0 then tag_id else c_int end as result from t_test; -if $data00 != 123 then +sql select case when 1 then 1234567890987654 else c_nchar end from t_test; +if $data00 != 1234567890987654 then return -1 endi -sql select case when 0 then tag_id else c_float end as result from t_test; -if $data00 != 123.449997 then +sql select case when 1 then c_varchar else c_varbinary end from t_test; +if $data00 != null then return -1 endi +sql_error select case when ts > '2022-01-01 00:00:00' then c_varchar else c_geometry end as result from t_test; +sql_error select case when ts > '2022-01-01 00:00:00' then c_bool else c_geometry end as result from t_test; +sql_error select case when 0 then tag_id else c_geometry end as result from t_test; +sql_error select case when 0 then tag_id else c_nchar end as result from t_test; +sql_error select case when 0 then tag_id else c_int end as result from t_test; +sql_error select case when 0 then tag_id else c_float end as result from t_test; sql_error select case when c_double > 100 then c_varbinary else c_geometry end as result from t_test; sql_error select case when c_bool then c_double else c_varbinary end as result from t_test; -sql_error select case when c_bool then c_varbinary else c_varchar end as result from t_test; system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/scalar/in.sim b/tests/script/tsim/scalar/in.sim index 75e1face88a..a2164675f05 100644 --- a/tests/script/tsim/scalar/in.sim +++ b/tests/script/tsim/scalar/in.sim @@ -35,6 +35,14 @@ if $rows != 3 then return -1 endi +sql explain verbose true select * from tb1 where tts in ('2022-07-10 16:31:01', '2022-07-10 16:31:03', 1657441865000); +if $rows != 3 then + return -1 +endi +if $data20 != @ Time Range: [-9223372036854775808, 
9223372036854775807]@ then + return -1 +endi + sql select * from tb1 where fbool in (0, 3); if $rows != 5 then return -1 @@ -80,4 +88,45 @@ if $rows != 0 then return -1 endi +sql explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000); +if $rows != 4 then + return -1 +endi +if $data20 != @ Time Range: [1657441840000, 1657441980000]@ then + return -1 +endi + +sql explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000, true); +if $rows != 4 then + return -1 +endi +if $data20 != @ Time Range: [1, 1657441980000]@ then + return -1 +endi + +sql explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000, false); +if $rows != 4 then + return -1 +endi +if $data20 != @ Time Range: [0, 1657441980000]@ then + return -1 +endi + +sql explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000, 1.02); +if $rows != 4 then + return -1 +endi +if $data20 != @ Time Range: [1, 1657441980000]@ then + return -1 +endi + +sql explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000, -1.02); +if $rows != 4 then + return -1 +endi +if $data20 != @ Time Range: [-1, 1657441980000]@ then + return -1 +endi + +sql_error explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000, 'abc'); system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/sma/drop_sma.sim b/tests/script/tsim/sma/drop_sma.sim index 7121f402fa1..b88fdc8955f 100644 --- a/tests/script/tsim/sma/drop_sma.sim +++ b/tests/script/tsim/sma/drop_sma.sim @@ -126,21 +126,21 @@ sql drop table stb; print ========== step5 sql drop database if exists db; -sql create database db duration 300; +sql create database db duration 120; sql use db; sql create table stb1(ts timestamp, c_int int, c_bint 
bigint, c_sint smallint, c_tint tinyint, c_float float, c_double double, c_bool bool, c_binary binary(16), c_nchar nchar(32), c_ts timestamp, c_tint_un tinyint unsigned, c_sint_un smallint unsigned, c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int); sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s; print ========== step6 repeat sql drop database if exists db; -sql create database db duration 300; +sql create database db duration 120; sql use db; sql create table stb1(ts timestamp, c_int int, c_bint bigint ) tags (t_int int); sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s; print ========== step7 sql drop database if exists db; -sql create database db duration 300; +sql create database db duration 120; sql use db; sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int); @@ -160,7 +160,7 @@ sql DROP INDEX sma_index_3 ; print ========== step8 sql drop database if exists db; sleep 2000 -sql create database db duration 300; +sql create database db duration 120; sql use db; sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int); diff --git a/tests/script/tsim/sma/sma_leak.sim b/tests/script/tsim/sma/sma_leak.sim index 4f2d1ebeb0e..14f03541b52 100644 --- a/tests/script/tsim/sma/sma_leak.sim +++ b/tests/script/tsim/sma/sma_leak.sim @@ -98,21 +98,21 @@ sql drop table stb; print ========== step5 sql drop database 
if exists db; -sql create database db duration 300; +sql create database db duration 120; sql use db; sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint, c_float float, c_double double, c_bool bool, c_binary binary(16), c_nchar nchar(32), c_ts timestamp, c_tint_un tinyint unsigned, c_sint_un smallint unsigned, c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int); sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s; print ========== step6 repeat sql drop database if exists db; -sql create database db duration 300; +sql create database db duration 120; sql use db; sql create table stb1(ts timestamp, c_int int, c_bint bigint ) tags (t_int int); sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s; print ========== step7 sql drop database if exists db; -sql create database db duration 300; +sql create database db duration 120; sql use db; sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int); @@ -131,7 +131,7 @@ sql DROP INDEX sma_index_3 ; print ========== step8 sql drop database if exists db; -sql create database db duration 300; +sql create database db duration 120; sql use db; sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int); diff --git a/tests/script/tsim/stream/basic4.sim b/tests/script/tsim/stream/basic4.sim index 8868c3fd60e..cadce94ef4f 100644 --- 
a/tests/script/tsim/stream/basic4.sim +++ b/tests/script/tsim/stream/basic4.sim @@ -189,10 +189,10 @@ $loop_count = 0 loop4: -sleep 200 +sleep 500 $loop_count = $loop_count + 1 -if $loop_count == 10 then +if $loop_count == 20 then return -1 endi @@ -324,5 +324,197 @@ if $data[29][1] != 2 then goto loop7 endi +print step4==== + +sql create database test4 vgroups 1; +sql use test4; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create table t3 using st tags(2,2,2); +sql create table t4 using st tags(2,2,2); +sql create table t5 using st tags(2,2,2); +sql create table t6 using st tags(2,2,2); + +sql create stream streams4 trigger window_close IGNORE EXPIRED 0 into streamt as select _wstart, count(*), now from st partition by tbname interval(1s); +sql create stream streams5 trigger window_close IGNORE EXPIRED 0 into streamt1 as select _wstart, count(*), now from st partition by b interval(1s); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791211000,1,1,1,1.1) t2 values (1648791211000,2,2,2,2.1) t3 values(1648791211000,3,3,3,3.1) t4 values(1648791211000,4,4,4,4.1) t5 values (1648791211000,5,5,5,5.1) t6 values(1648791211000,6,6,6,6.1); + +sql insert into t1 values(now,1,1,1,1.1) t2 values (now,2,2,2,2.1) t3 values(now,3,3,3,3.1) t4 values(now,4,4,4,4.1) t5 values (now,5,5,5,5.1) t6 values(now,6,6,6,6.1); + + +$loop_count = 0 + +loop8: + +sleep 200 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print sql select * from streamt; +sql select * from streamt; + +if $rows != 6 then + print ======rows=$rows + goto loop8 +endi + +if $data01 != 1 then + print ======data01=$data01 + return -1 +endi + +if $data11 != 1 then + print ======data11=$data11 + return -1 +endi + +if $data21 != 1 then + print ======data21=$data21 + return -1 +endi + +$loop_count = 0 + +loop8_1: + +sleep 200 + 
+$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print sql select * from streamt1; +sql select * from streamt1; + +if $rows != 6 then + print ======rows=$rows + goto loop8_1 +endi + +if $data01 != 1 then + print ======data01=$data01 + return -1 +endi + +if $data11 != 1 then + print ======data11=$data11 + return -1 +endi + +if $data21 != 1 then + print ======data21=$data21 + return -1 +endi + +sleep 2000 + +sql insert into t1 values(now,1,1,1,1.1) t2 values (now,2,2,2,2.1) t3 values(now,3,3,3,3.1) t4 values(now,4,4,4,4.1) t5 values (now,5,5,5,5.1) t6 values(now,6,6,6,6.1); + +sleep 2000 + +sql insert into t1 values(now,1,1,1,1.1) t2 values (now,2,2,2,2.1) t3 values(now,3,3,3,3.1) t4 values(now,4,4,4,4.1) t5 values (now,5,5,5,5.1) t6 values(now,6,6,6,6.1); + +$loop_count = 0 + +loop8_1: + +sleep 200 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print sql select * from streamt order by 1 desc; +sql select * from streamt order by 1 desc; + +if $data01 != 1 then + print ======data01=$data01 + goto loop8_1 +endi + +print sql select * from streamt1 order by 1 desc; +sql select * from streamt1 order by 1 desc; + +if $data01 != 1 then + print ======data01=$data01 + goto loop8_1 +endi + +sleep 2000 + +sql insert into t1 values(now,1,1,1,1.1) +sql insert into t2 values(now,2,2,2,2.1); +sql insert into t3 values(now,3,3,3,3.1); +sql insert into t4 values(now,4,4,4,4.1); +sql insert into t5 values(now,5,5,5,5.1); +sql insert into t6 values(now,6,6,6,6.1); + +$loop_count = 0 + +loop9: + +sleep 200 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print sql select * from streamt order by 1 desc; +sql select * from streamt order by 1 desc; + +if $data01 != 1 then + print ======data01=$data01 + goto loop9 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop9 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop9 +endi + +$loop_count = 0 + +loop10: + 
+sleep 200 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print sql select * from streamt1 order by 1 desc; +sql select * from streamt1 order by 1 desc; + +if $data01 != 1 then + print ======data01=$data01 + goto loop10 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop10 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop10 +endi system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/stream/checkTaskStatus.sim b/tests/script/tsim/stream/checkTaskStatus.sim index 8b30d27841e..0e171b3059f 100644 --- a/tests/script/tsim/stream/checkTaskStatus.sim +++ b/tests/script/tsim/stream/checkTaskStatus.sim @@ -9,6 +9,7 @@ sleep 1000 $loop_count = $loop_count + 1 if $loop_count == 60 then + print check task status failed return 1 endi diff --git a/tests/script/tsim/stream/checkpointSession1.sim b/tests/script/tsim/stream/checkpointSession1.sim index 0427585122c..5ad7ef0b961 100644 --- a/tests/script/tsim/stream/checkpointSession1.sim +++ b/tests/script/tsim/stream/checkpointSession1.sim @@ -62,6 +62,8 @@ system sh/exec.sh -n dnode1 -s start sleep 2000 +run tsim/stream/checkTaskStatus.sim + sql insert into t1 values(1648791213002,3,2,3,1.1); sql insert into t2 values(1648791233003,4,2,3,1.1); diff --git a/tests/script/tsim/stream/concurrentcheckpt.sim b/tests/script/tsim/stream/concurrentcheckpt.sim new file mode 100644 index 00000000000..4162617debc --- /dev/null +++ b/tests/script/tsim/stream/concurrentcheckpt.sim @@ -0,0 +1,79 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c supportVnodes -v 1 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database abc1 vgroups 1; +sql use abc1; +sql create table st1(ts timestamp, k int) tags(a int); +sql create table t1 using st1 tags(1); +sql create table t2 using st1 tags(2); +sql insert into t1 values(now, 1); + +sql create 
stream str1 trigger at_once into str_dst1 as select count(*) from st1 interval(30s); +sql create stream str2 trigger at_once into str_dst2 as select count(*) from st1 interval(30s); +sql create stream str3 trigger at_once into str_dst3 as select count(*) from st1 interval(30s); + +print ============== create 3 streams, check the concurrently checkpoint +sleep 180000 + +sql select task_id, checkpoint_id from information_schema.ins_stream_tasks order by checkpoint_id; + +print $data01 $data11 $data21 +if $data01 == $data11 then + print not allowed 2 checkpoint start completed + return -1 +endi + +if $data11 == $data21 then + print not allowed 2 checkpoints start concurrently + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT + +print ========== concurrent checkpoint is set 2 + +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c concurrentCheckpoint -v 2 + +system sh/exec.sh -n dnode1 -s start + +print ========== step2 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database abc1 vgroups 1; +sql use abc1; +sql create table st1(ts timestamp, k int) tags(a int); +sql create table t1 using st1 tags(1); +sql create table t2 using st1 tags(2); +sql insert into t1 values(now, 1); + +sql create stream str1 trigger at_once into str_dst1 as select count(*) from st1 interval(30s); +sql create stream str2 trigger at_once into str_dst2 as select count(*) from st1 interval(30s); +sql create stream str3 trigger at_once into str_dst3 as select count(*) from st1 interval(30s); + +print ============== create 3 streams, check the concurrently checkpoint +sleep 180000 + +sql select count(*) a, checkpoint_id from information_schema.ins_stream_tasks group by checkpoint_id order by a; +print $data00 $data01 +print $data10 $data11 + +if $data00 != 1 then + print expect 1, actual $data00 + return -1 +endi + +if $data10 != 2 then + print expect 2, actual $data10 + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop 
-x SIGINT diff --git a/tests/script/tsim/stream/forcewindowclose.sim b/tests/script/tsim/stream/forcewindowclose.sim new file mode 100644 index 00000000000..77def52b3cb --- /dev/null +++ b/tests/script/tsim/stream/forcewindowclose.sim @@ -0,0 +1,50 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database test vgroups 2; +sql select * from information_schema.ins_databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test +sql create stable st(ts timestamp, a int) tags(t int); +sql create table tu1 using st tags(1); + +sql create stream stream1 trigger force_window_close into str_dst as select _wstart, count(*) from st partition by tbname interval(5s); + +run tsim/stream/checkTaskStatus.sim + +sql insert into tu1 values(now, 1); +sleep 5500 + +sql pause stream stream1 + +$loop_count = 0 + +loop0: +sleep 500 +$loop_count = $loop_count + 1 +if $loop_count == 20 then + goto end_loop +endi + +sql insert into tu1 values(now, 1); +goto loop0 + +end_loop: + +sql resume stream stream1 +sql select * from str_dst + +if $rows != 3 then + print expect 3, actual: $rows +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/snodeCheck.sim b/tests/script/tsim/stream/snodeCheck.sim new file mode 100644 index 00000000000..f4ab8c81249 --- /dev/null +++ b/tests/script/tsim/stream/snodeCheck.sim @@ -0,0 +1,64 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 + +system sh/cfg.sh -n dnode1 -c supportVnodes -v 4 +system sh/cfg.sh -n dnode2 -c supportVnodes -v 4 +system sh/cfg.sh -n dnode3 -c supportVnodes -v 4 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print ========== step2 +sql create dnode $hostname port 7200 +system sh/exec.sh -n dnode2 -s start + +sql create 
dnode $hostname port 7300 +system sh/exec.sh -n dnode3 -s start + +$x = 0 +step2: + $x = $x + 1 + sleep 1000 + if $x == 10 then + print ====> dnode not ready! + return -1 + endi +sql select * from information_schema.ins_dnodes +print ===> $data00 $data01 $data02 $data03 $data04 $data05 +print ===> $data10 $data11 $data12 $data13 $data14 $data15 +if $rows != 3 then + return -1 +endi +if $data(1)[4] != ready then + goto step2 +endi +if $data(2)[4] != ready then + goto step2 +endi + +print ========== step3 +sql drop database if exists test; +sql create database if not exists test vgroups 4 replica 3 precision "ms" ; +sql use test; + +sql create table test.test (ts timestamp, c1 int) tags (t1 int) ; + +print create stream without snode existing +sql_error create stream stream_t1 trigger at_once into str_dst as select count(*) from test interval(20s); + +print create snode +sql create snode on dnode 1; + +sql create stream stream_t1 trigger at_once into str_dst as select count(*) from test interval(20s); + +print drop snode and then create stream +sql drop snode on dnode 1; + +sql_error create stream stream_t2 trigger at_once into str_dst as select count(*) from test interval(20s); + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamFwcIntervalFill.sim b/tests/script/tsim/stream/streamFwcIntervalFill.sim new file mode 100644 index 00000000000..27859974283 --- /dev/null +++ b/tests/script/tsim/stream/streamFwcIntervalFill.sim @@ -0,0 +1,235 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 4; +sql use test; + +sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); 
+ +sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _wstart, count(a) as ca, now, ta, sum(b) as cb, timezone() from st partition by tbname,ta interval(2s) fill(value, 100, 200); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3000a,1,1,1) (now + 3100a,5,10,10) (now + 3200a,5,10,10) (now + 5100a,20,1,1) (now + 5200a,30,10,10) (now + 5300a,40,10,10); +sql insert into t2 values(now + 3000a,1,1,1) (now + 3100a,2,10,10) (now + 3200a,30,10,10) (now + 5100a,10,1,1) (now + 5200a,40,10,10) (now + 5300a,7,10,10); + + +print sql select _wstart, count(a) as ca, now, ta, sum(b) as cb, timezone() from t1 partition by tbname,ta interval(2s) +sql select _wstart, count(a) as ca, now, ta, sum(b) as cb, timezone() from t1 partition by tbname,ta interval(2s); + +$query1_data01 = $data01 +$query1_data11 = $data11 + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select _wstart, count(a) as ca, now, ta, sum(b) as cb, timezone() from t2 partition by tbname,ta interval(2s); +sql select _wstart, count(a) as ca, now, ta, sum(b) as cb, timezone() from t2 partition by tbname,ta interval(2s); + +$query2_data01 = $data01 +$query2_data11 = $data11 + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop0: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1 order by 1; +sql select * from streamt where ta == 1 order by 1; + +print 
$data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop0 +endi + +if $data01 != $query1_data01 then + print ======data01========$data01 + print ======query1_data01=$query1_data01 + return -1 +endi + +if $data11 != $query1_data11 then + print ======data11========$data11 + print ======query1_data11=$query1_data11 + goto loop0 +endi + +$loop_count = 0 +loop1: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 2 order by 1; +sql select * from streamt where ta == 2 order by 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop1 +endi + +if $data01 != $query2_data01 then + print ======data01======$data01 + print ====query2_data01=$query2_data01 + return -1 +endi + +if $data11 != $query2_data11 then + print ======data11======$data11 + print ====query2_data11=$query2_data11 + goto loop1 +endi + +$loop_count = 0 +loop2: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +if $rows < 6 then + print ======rows=$rows + goto loop2 +endi + + +print step2 +print =============== create database +sql create database test2 vgroups 4; +sql use test2; + +sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st 
tags(2,2,2); + +sql create stream streams2 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _wstart, count(*), ta from st partition by tbname,ta interval(2s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3000a,1,1,1) (now + 3100a,3,10,10) (now + 3200a,5,10,10) (now + 5100a,20,1,1) (now + 5200a,30,10,10) (now + 5300a,40,10,10); +sql insert into t2 values(now + 3000a,1,1,1) (now + 3100a,3,10,10) (now + 3200a,5,10,10) (now + 5100a,10,1,1) (now + 5200a,40,10,10) (now + 5300a,7,10,10); + + +print sql select _wstart, count(*) from t1 interval(2s) order by 1; +sql select _wstart, count(*) from t1 interval(2s) order by 1; + +$query1_data01 = $data01 +$query1_data11 = $data11 + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop3: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1 order by 1; +sql select * from streamt where ta == 1 order by 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $data01 != $query1_data01 then + print ======data01======$data01 + print ====query1_data01=$query1_data01 + goto loop3 +endi + + +sql insert into t1 values(now + 3000a,1,1,1) (now + 3100a,3,10,10) (now + 3200a,5,10,10) (now + 5100a,20,1,1) (now + 5200a,30,10,10) (now + 5300a,40,10,10); +sql insert into t2 values(now + 3000a,1,1,1) (now + 3100a,3,10,10) (now + 3200a,5,10,10) (now + 5100a,10,1,1) (now + 5200a,40,10,10) (now + 
5300a,7,10,10); + +$loop_count = 0 +loop4: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print ======step2=rows=$rows +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows < 10 then + print ======rows=$rows + goto loop4 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpDelete0.sim b/tests/script/tsim/stream/streamInterpDelete0.sim new file mode 100644 index 00000000000..21bac13e4a7 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpDelete0.sim @@ -0,0 +1,507 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212001,1,1,1,1.0) (1648791214000,8,1,1,1.0) (1648791215000,10,1,1,1.0) (1648791215009,15,1,1,1.0) (1648791217001,4,1,1,1.0); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print 
$data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop0 +endi + +if $data11 != 8 then + print ======data11=$data11 + goto loop0 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop0 +endi + +if $data31 != 15 then + print ======data31=$data31 + goto loop0 +endi + +if $data41 != 15 then + print ======data41=$data41 + goto loop0 +endi + +print 1 sql delete from t1 where ts >= 1648791215000 and ts <= 1648791216000; +sql delete from t1 where ts >= 1648791215000 and ts <= 1648791216000; + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print 
$data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop1 +endi + +if $data11 != 8 then + print ======data11=$data11 + goto loop1 +endi + +if $data21 != 8 then + print ======data21=$data21 + goto loop1 +endi + +if $data31 != 8 then + print ======data31=$data31 + goto loop1 +endi + +if $data41 != 8 then + print ======data41=$data41 + goto loop1 +endi + + +print 2 sql delete from t1 where ts >= 1648791212000 and ts <= 1648791213000; +sql delete from t1 where ts >= 1648791212000 and ts <= 1648791213000; + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 4 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop2 
+endi + +if $data11 != 8 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 8 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 8 then + print ======data31=$data31 + goto loop2 +endi + +print 3 sql delete from t1 where ts >= 1648791217000 and ts <= 1648791218000; +sql delete from t1 where ts >= 1648791217000 and ts <= 1648791218000 + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop3 +endi + + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 
values(1648791212001,1,1,1,1.0) (1648791214000,8,1,1,1.0) (1648791215000,10,1,1,1.0) (1648791215009,15,1,1,1.0) (1648791217001,4,1,1,1.0); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != 8 then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop4 +endi + +print 1 sql delete from t1 where ts >= 1648791215000 and ts <= 1648791216000; +sql delete from t1 where ts >= 1648791215000 and ts <= 1648791216000; + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 
1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop5 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop5 +endi + +if $data11 != 8 then + print ======data11=$data11 + goto loop5 +endi + +if $data21 != 4 then + print ======data21=$data21 + goto loop5 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop5 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop5 +endi + + +print 2 sql delete from t1 where ts >= 1648791212000 and ts <= 1648791213000; +sql delete from t1 where ts >= 1648791212000 and ts <= 1648791213000; + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop6: + +sleep 300 + +$loop_count = 
$loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 4 then + print ======rows=$rows + goto loop6 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop6 +endi + +if $data11 != 4 then + print ======data11=$data11 + goto loop6 +endi + +if $data21 != 4 then + print ======data21=$data21 + goto loop6 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop6 +endi + +print 3 sql delete from t1 where ts >= 1648791217000 and ts <= 1648791218000; +sql delete from t1 where ts >= 1648791217000 and ts <= 1648791218000 + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop7: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print 
======rows=$rows + goto loop7 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop7 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpDelete1.sim b/tests/script/tsim/stream/streamInterpDelete1.sim new file mode 100644 index 00000000000..162da175e8b --- /dev/null +++ b/tests/script/tsim/stream/streamInterpDelete1.sim @@ -0,0 +1,508 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212001,1,1,1,1.0) (1648791214000,8,1,1,1.0) (1648791215000,10,1,1,1.0) (1648791215009,15,1,1,1.0) (1648791217001,4,1,1,1.0); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 
$data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != NULL then + print ======data01=$data01 + goto loop0 +endi + +if $data11 != 8 then + print ======data11=$data11 + goto loop0 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop0 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop0 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop0 +endi + +print 1 sql delete from t1 where ts >= 1648791215000 and ts <= 1648791216000; +sql delete from t1 where ts >= 1648791215000 and ts <= 1648791216000; + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != NULL then + print ======data01=$data01 + goto loop1 +endi + +if $data11 != 8 then + print ======data11=$data11 + goto loop1 +endi + +if $data21 
!= NULL then + print ======data21=$data21 + goto loop1 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop1 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop1 +endi + + +print 2 sql delete from t1 where ts >= 1648791212000 and ts <= 1648791213000; +sql delete from t1 where ts >= 1648791212000 and ts <= 1648791213000; + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 4 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != NULL then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop2 +endi + +print 3 sql delete from t1 where ts >= 1648791217000 and ts <= 1648791218000; +sql delete from t1 where ts >= 1648791217000 and ts <= 1648791218000 + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 
range(1648791212000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop3 +endi + + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(value,100,200,300,400); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791212001,1,1,1,1.0) (1648791214000,8,1,1,1.0) (1648791215000,10,1,1,1.0) (1648791215009,15,1,1,1.0) (1648791217001,4,1,1,1.0); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100,200,300,400); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100,200,300,400); + +print $data00 
$data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 100 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != 8 then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != 100 then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != 100 then + print ======data41=$data41 + goto loop4 +endi + +print 1 sql delete from t1 where ts >= 1648791215000 and ts <= 1648791216000; +sql delete from t1 where ts >= 1648791215000 and ts <= 1648791216000; + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100,200,300,400); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100,200,300,400); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop5: + +sleep 300 + +$loop_count = $loop_count + 
1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop5 +endi + +# row 0 +if $data01 != 100 then + print ======data01=$data01 + goto loop5 +endi + +if $data11 != 8 then + print ======data11=$data11 + goto loop5 +endi + +if $data21 != 100 then + print ======data21=$data21 + goto loop5 +endi + +if $data31 != 100 then + print ======data31=$data31 + goto loop5 +endi + +if $data41 != 100 then + print ======data41=$data41 + goto loop5 +endi + + +print 2 sql delete from t1 where ts >= 1648791212000 and ts <= 1648791213000; +sql delete from t1 where ts >= 1648791212000 and ts <= 1648791213000; + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100,200,300,400); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100,200,300,400); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 
$data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 4 then + print ======rows=$rows + goto loop6 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop6 +endi + +if $data11 != 100 then + print ======data11=$data11 + goto loop6 +endi + +if $data21 != 100 then + print ======data21=$data21 + goto loop6 +endi + +if $data31 != 100 then + print ======data31=$data31 + goto loop6 +endi + +print 3 sql delete from t1 where ts >= 1648791217000 and ts <= 1648791218000; +sql delete from t1 where ts >= 1648791217000 and ts <= 1648791218000 + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100,200,300,400); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100,200,300,400); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop7: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop7 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop7 +endi + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpDelete2.sim b/tests/script/tsim/stream/streamInterpDelete2.sim new file mode 100644 index 
00000000000..be27dcda492 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpDelete2.sim @@ -0,0 +1,258 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212001,1,1,1,1.0) (1648791214000,8,1,1,1.0) (1648791215000,10,1,1,1.0) (1648791215009,15,1,1,1.0) (1648791217001,4,1,1,1.0); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 4 then + print ======data01=$data01 + goto loop0 +endi + +if $data11 != 8
then + print ======data11=$data11 + goto loop0 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop0 +endi + +if $data31 != 9 then + print ======data31=$data31 + goto loop0 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop0 +endi + +print 1 sql delete from t1 where ts >= 1648791215000 and ts <= 1648791216000; +sql delete from t1 where ts >= 1648791215000 and ts <= 1648791216000; + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 4 then + print ======data01=$data01 + goto loop1 +endi + +if $data11 != 8 then + print ======data11=$data11 + goto loop1 +endi + +if $data21 != 6 then + print ======data21=$data21 + goto loop1 +endi + +if $data31 != 5 then + print ======data31=$data31 + goto loop1 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop1 +endi + + +print 2 sql delete from t1 where ts >= 1648791212000 and ts <= 1648791213000; +sql delete from t1 
where ts >= 1648791212000 and ts <= 1648791213000; + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 4 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 6 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 5 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop2 +endi + +print 3 sql delete from t1 where ts >= 1648791217000 and ts <= 1648791218000; +sql delete from t1 where ts >= 1648791217000 and ts <= 1648791218000 + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 
$data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 8 then + print ======data01=$data01 + goto loop3 +endi + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpError.sim b/tests/script/tsim/stream/streamInterpError.sim new file mode 100644 index 00000000000..53a92df772e --- /dev/null +++ b/tests/script/tsim/stream/streamInterpError.sim @@ -0,0 +1,117 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step2 + +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +print step2_0 + +sql create stream streams2_0_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_0_1 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(prev); +sql create stream streams2_0_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_0_2 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(next); +sql create stream streams2_0_3 trigger at_once IGNORE 
EXPIRED 0 IGNORE UPDATE 0 into streamt2_0_3 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(linear); +sql create stream streams2_0_4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_0_4 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(NULL); +sql create stream streams2_0_5 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_0_5 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(value,11,22,33,44); + +print step2_1 + +sql_error create stream streams2_1_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_1_1 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 range(1648791212000, 1648791215000) every(1s) fill(prev); +sql_error create stream streams2_1_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_1_2 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 range(1648791212000, 1648791215000) every(1s) fill(next); +sql_error create stream streams2_1_3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_1_3 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 range(1648791212000, 1648791215000) every(1s) fill(linear); +sql_error create stream streams2_1_4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_1_4 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 range(1648791212000, 1648791215000) every(1s) fill(NULL); +sql_error create stream streams2_1_5 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_1_5 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 
range(1648791212000, 1648791215000) every(1s) fill(value,11,22,33,44); + +print step2_2 + +sql_error create stream streams2_2_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_2_1 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from st every(1s) fill(prev); +sql_error create stream streams2_2_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_2_2 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from st every(1s) fill(next); +sql_error create stream streams2_2_3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_2_3 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from st every(1s) fill(linear); +sql_error create stream streams2_2_4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_2_4 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from st every(1s) fill(NULL); +sql_error create stream streams2_2_5 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_2_5 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from st every(1s) fill(value,11,22,33,44); + +print step2_3 + +sql_error create stream streams2_3_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_3_1 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from st partition by a every(1s) fill(prev); +sql_error create stream streams2_3_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_3_2 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from st partition by a every(1s) fill(next); +sql_error create stream streams2_3_3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_3_3 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, 
interp(c), _isfilled as a3, interp(d) from st partition by a every(1s) fill(linear); +sql_error create stream streams2_3_4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_3_4 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from st partition by a every(1s) fill(NULL); +sql_error create stream streams2_3_5 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_3_5 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from st partition by a every(1s) fill(value,11,22,33,44); + +print step2_4 + +sql_error create stream streams2_4_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_4_1 as select INTERP(a) FROM t1 RANGE('2023-01-01 00:00:00') fill(prev); +sql_error create stream streams2_4_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_4_2 as select INTERP(a) FROM t1 RANGE('2023-01-01 00:00:00') fill(next); +sql_error create stream streams2_4_3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_4_3 as select INTERP(a) FROM t1 RANGE('2023-01-01 00:00:00') fill(linear); +sql_error create stream streams2_4_4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_4_4 as select INTERP(a) FROM t1 RANGE('2023-01-01 00:00:00') fill(NULL); +sql_error create stream streams2_4_5 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_4_5 as select INTERP(a) FROM t1 RANGE('2023-01-01 00:00:00') fill(value,11,22,33,44); + +print step2_5 + +sql_error create stream streams2_5_1 trigger window_close IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_5_1 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(prev); +sql_error create stream streams2_5_2 trigger window_close IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_5_2 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 
every(1s) fill(next); +sql_error create stream streams2_5_3 trigger window_close IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_5_3 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(linear); +sql_error create stream streams2_5_4 trigger window_close IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_5_4 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(NULL); +sql_error create stream streams2_5_5 trigger window_close IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_5_5 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(value,11,22,33,44); + +run tsim/stream/checkTaskStatus.sim + +print step2_6 + +sql create stream streams2_6_1 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_6_1 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(prev); +sql create stream streams2_6_2 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_6_2 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(next); +sql create stream streams2_6_3 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_6_3 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(linear); +sql create stream streams2_6_4 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_6_4 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(NULL); +sql create stream streams2_6_5 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_6_5 as select interp(a), _isfilled as a1, interp(b), _isfilled as 
a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(value,11,22,33,44); + +sql_error create stream streams2_6_6 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_6 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(prev); +sql_error create stream streams2_6_7 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_7 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(next); +sql_error create stream streams2_6_8 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_8 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(linear); +sql_error create stream streams2_6_9 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_9 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(NULL); +sql_error create stream streams2_6_10 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_10 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(value,11,22,33,44); + +run tsim/stream/checkTaskStatus.sim + +print step3 + +sql create database test3 vgroups 1; +sql use test3; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +print step3_0 + +sql create stream streams3_0_1 trigger force_window_close FILL_HISTORY 0 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt3_0_1 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(prev); + 
+sleep 5000 + +sql_error create stream streams3_0_2 trigger force_window_close FILL_HISTORY 0 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt3_0_2 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(next); +sql_error create stream streams3_0_3 trigger force_window_close FILL_HISTORY 0 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt3_0_3 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(linear); +sql create stream streams3_0_4 trigger force_window_close FILL_HISTORY 0 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt3_0_4 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(NULL); + +sleep 5000 + +sql create stream streams3_0_5 trigger force_window_close FILL_HISTORY 0 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt3_0_5 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(value,11,22,33,44); + + +run tsim/stream/checkTaskStatus.sim + +print step4 + +sql_error create stream streams4_1 trigger max_delay 1s FILL_HISTORY 0 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4_1 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(prev); +sql_error create stream streams4_2 trigger max_delay 1s FILL_HISTORY 0 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4_2 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(next); +sql_error create stream streams4_3 trigger max_delay 1s FILL_HISTORY 0 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4_3 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(linear); +sql_error create stream streams4_4 trigger max_delay 1s FILL_HISTORY 0 IGNORE 
EXPIRED 0 IGNORE UPDATE 0 into streamt4_4 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(NULL); +sql_error create stream streams4_5 trigger max_delay 1s FILL_HISTORY 0 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4_5 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(value,11,22,33,44); + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpForceWindowClose.sim b/tests/script/tsim/stream/streamInterpForceWindowClose.sim new file mode 100644 index 00000000000..e96866b3e0d --- /dev/null +++ b/tests/script/tsim/stream/streamInterpForceWindowClose.sim @@ -0,0 +1,235 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _irowts, interp(a) as a, interp(b) as b, now from t1 every(2s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(now,1,1,1,1.1) (now + 10s,2,2,2,2.1) (now + 20s,3,3,3,3.1); + +print sql select * from t1; +sql select * from t1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop0: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where a == 1; +sql select * from streamt where a == 1; + +print $data00 $data01 $data02 $data03 $data04 +print 
$data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop0 +endi + +$loop_count = 0 +loop1: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where a == 2; +sql select * from streamt where a == 2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop1 +endi + +$loop_count = 0 +loop2: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where a == 3; +sql select * from streamt where a == 3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop2 +endi + +sleep 4000 + +$loop_count = 0 +loop3: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where a == 3; +sql select * from streamt where a == 3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 5 then + print 
======rows=$rows + goto loop3 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _irowts, interp(a) as a, interp(b) as b, now from t1 every(2s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(now,1,1,1,1.1) (now + 10s,2,2,2,2.1) (now + 20s,3,3,3,3.1); + +print sql select * from t1; +sql select * from t1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop4: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where a is null; +sql select * from streamt where a is null; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows < 5 then + print ======rows=$rows + goto loop4 +endi + +print step3 +print =============== create database +sql create database test3 vgroups 1; +sql use test3; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams3 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _irowts, interp(a) as a, interp(b) as b, now from t1 every(2s) fill(value,100,200); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(now,1,1,1,1.1) (now + 10s,2,2,2,2.1) (now + 20s,3,3,3,3.1); + +print sql select * from t1; +sql select * from t1; + +print 
$data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop5: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where a == 100; +sql select * from streamt where a == 100; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows < 5 then + print ======rows=$rows + goto loop5 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpForceWindowClose1.sim b/tests/script/tsim/stream/streamInterpForceWindowClose1.sim new file mode 100644 index 00000000000..e870e407f99 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpForceWindowClose1.sim @@ -0,0 +1,471 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step prev +print =============== create database +sql create database test vgroups 3; +sql use test; +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); + +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create table t3 using st tags(2,2,2); + +sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _irowts, interp(a) as a, _isfilled, tbname, b, c from st partition by tbname, b,c every(5s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now,1,1,1,1.0) (now + 10s,2,1,1,2.0)(now + 20s,3,1,1,3.0) +sql insert into 
t2 values(now,21,1,1,1.0) (now + 10s,22,1,1,2.0)(now + 20s,23,1,1,3.0) +sql insert into t3 values(now,31,1,1,1.0) (now + 10s,32,1,1,2.0)(now + 20s,33,1,1,3.0) + +print sql select * from t1; +sql select * from t1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select * from t2; +sql select * from t2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select * from t3; +sql select * from t3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop0: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where a == 1; +sql select * from streamt where a == 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop0 +endi + +print 2 sql select * from streamt where a == 21; +sql select * from streamt where a == 21; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 
$data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop0 +endi + +$loop_count = 0 +loop1a: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where a == 31; +sql select * from streamt where a == 31; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop1a +endi + +$loop_count = 0 +loop1: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print sql select * from streamt where a == 2; +sql select * from streamt where a == 2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop1 +endi + +print 3 sql select * from streamt where a == 22; +sql select * from streamt where a == 22; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop1 +endi + +print 3 sql select * from streamt where a == 32; +sql select * from streamt where a == 32; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 
$data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop1 +endi + +$loop_count = 0 +loop2: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 4 sql select * from streamt where a == 3; +sql select * from streamt where a == 3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop2 +endi + +print 4 sql select * from streamt where a == 23; +sql select * from streamt where a == 23; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop2 +endi + +print 4 sql select * from streamt where a == 33; +sql select * from streamt where a == 33; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop2 +endi + +sleep 4000 + +$loop_count = 0 +loop3: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 5 sql select * from streamt where a == 3; +sql select * from streamt where a == 3; + +print $data00 $data01 $data02 $data03 $data04 
+print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop3 +endi + +print 5 sql select * from streamt where a == 23; +sql select * from streamt where a == 23; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop3 +endi + +print 5 sql select * from streamt where a == 33; +sql select * from streamt where a == 33; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop3 +endi + +print 2 sql select * from streamt where a == 3; +sql select * from streamt where a == 3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop3 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); + +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create table t3 using st tags(2,2,2); + 
+sql create stream streams2 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _irowts, interp(a) as a, _isfilled, tbname, b, c from st partition by tbname, b,c every(2s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now,1,1,1,1.0) (now + 10s,2,1,1,2.0)(now + 20s,3,1,1,3.0) +sql insert into t2 values(now,21,1,1,1.0) (now + 10s,22,1,1,2.0)(now + 20s,23,1,1,3.0) +sql insert into t3 values(now,31,1,1,1.0) (now + 10s,32,1,1,2.0)(now + 20s,33,1,1,3.0) + +print sql select * from t1; +sql select * from t1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select * from t2; +sql select * from t2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select * from t3; +sql select * from t3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop4: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where a is null; +sql select * from streamt where a is null; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 
$data51 $data52 $data53 $data54 + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop4 +endi + +print step3 +print =============== create database +sql create database test3 vgroups 1; +sql use test3; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); + +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create table t3 using st tags(2,2,2); + +sql create stream streams3 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _irowts, interp(a) as a, _isfilled, tbname, b, c from st partition by tbname, b,c every(2s) fill(value,100); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(now,1,1,1,1.0) (now + 10s,2,1,1,2.0)(now + 20s,3,1,1,3.0) +sql insert into t2 values(now,21,1,1,1.0) (now + 10s,22,1,1,2.0)(now + 20s,23,1,1,3.0) +sql insert into t3 values(now,31,1,1,1.0) (now + 10s,32,1,1,2.0)(now + 20s,33,1,1,3.0) + +print sql select * from t1; +sql select * from t1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select * from t2; +sql select * from t2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select * from t3; +sql select * from t3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 
+loop5: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where a == 100; +sql select * from streamt where a == 100; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 10 then + print ======rows=$rows + goto loop5 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpFwcError.sim b/tests/script/tsim/stream/streamInterpFwcError.sim new file mode 100644 index 00000000000..a53a6fe1899 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpFwcError.sim @@ -0,0 +1,31 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step2 + +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams1 trigger force_window_close into streamt1 as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql_error create stream streams2 trigger force_window_close IGNORE EXPIRED 0 into streamt2 as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); +sql_error create stream streams3 trigger force_window_close IGNORE UPDATE 0 into streamt3 as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); + + +sql create stream streams4 trigger force_window_close IGNORE EXPIRED 1 into streamt4 as select _irowts, _isfilled as a1, interp(a) as a2 from st 
partition by tbname every(1s) fill(prev); +run tsim/stream/checkTaskStatus.sim + +sql create stream streams5 trigger force_window_close IGNORE UPDATE 1 into streamt5 as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); +run tsim/stream/checkTaskStatus.sim + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpHistory.sim b/tests/script/tsim/stream/streamInterpHistory.sim new file mode 100644 index 00000000000..b9685ebf05b --- /dev/null +++ b/tests/script/tsim/stream/streamInterpHistory.sim @@ -0,0 +1,655 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql insert into t1 values(1648791212000,1,1,1,1.0); +sql insert into t1 values(1648791215001,2,1,1,1.0); + +sql insert into t2 values(1648791212000,31,1,1,1.0); +sql insert into t2 values(1648791216001,41,1,1,1.0); + +sql create stream streams1 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); + + +sql insert into t1 values(1648791217000,5,1,1,1.0); +sql insert into t2 values(1648791217000,61,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(prev) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(prev) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 
$data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where a2 <= 10 order by 1; +sql select * from streamt where a2 < 10 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data02 != 1 then + print ======data02=$data02 + goto loop0 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop0 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop0 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop0 +endi + +if $data42 != 2 then + print ======data42=$data42 + goto loop0 +endi + +if $data52 != 5 then + print ======data52=$data52 + goto loop0 +endi + + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(prev) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(prev) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 + +loop0_1: + +sleep 300 + 
+$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where a2 > 10 order by 1; +sql select * from streamt where a2 > 10 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop0_1 +endi + +if $data02 != 31 then + print ======data02=$data02 + goto loop0_1 +endi + +if $data12 != 31 then + print ======data12=$data12 + goto loop0_1 +endi + +if $data22 != 31 then + print ======data22=$data22 + goto loop0_1 +endi + +if $data32 != 31 then + print ======data32=$data32 + goto loop0_1 +endi + +if $data42 != 31 then + print ======data42=$data42 + goto loop0_1 +endi + +if $data52 != 61 then + print ======data52=$data52 + goto loop0_1 +endi + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791219001,7,1,1,1.0); +sql insert into t2 values(1648791219001,81,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(prev) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(prev) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +$loop_count = 0 + +loop1: + +sleep 300 
+ +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where a2 <= 10 order by 1; +sql select * from streamt where a2 < 10 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +if $rows != 8 then + print ======rows=$rows + goto loop1 +endi + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(prev) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(prev) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +$loop_count = 0 + +loop1_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where a2 > 10 order by 1; +sql select * from streamt where a2 > 10 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 
$data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +if $rows != 8 then + print ======rows=$rows + goto loop1_1 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql insert into t1 values(1648791212000,1,1,1,1.0); +sql insert into t1 values(1648791215001,2,1,1,1.0); + +sql insert into t2 values(1648791212000,31,1,1,1.0); +sql insert into t2 values(1648791216001,41,1,1,1.0); + +sql create stream streams2 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(next); + + +sql insert into t1 values(1648791217000,5,1,1,1.0); +sql insert into t2 values(1648791217000,61,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(next) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(next) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +$loop_count = 0 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where a2 <= 10 order by 1; +sql select * from streamt where a2 < 10 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 
$data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data02 != 1 then + print ======data02=$data02 + goto loop2 +endi + +if $data12 != 2 then + print ======data12=$data12 + goto loop2 +endi + +if $data22 != 2 then + print ======data22=$data22 + goto loop2 +endi + +if $data32 != 2 then + print ======data32=$data32 + goto loop2 +endi + +if $data42 != 5 then + print ======data42=$data42 + goto loop2 +endi + +if $data52 != 5 then + print ======data52=$data52 + goto loop2 +endi + + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(next) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(next) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 + +loop2_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where a2 > 10 order by 1; +sql select * from streamt where a2 > 10 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + 
+# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop2_1 +endi + +if $data02 != 31 then + print ======data02=$data02 + goto loop2_1 +endi + +if $data12 != 41 then + print ======data12=$data12 + goto loop2_1 +endi + +if $data22 != 41 then + print ======data22=$data22 + goto loop2_1 +endi + +if $data32 != 41 then + print ======data32=$data32 + goto loop2_1 +endi + +if $data42 != 41 then + print ======data42=$data42 + goto loop2_1 +endi + +if $data52 != 61 then + print ======data52=$data52 + goto loop2_1 +endi + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791219001,7,1,1,1.0); +sql insert into t2 values(1648791219001,81,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(next) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(next) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where a2 <= 10 order by 1; +sql select * from streamt where a2 < 10 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 
+print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +if $rows != 8 then + print ======rows=$rows + goto loop3 +endi + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(next) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(next) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +$loop_count = 0 + +loop3_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where a2 > 10 order by 1; +sql select * from streamt where a2 > 10 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +if $rows != 8 then + print ======rows=$rows + goto loop3_1 +endi + +print step3 + +print =============== create database +sql create database test3 vgroups 1; +sql use test3; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql insert into t1 
values(1648791212000,1,1,1,1.0); +sql insert into t1 values(1648791215001,2,1,1,1.0); + +sql insert into t2 values(1648791212000,31,1,1,1.0); +sql insert into t2 values(1648791216001,41,1,1,1.0); + +sql create stream streams3 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,5,1,1,1.0); +sql insert into t2 values(1648791217000,61,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(prev) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(prev) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +$loop_count = 0 + +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where a2 <= 10 order by 1; +sql select * from streamt where a2 < 10 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data02 != 1 then + print ======data02=$data02 + goto loop4 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop4 +endi 
+ +if $data22 != 1 then + print ======data22=$data22 + goto loop4 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop4 +endi + +if $data42 != 2 then + print ======data42=$data42 + goto loop4 +endi + +if $data52 != 5 then + print ======data52=$data52 + goto loop4 +endi + + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(prev) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(prev) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 + +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where a2 > 10 order by 1; +sql select * from streamt where a2 > 10 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop5 +endi + +if $data02 != 31 then + print ======data02=$data02 + goto loop5 +endi + +if $data12 != 31 then + print ======data12=$data12 + goto loop5 +endi + +if $data22 != 31 then + print ======data22=$data22 + goto loop5 +endi + +if $data32 != 31 then + print ======data32=$data32 + goto loop5 +endi + +if $data42 != 31 then + print ======data42=$data42 + goto loop5 +endi + +if $data52 != 61 then + print ======data52=$data52 
+ goto loop5 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpHistory1.sim b/tests/script/tsim/stream/streamInterpHistory1.sim new file mode 100644 index 00000000000..c4d558592c0 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpHistory1.sim @@ -0,0 +1,737 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql insert into t1 values(1648791212000,1,1,1,1.0); +sql insert into t1 values(1648791215001,2,1,1,1.0); + +sql insert into t2 values(1648791212000,31,1,1,1.0); +sql insert into t2 values(1648791215001,41,1,1,1.0); + +sql create stream streams1 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, _isfilled as a1, interp(a) as a2, tbname as tb from st partition by tbname every(1s) fill(NULL); + + +sql insert into t1 values(1648791217000,5,1,1,1.0); +sql insert into t2 values(1648791217000,61,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(NULL) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(NULL) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +$loop_count = 0 + +loop0: + +sleep 300 + 
+$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t1" order by 1; +sql select * from streamt where tb = "t1" order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data02 != 1 then + print ======data02=$data02 + goto loop0 +endi + +if $data12 != NULL then + print ======data12=$data12 + goto loop0 +endi + +if $data22 != NULL then + print ======data22=$data22 + goto loop0 +endi + +if $data32 != NULL then + print ======data32=$data32 + goto loop0 +endi + +if $data42 != NULL then + print ======data42=$data42 + goto loop0 +endi + +if $data52 != 5 then + print ======data52=$data52 + goto loop0 +endi + + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(NULL) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(NULL) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 + +loop0_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t2" order by 1; +sql select * from streamt where tb = "t2" order by 1; + +print $data00 $data01 $data02 $data03 $data04 
$data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop0_1 +endi + +if $data02 != 31 then + print ======data02=$data02 + goto loop0_1 +endi + +if $data12 != NULL then + print ======data12=$data12 + goto loop0_1 +endi + +if $data22 != NULL then + print ======data22=$data22 + goto loop0_1 +endi + +if $data32 != NULL then + print ======data32=$data32 + goto loop0_1 +endi + +if $data42 != NULL then + print ======data42=$data42 + goto loop0_1 +endi + +if $data52 != 61 then + print ======data52=$data52 + goto loop0_1 +endi + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791219001,7,1,1,1.0); +sql insert into t2 values(1648791219001,81,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(NULL) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(NULL) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t1" order by 1; +sql select * from streamt where tb = "t1" order by 1; + +print $data00 $data01 $data02 
$data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +if $rows != 8 then + print ======rows=$rows + goto loop1 +endi + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(NULL) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(NULL) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +$loop_count = 0 + +loop1_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t2" order by 1; +sql select * from streamt where tb = "t2" order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +if $rows != 8 then + print ======rows=$rows + goto loop1_1 +endi + +print step2 
+print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql insert into t1 values(1648791212000,1,1,1,1.0); +sql insert into t1 values(1648791215001,2,1,1,1.0); + +sql insert into t2 values(1648791212000,31,1,1,1.0); +sql insert into t2 values(1648791212001,41,1,1,1.0); + +sql create stream streams2 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, _isfilled as a1, interp(a) as a2, tbname as tb from st partition by tbname every(1s) fill(value, 888); + + +sql insert into t1 values(1648791217000,5,1,1,1.0); +sql insert into t2 values(1648791217000,61,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(value, 888) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(value, 888) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +$loop_count = 0 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t1" order by 1; +sql select * from streamt where tb = "t1" order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 
$data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data02 != 1 then + print ======data02=$data02 + goto loop2 +endi + +if $data12 != 888 then + print ======data12=$data12 + goto loop2 +endi + +if $data22 != 888 then + print ======data22=$data22 + goto loop2 +endi + +if $data32 != 888 then + print ======data32=$data32 + goto loop2 +endi + +if $data42 != 888 then + print ======data42=$data42 + goto loop2 +endi + +if $data52 != 5 then + print ======data52=$data52 + goto loop2 +endi + + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(value, 888) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(value, 888) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 + +loop2_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t2" order by 1; +sql select * from streamt where tb = "t2" order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop2_1 +endi + +if $data02 != 31 then + print ======data02=$data02 + goto loop2_1 +endi + +if $data12 != 
888 then + print ======data12=$data12 + goto loop2_1 +endi + +if $data22 != 888 then + print ======data22=$data22 + goto loop2_1 +endi + +if $data32 != 888 then + print ======data32=$data32 + goto loop2_1 +endi + +if $data42 != 888 then + print ======data42=$data42 + goto loop2_1 +endi + +if $data52 != 61 then + print ======data52=$data52 + goto loop2_1 +endi + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791219001,7,1,1,1.0); +sql insert into t2 values(1648791219001,81,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(value, 888) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(value, 888) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t1" order by 1; +sql select * from streamt where tb = "t1" order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +if $rows != 8 then + print 
======rows=$rows + goto loop3 +endi + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(value, 888) order by 3, 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(value, 888) order by 3, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +$loop_count = 0 + +loop3_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t2" order by 1; +sql select * from streamt where tb = "t2" order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +if $rows != 8 then + print ======rows=$rows + goto loop3_1 +endi + + +print step3 +print =============== create database +sql create database test3 vgroups 1; +sql use test3; + +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql insert into t1 values(1648791212000,1,1,1,1.0); +sql insert into t1 values(1648791215001,20,1,1,1.0); + +sql insert into t2 
values(1648791212000,31,1,1,1.0); +sql insert into t2 values(1648791215001,41,1,1,1.0); + +sql create stream streams3 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, _isfilled as a1, interp(a) as a2, tbname as tb from st partition by tbname every(1s) fill(linear); + + +sql insert into t1 values(1648791217000,5,1,1,1.0); +sql insert into t2 values(1648791217000,61,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(linear) order by 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(linear) order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +$loop_count = 0 + +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t1" order by 1; +sql select * from streamt where tb = "t1" order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data02 != 1 then + print ======data02=$data02 + goto loop4 +endi + +if $data12 != 7 then + print ======data12=$data12 + goto loop4 +endi + +if $data22 != 13 then + print ======data22=$data22 + goto loop4 +endi + +if $data32 != 19 then + print ======data32=$data32 
+ goto loop4 +endi + +if $data42 != 12 then + print ======data42=$data42 + goto loop4 +endi + +if $data52 != 5 then + print ======data52=$data52 + goto loop4 +endi + + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(linear) order by 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791217000) every(1s) fill(linear) order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 + +loop4_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t2" order by 1; +sql select * from streamt where tb = "t2" order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop4_1 +endi + +if $data02 != 31 then + print ======data02=$data02 + goto loop4_1 +endi + +if $data12 != 34 then + print ======data12=$data12 + goto loop4_1 +endi + +if $data22 != 37 then + print ======data22=$data22 + goto loop4_1 +endi + +if $data32 != 40 then + print ======data32=$data32 + goto loop4_1 +endi + +if $data42 != 50 then + print ======data42=$data42 + goto loop4_1 +endi + +if $data52 != 61 then + print ======data52=$data52 + goto loop4_1 +endi + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 
values(1648791219001,7,1,1,1.0); +sql insert into t2 values(1648791219001,81,1,1,1.0); + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(linear) order by 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t1 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(linear) order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +$loop_count = 0 + +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t1" order by 1; +sql select * from streamt where tb = "t1" order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +if $rows != 8 then + print ======rows=$rows + goto loop5 +endi + +print sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(linear) order by 1; +sql select _irowts, _isfilled as a1, interp(a) as a2 from t2 partition by tbname range(1648791212000, 1648791219000) every(1s) fill(linear) order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 
$data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +$loop_count = 0 + +loop5_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt where tb = "t2" order by 1; +sql select * from streamt where tb = "t2" order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 +print $data60 $data61 $data62 $data63 $data64 $data65 +print $data70 $data71 $data72 $data73 $data74 $data75 + +if $rows != 8 then + print ======rows=$rows + goto loop5_1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpLarge.sim b/tests/script/tsim/stream/streamInterpLarge.sim new file mode 100644 index 00000000000..85203d2d9e3 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpLarge.sim @@ -0,0 +1,188 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648700000000,1,1,1,1.0) 
(1648710000000,100,100,100,100.0) (1648720000000,10,10,10,10.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 30 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 20001 then + print ======rows=$rows + goto loop0 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648700000000,1,1,1,1.0) (1648710000000,100,100,100,100.0) (1648720000000,10,10,10,10.0); + +$loop_count = 0 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 30 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 20001 then + print ======rows=$rows + goto loop2 +endi + +print step3 +print =============== create database +sql create database test3 vgroups 1; +sql use test3; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648700000000,1,1,1,1.0) (1648710000000,100,100,100,100.0) (1648720000000,10,10,10,10.0); + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 30 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 
$data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 20001 then + print ======rows=$rows + goto loop3 +endi + +print step4 +print =============== create database +sql create database test4 vgroups 1; +sql use test4; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(value, 1,2,3,4); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648700000000,1,1,1,1.0) (1648710000000,100,100,100,100.0) (1648720000000,10,10,10,10.0); + +$loop_count = 0 + +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 30 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 20001 then + print ======rows=$rows + goto loop4 +endi + +print step5 +print =============== create database +sql create database test5 vgroups 1; +sql use test5; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams5 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648700000000,1,1,1,1.0) (1648710000000,100,100,100,100.0) (1648720000000,10,10,10,10.0); + +$loop_count = 0 + +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 30 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 20001 then + print ======rows=$rows + goto loop5 +endi + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpLinear0.sim 
b/tests/script/tsim/stream/streamInterpLinear0.sim new file mode 100644 index 00000000000..7d4b28d545a --- /dev/null +++ b/tests/script/tsim/stream/streamInterpLinear0.sim @@ -0,0 +1,507 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,1,1,1,1.1); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop0 +endi + + +sql insert into t1 values(1648791213001,2,2,2,2.1); +sql insert into t1 values(1648791213009,3,3,3,3.1); + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop1 +endi + + +sql insert into t1 values(1648791217001,14,14,14,14.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 
range(1648791213000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 5 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 8 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 11 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 13 then + print ======data41=$data41 + goto loop2 +endi + + +sql insert into t1 values(1648791215001,7,7,7,7.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + 
+print 3 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop3 +endi + +if $data11 != 4 then + print ======data11=$data11 + goto loop3 +endi + +if $data21 != 6 then + print ======data21=$data21 + goto loop3 +endi + +if $data31 != 10 then + print ======data31=$data31 + goto loop3 +endi + +if $data41 != 13 then + print ======data41=$data41 + goto loop3 +endi + + +print step2 + +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212000,0,0,0,0.0) (1648791213001,11,11,11,11.0) (1648791213009,22,22,22,2.1) (1648791215001,15,15,15,15.1) (1648791217001,34,34,34,34.1); + + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + 
return -1 +endi + +print 1 sql select * from streamt2; +sql select * from streamt2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != 10 then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != 18 then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != 15 then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != 24 then + print ======data41=$data41 + goto loop4 +endi + +if $data51 != 33 then + print ======data51=$data51 + goto loop4 +endi + + +print step2_1 + +sql create database test2_1 vgroups 1; +sql use test2_1; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_1 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212011,0,0,0,0.0) (1648791212099,20,20,20,20.0) (1648791213011,11,11,11,11.0) (1648791214099,35,35,35,35.1) (1648791215011,10,10,10,10.1) (1648791218099,34,34,34,34.1) (1648791219011,5,5,5,5.1); + + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791219011) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791219011) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 
$data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + +$loop_count = 0 +loop4_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt2_1; +sql select * from streamt2_1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop4_1 +endi + +# row 0 +if $data01 != 11 then + print ======data01=$data01 + goto loop4_1 +endi + +if $data11 != 32 then + print ======data11=$data11 + goto loop4_1 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop4_1 +endi + +if $data31 != 17 then + print ======data31=$data31 + goto loop4_1 +endi + +if $data41 != 25 then + print ======data41=$data41 + goto loop4_1 +endi + +if $data51 != 33 then + print ======data51=$data51 + goto loop4_1 +endi + +if $data61 != 5 then + print ======data61=$data61 + goto loop4_1 +endi + + +print step3 +sql create database test3 vgroups 1; +sql use test3; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212001,0,0,0,0.0) (1648791217001,8,4,4,4.1); + + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217000) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from 
t1 range(1648791212000, 1648791217000) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt3; +sql select * from streamt3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop5 +endi + +sql insert into t1 values(1648791213001,11,11,11,11.0) (1648791213009,22,22,22,22.1) (1648791215001,15,15,15,15.1) + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt3; +sql select * from streamt3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print 
$data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop6 +endi + +# row 0 +if $data01 != 10 then + print ======data01=$data01 + goto loop6 +endi + +if $data11 != 18 then + print ======data11=$data11 + goto loop6 +endi + +if $data21 != 15 then + print ======data21=$data21 + goto loop6 +endi + +if $data31 != 11 then + print ======data31=$data31 + goto loop6 +endi + +if $data41 != 8 then + print ======data41=$data41 + goto loop6 +endi + +print end +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpLinear1.sim b/tests/script/tsim/stream/streamInterpLinear1.sim new file mode 100644 index 00000000000..5151c47f62b --- /dev/null +++ b/tests/script/tsim/stream/streamInterpLinear1.sim @@ -0,0 +1,239 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,1,1,1,1.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop0 +endi + + +sql insert into t1 values(1648791213009,30,3,3,1.0) (1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 
1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 23 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 17 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 10 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop2 +endi + + +print step2 + +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791213000,1,1,1,1.0); + + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 
$data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop3 +endi + + +sql insert into t1 values(1648791213009,30,3,3,1.0) (1648791217001,4,4,4,4.1) (1648791219000,50,5,5,5.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791219000) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791219000) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +$loop_count = 0 +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != 23 then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != 17 then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != 10 then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop4 +endi + +if 
$data51 != 26 then + print ======data51=$data51 + goto loop4 +endi + +if $data61 != 50 then + print ======data61=$data61 + goto loop4 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpNext0.sim b/tests/script/tsim/stream/streamInterpNext0.sim new file mode 100644 index 00000000000..abdbeda6349 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpNext0.sim @@ -0,0 +1,437 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,1,1,1,1.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop0 +endi + + +sql insert into t1 values(1648791213001,2,2,2,1.1); +sql insert into t1 values(1648791213009,3,3,3,1.0); + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 
$data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop1 +endi + + +sql insert into t1 values(1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 4 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 4 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop2 +endi + + +sql insert into t1 values(1648791215001,5,5,5,5.1); + +print sql select _irowts, interp(a), 
interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 3 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop3 +endi + +if $data11 != 5 then + print ======data11=$data11 + goto loop3 +endi + +if $data21 != 5 then + print ======data21=$data21 + goto loop3 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop3 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop3 +endi + +print step2 + +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212000,0,0,0,0.0) (1648791213001,1,1,1,1.0) (1648791213009,2,2,2,1.1) (1648791215001,5,5,5,5.1) (1648791217001,4,4,4,4.1); + + +print sql select _irowts, interp(a), 
interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt2; +sql select * from streamt2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != 5 then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != 5 then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop4 +endi + +if $data51 != 4 then + print ======data51=$data51 + goto loop4 +endi + + + +print step3 +sql create database test3 vgroups 1; +sql use test3; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791210001,0,0,0,0.0) (1648791217001,4,4,4,4.1); + + +print sql select _irowts, interp(a), 
interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217000) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217000) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 + +$loop_count = 0 +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt3; +sql select * from streamt3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 + + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop5 +endi + +sql insert into t1 values(1648791213001,1,1,1,1.0) (1648791213009,2,2,2,1.1) (1648791215001,5,5,5,5.1) + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 + +$loop_count = 0 +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * 
from streamt3; +sql select * from streamt3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 + + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop6 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop6 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop6 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop6 +endi + +if $data31 != 5 then + print ======data31=$data31 + goto loop6 +endi + +if $data41 != 5 then + print ======data41=$data41 + goto loop6 +endi + +if $data51 != 4 then + print ======data51=$data51 + goto loop6 +endi + +if $data61 != 4 then + print ======data61=$data61 + goto loop6 +endi + +print end +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpNext1.sim b/tests/script/tsim/stream/streamInterpNext1.sim new file mode 100644 index 00000000000..f74863d7a36 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpNext1.sim @@ -0,0 +1,477 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,1,1,1,1.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from 
streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop0 +endi + + +sql insert into t1 values(1648791213009,3,3,3,1.0) (1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 4 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 4 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 4 then + print 
======data41=$data41 + goto loop2 +endi + + +print step2 + +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791213000,1,1,1,1.0); + + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop3 +endi + + +sql insert into t1 values(1648791213009,3,3,3,1.0) (1648791217001,4,4,4,4.1) (1648791219000,5,5,5,5.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791219000) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791219000) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +$loop_count = 0 +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; 
+sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != 4 then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != 4 then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop4 +endi + +if $data51 != 5 then + print ======data51=$data51 + goto loop4 +endi + +if $data61 != 5 then + print ======data61=$data61 + goto loop4 +endi + +print step3 + +sql create database test3 vgroups 1; +sql use test3; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791213001,1,1,1,1.0) (1648791219001,2,2,2,2.1) (1648791229001,3,3,3,3.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 
$data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +$loop_count = 0 +loop5: + +sleep 300 + +print sql select * from streamt order by 1; +sql select * from streamt order by 1; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 16 then + print =====rows=$rows + goto loop5 +endi + +sql insert into t1 values(1648791215001,4,4,4,4.0) (1648791217001,5,5,5,5.1) (1648791222000,6,6,6,6.1) (1648791226000,7,7,7,7.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +$loop_count = 0 +loop6: + +sleep 300 + +print sql select * from streamt order by 1; +sql select * from streamt order by 1; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 16 then + goto loop6 +endi + +if $data01 != 4 then + print =====data01=$data01 + goto loop6 +endi + +if $data11 != 4 then + print =====data11=$data11 + goto loop6 +endi + +if $data21 != 5 then + print =====data21=$data21 + goto loop6 +endi + +if $data31 != 5 then + print =====data31=$data31 + goto loop6 +endi + +if $data41 != 2 then + print =====data41=$data41 + goto loop6 +endi + +if $data51 != 2 then + print =====data51=$data51 + goto loop6 +endi + +if $data61 != 6 then + print =====data61=$data61 + goto loop6 +endi + +if $data71 != 6 then + print =====data71=$data71 + goto loop6 +endi + +if $data81 != 6 
then + print =====data81=$data81 + goto loop6 +endi + +if $data91 != 7 then + print =====data91=$data91 + goto loop6 +endi + +if $data[10][1] != 7 then + print =====data[10][1]=$data[10][1] + goto loop6 +endi + +if $data[11][1] != 7 then + print =====data[11][1]=$data[11][1] + goto loop6 +endi + +if $data[12][1] != 7 then + print =====data[12][1]=$data[12][1] + goto loop6 +endi + +if $data[13][1] != 3 then + print =====data[13][1]=$data[13][1] + goto loop6 +endi + +if $data[14][1] != 3 then + print =====data[14][1]=$data[14][1] + goto loop6 +endi + +if $data[15][1] != 3 then + print =====data[15][1]=$data[15][1] + goto loop6 +endi + + +print step4 + +sql create database test4 vgroups 1; +sql use test4; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4 as select _irowts, interp(a) as b, _isfilled as a from st partition by tbname, b as cc every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,20000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791212000,10000,2,3) (1648791215001,20,2,3); + +$loop_count = 0 +loop7: + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +sleep 300 + +print sql select a,b from streamt4; +sql select a,b from streamt4; + +if $rows != 6 then + print ======rows=$rows + goto loop7 +endi + +if $data00 != 0 then + print ======data00=$data00 + goto loop7 +endi + +if $data01 != 10000 then + print ======data01=$data01 + goto loop7 +endi + +if $data10 != 1 then + print ======data10=$data10 + goto loop7 +endi + +if $data20 != 1 then + print ======data20=$data20 + goto loop7 +endi + +if $data41 != 20000 then + print ======data41=$data41 + goto loop7 +endi + +if $data50 != 0 then + print ======data50=$data50 + goto loop7 +endi + +if $data51 != 20000 then + print 
======data51=$data51 + goto loop7 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpOther.sim b/tests/script/tsim/stream/streamInterpOther.sim new file mode 100644 index 00000000000..8553e67ec87 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpOther.sim @@ -0,0 +1,608 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 4; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt1_1 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(prev); +sql create stream streams1_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt1_2 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(next); +sql create stream streams1_3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt1_3 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(linear); +sql create stream streams1_4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt1_4 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(NULL); +sql create stream streams1_5 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt1_5 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(value,11,22,33,44); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791215000,0,0,0,0.0); + +sql insert into t1 values(1648791212000,10,10,10,10.0); + +$loop_count = 0 +loop0: + 
+$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql desc streamt1_1; +sql desc streamt1_1; + +if $rows != 9 then + print ======rows=$rows + goto loop0 +endi + +sql desc streamt1_2; + +if $rows != 9 then + print ======rows=$rows + goto loop0 +endi + +sql desc streamt1_3; + +if $rows != 9 then + print ======rows=$rows + goto loop0 +endi + +sql desc streamt1_4; + +if $rows != 9 then + print ======rows=$rows + goto loop0 +endi + +sql desc streamt1_5; + +if $rows != 9 then + print ======rows=$rows + goto loop0 +endi + +$loop_count = 0 +loop0_1: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select * from streamt1_1; +sql select * from streamt1_1; + +if $rows != 4 then + print ======rows=$rows + goto loop0_1 +endi + +print sql select * from streamt1_2; +sql select * from streamt1_2; + +if $rows != 4 then + print ======rows=$rows + goto loop0_1 +endi + +print sql select * from streamt1_3; +sql select * from streamt1_3; + +if $rows != 4 then + print ======rows=$rows + goto loop0_1 +endi + +print sql select * from streamt1_4; +sql select * from streamt1_4; + +if $rows != 4 then + print ======rows=$rows + goto loop0_1 +endi + +print sql select * from streamt1_5; +sql select * from streamt1_5; + +if $rows != 4 then + print ======rows=$rows + goto loop0_1 +endi + +print sql select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 range(1648791212000, 1648791215000) every(1s) fill(value,11,22,33,44); +sql select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 range(1648791212000, 1648791215000) every(1s) fill(value,11,22,33,44); + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 
$data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 +loop0_2: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select * from streamt1_5; +sql select * from streamt1_5; + +if $data01 != 10 then + print ======data01=$data01 + goto loop0_2 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop0_2 +endi + +if $data03 != 10 then + print ======data03=$data03 + goto loop0_2 +endi + +if $data04 != 0 then + print ======data04=$data04 + goto loop0_2 +endi + +if $data05 != 10 then + print ======data05=$data05 + goto loop0_2 +endi + +if $data06 != 0 then + print ======data06=$data06 + goto loop0_2 +endi + +if $data07 != 10.000000000 then + print ======data07=$data07 + goto loop0_2 +endi + +if $data11 != 11 then + print ======data11=$data11 + goto loop0_2 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop0_2 +endi + +if $data13 != 22 then + print ======data13=$data13 + goto loop0_2 +endi + +if $data14 != 1 then + print ======data14=$data14 + goto loop0_2 +endi + +if $data15 != 33 then + print ======data15=$data15 + goto loop0_2 +endi + +if $data16 != 1 then + print ======data16=$data16 + goto loop0_2 +endi + +if $data17 != 44.000000000 then + print ======data17=$data17 + goto loop0_2 +endi + +print step3 + +sql create database test3 vgroups 4; +sql use test3; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + + +sql create stream streams3_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3_1 TAGS(cc varchar(100)) SUBTABLE(concat(concat("tbn-", tbname), "_1")) as select interp(a), _isfilled as a1 from st partition by tbname, b as cc every(1s) fill(prev); +sql create stream streams3_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3_2 TAGS(cc varchar(100)) SUBTABLE(concat(concat("tbn-", 
tbname), "_2")) as select interp(a), _isfilled as a1 from st partition by tbname, b as cc every(1s) fill(next); +sql create stream streams3_3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3_3 TAGS(cc varchar(100)) SUBTABLE(concat(concat("tbn-", tbname), "_3")) as select interp(a), _isfilled as a1 from st partition by tbname, b as cc every(1s) fill(linear); +sql create stream streams3_4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3_4 TAGS(cc varchar(100)) SUBTABLE(concat(concat("tbn-", tbname), "_4")) as select interp(a), _isfilled as a1 from st partition by tbname, b as cc every(1s) fill(NULL); +sql create stream streams3_5 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3_5 TAGS(cc varchar(100)) SUBTABLE(concat(concat("tbn-", tbname), "_5")) as select interp(a), _isfilled as a1 from st partition by tbname, b as cc every(1s) fill(value,11); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,1,2,3); + +sleep 500 + +sql insert into t1 values(1648791212000,10,2,3); + +sleep 500 + +sql insert into t1 values(1648791215001,20,2,3); + +sleep 500 + +sql insert into t2 values(1648791215001,20,2,3); + +$loop_count = 0 +loop3: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select cc, * from `tbn-t1_1_streamt3_1_914568691400502130`; +sql select cc, * from `tbn-t1_1_streamt3_1_914568691400502130`; + +if $rows != 6 then + print ======rows=$rows + goto loop3 +endi + +if $data00 != 2 then + print ======rows=$rows + goto loop3 +endi + +print sql select cc, * from `tbn-t1_2_streamt3_2_914568691400502130`; +sql select cc, * from `tbn-t1_2_streamt3_2_914568691400502130`; + +if $rows != 6 then + print ======rows=$rows + goto loop3 +endi + +if $data00 != 2 then + print ======rows=$rows + goto loop3 +endi + +print sql select cc, * from `tbn-t1_3_streamt3_3_914568691400502130`; +sql select cc, * from `tbn-t1_3_streamt3_3_914568691400502130`; + +if $rows != 6 
then + print ======rows=$rows + goto loop3 +endi + +if $data00 != 2 then + print ======rows=$rows + goto loop3 +endi + +print sql select cc, * from `tbn-t1_4_streamt3_4_914568691400502130`; +sql select cc, * from `tbn-t1_4_streamt3_4_914568691400502130`; + +if $rows != 6 then + print ======rows=$rows + goto loop3 +endi + +if $data00 != 2 then + print ======rows=$rows + goto loop3 +endi + +print sql select cc, * from `tbn-t1_5_streamt3_5_914568691400502130`; +sql select cc, * from `tbn-t1_5_streamt3_5_914568691400502130`; + +if $rows != 6 then + print ======rows=$rows + goto loop3 +endi + +if $data00 != 2 then + print ======rows=$rows + goto loop3 +endi + + + +print sql select * from `tbn-t2_1_streamt3_1_8905952758123525205`; +sql select * from `tbn-t2_1_streamt3_1_8905952758123525205`; + +if $rows != 0 then + print ======rows=$rows + goto loop3 +endi + +print sql select * from `tbn-t2_2_streamt3_2_8905952758123525205`; +sql select * from `tbn-t2_2_streamt3_2_8905952758123525205`; + +if $rows != 0 then + print ======rows=$rows + goto loop3 +endi + +print sql select * from `tbn-t2_3_streamt3_3_8905952758123525205`; +sql select * from `tbn-t2_3_streamt3_3_8905952758123525205`; + +if $rows != 0 then + print ======rows=$rows + goto loop3 +endi + +print sql select * from `tbn-t2_4_streamt3_4_8905952758123525205`; +sql select * from `tbn-t2_4_streamt3_4_8905952758123525205`; + +if $rows != 0 then + print ======rows=$rows + goto loop3 +endi + +print sql select * from `tbn-t2_5_streamt3_5_8905952758123525205`; +sql select * from `tbn-t2_5_streamt3_5_8905952758123525205`; + +if $rows != 0 then + print ======rows=$rows + goto loop3 +endi + + +print step4 +print =============== create database +sql drop database if exists test4; +sql create database test4 vgroups 4; +sql use test4; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams4_1 trigger at_once watermark 10s IGNORE EXPIRED 1 IGNORE UPDATE 0 into streamt4_1 as select interp(a, 
1), _isfilled as a1 from t1 every(1s) fill(prev); +sql create stream streams4_2 trigger at_once watermark 10s IGNORE EXPIRED 1 IGNORE UPDATE 0 into streamt4_2 as select interp(a, 1), _isfilled as a1 from t1 every(1s) fill(next); +sql create stream streams4_3 trigger at_once watermark 10s IGNORE EXPIRED 1 IGNORE UPDATE 0 into streamt4_3 as select interp(a, 1), _isfilled as a1 from t1 every(1s) fill(linear); +sql create stream streams4_4 trigger at_once watermark 10s IGNORE EXPIRED 1 IGNORE UPDATE 0 into streamt4_4 as select interp(a, 1), _isfilled as a1 from t1 every(1s) fill(NULL); +sql create stream streams4_5 trigger at_once watermark 10s IGNORE EXPIRED 1 IGNORE UPDATE 0 into streamt4_5 as select interp(a, 1), _isfilled as a1 from t1 every(1s) fill(value,11); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791275000,NULL,0,0,0.0); + +sleep 500 + +sql insert into t1 values(1648791276000,NULL,1,0,0.0) (1648791277000,NULL,2,0,0.0) (1648791275000,NULL,3,0,0.0); + +$loop_count = 0 +loop4: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select * from streamt4_1; +sql select * from streamt4_1; + +if $rows != 0 then + print ======rows=$rows + goto loop4 +endi + +print sql select * from streamt4_2; +sql select * from streamt4_2; + +if $rows != 0 then + print ======rows=$rows + goto loop4 +endi + +print sql select * from streamt4_3; +sql select * from streamt4_3; + +if $rows != 0 then + print ======rows=$rows + goto loop4 +endi + +print sql select * from streamt4_4; +sql select * from streamt4_4; + +if $rows != 0 then + print ======rows=$rows + goto loop4 +endi + +print sql select * from streamt4_5; +sql select * from streamt4_5; + +if $rows != 0 then + print ======rows=$rows + goto loop4 +endi + +print step4_2 + +print sql insert into t1 values(1648791215000,1,0,0,0.0); +sql insert into t1 values(1648791215000,1,0,0,0.0); +sleep 500 + +sql insert into t1 values(1648791216000,2,1,0,0.0) 
(1648791217000,3,2,0,0.0) (1648791215000,4,3,0,0.0); + +$loop_count = 0 +loop5: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select * from streamt4_1; +sql select * from streamt4_1; + +if $rows != 0 then + print ======rows=$rows + goto loop5 +endi + +print sql select * from streamt4_2; +sql select * from streamt4_2; + +if $rows != 0 then + print ======rows=$rows + goto loop5 +endi + +print sql select * from streamt4_3; +sql select * from streamt4_3; + +if $rows != 0 then + print ======rows=$rows + goto loop5 +endi + +print sql select * from streamt4_4; +sql select * from streamt4_4; + +if $rows != 0 then + print ======rows=$rows + goto loop5 +endi + +print sql select * from streamt4_5; +sql select * from streamt4_5; + +if $rows != 0 then + print ======rows=$rows + goto loop5 +endi + +print step4_3 + +print sql insert into t1 values(1648791278000,NULL,2,0,0.0) (1648791278001,NULL,2,0,0.0) (1648791279000,1,2,0,0.0) (1648791279001,NULL,2,0,0.0) (1648791280000,NULL,2,0,0.0)(1648791280001,NULL,2,0,0.0)(1648791281000,20,2,0,0.0) (1648791281001,NULL,2,0,0.0)(1648791281002,NULL,2,0,0.0) (1648791282000,NULL,2,0,0.0); +sql insert into t1 values(1648791278000,NULL,2,0,0.0) (1648791278001,NULL,2,0,0.0) (1648791279000,1,2,0,0.0) (1648791279001,NULL,2,0,0.0) (1648791280000,NULL,2,0,0.0)(1648791280001,NULL,2,0,0.0)(1648791281000,20,2,0,0.0) (1648791281001,NULL,2,0,0.0)(1648791281002,NULL,2,0,0.0) (1648791282000,NULL,2,0,0.0); + +$loop_count = 0 +loop6: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select * from streamt4_1; +sql select * from streamt4_1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows != 3 then + print 
======rows=$rows + goto loop6 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop6 +endi + +print sql select * from streamt4_2; +sql select * from streamt4_2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows != 3 then + print ======rows=$rows + goto loop6 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop6 +endi + +print sql select * from streamt4_3; +sql select * from streamt4_3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows != 3 then + print ======rows=$rows + goto loop6 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop6 +endi + +print sql select * from streamt4_4; +sql select * from streamt4_4; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows != 3 then + print ======rows=$rows + goto loop6 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop6 +endi + +print sql select * from streamt4_5; +sql select * from streamt4_5; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows != 3 then + print ======rows=$rows + goto loop6 
+endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop6 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpOther1.sim b/tests/script/tsim/stream/streamInterpOther1.sim new file mode 100644 index 00000000000..941b3e18f03 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpOther1.sim @@ -0,0 +1,510 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step4 + +sql create database test4_1 vgroups 4; +sql use test4_1; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stable streamt4_1(ts timestamp,a varchar(10),b tinyint,c tinyint) tags(ta int,cc int,tc int); + +sql create stream streams4_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4_1(ts, b, a) TAGS(cc) SUBTABLE(concat(concat("tbn-", tbname), "_1")) as select _irowts, interp(a), _isfilled as a1 from st partition by tbname, b as cc every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,20000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791212000,10000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791215001,20,2,3); + +$loop_count = 0 +loop4_1: + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +sleep 300 + +print sql select a,b from streamt4_1; +sql select a,b from streamt4_1; + +if $rows != 6 then + print ======rows=$rows + goto loop4_1 +endi + +if $data00 != false then + print ======data00=$data00 + goto loop4_1 +endi + +if $data01 != 16 then + print ======data01=$data01 + goto loop4_1 +endi + +if $data10 != true then + print ======data10=$data10 + goto loop4_1 +endi + +if $data20 != true then + print ======data20=$data20 + goto loop4_1 +endi + +if $data50 != false then + print ======data50=$data50 + goto loop4_1 +endi + 
+if $data51 != 32 then + print ======data51=$data51 + goto loop4_1 +endi + +print step4_2 + +sql create database test4_2 vgroups 4; +sql use test4_2; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stable streamt4_2(ts timestamp,a varchar(10),b tinyint,c tinyint) tags(ta int,cc int,tc int); + +sql create stream streams4_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4_2(ts, b, a) TAGS(cc) SUBTABLE(concat(concat("tbn-", tbname), "_2")) as select _irowts, interp(a), _isfilled as a1 from st partition by tbname, b as cc every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,20000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791212000,10000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791215001,20,2,3); + +$loop_count = 0 +loop4_2: + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +sleep 300 + +print sql select a,b from streamt4_2; +sql select a,b from streamt4_2; + +if $rows != 6 then + print ======rows=$rows + goto loop4_2 +endi + +if $data00 != false then + print ======data00=$data00 + goto loop4_2 +endi + +if $data01 != 16 then + print ======data01=$data01 + goto loop4_2 +endi + +if $data10 != true then + print ======data10=$data10 + goto loop4_2 +endi + +if $data20 != true then + print ======data20=$data20 + goto loop4_2 +endi + +if $data50 != false then + print ======data50=$data50 + goto loop4_2 +endi + +if $data51 != 32 then + print ======data51=$data51 + goto loop4_2 +endi + +print step4_3 + +sql create database test4_3 vgroups 4; +sql use test4_3; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stable streamt4_3(ts timestamp,a varchar(10),b tinyint,c tinyint) tags(ta int,cc int,tc 
int); + +sql create stream streams4_3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4_3(ts, b, a) TAGS(cc) SUBTABLE(concat(concat("tbn-", tbname), "_3")) as select _irowts, interp(a), _isfilled as a1 from st partition by tbname, b as cc every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,20000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791212000,10000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791215001,20,2,3); + +$loop_count = 0 +loop4_3: + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +sleep 300 + +print sql select a,b from streamt4_3; +sql select a,b from streamt4_3; + +if $rows != 6 then + print ======rows=$rows + goto loop4_3 +endi + +if $data00 != false then + print ======data00=$data00 + goto loop4_3 +endi + +if $data01 != 16 then + print ======data01=$data01 + goto loop4_3 +endi + +if $data10 != true then + print ======data10=$data10 + goto loop4_3 +endi + +if $data20 != true then + print ======data20=$data20 + goto loop4_3 +endi + +if $data50 != false then + print ======data50=$data50 + goto loop4_3 +endi + +if $data51 != 32 then + print ======data51=$data51 + goto loop4_3 +endi + +print step4_4 + +sql create database test4_4 vgroups 4; +sql use test4_4; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stable streamt4_4(ts timestamp,a varchar(10),b tinyint,c tinyint) tags(ta int,cc int,tc int); + +sql create stream streams4_4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4_4(ts, b, a) TAGS(cc) SUBTABLE(concat(concat("tbn-", tbname), "_4")) as select _irowts, interp(a), _isfilled as a1 from st partition by tbname, b as cc every(1s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,20000,2,3); + +sleep 2000 + +sql insert into t1 
values(1648791212000,10000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791215001,20,2,3); + +$loop_count = 0 +loop4_4: + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +sleep 300 + +print sql select a,b from streamt4_4; +sql select a,b from streamt4_4; + +if $rows != 6 then + print ======rows=$rows + goto loop4_4 +endi + +if $data00 != false then + print ======data00=$data00 + goto loop4_4 +endi + +if $data01 != 16 then + print ======data01=$data01 + goto loop4_4 +endi + +if $data10 != true then + print ======data10=$data10 + goto loop4_4 +endi + +if $data20 != true then + print ======data20=$data20 + goto loop4_4 +endi + +if $data50 != false then + print ======data50=$data50 + goto loop4_4 +endi + +if $data51 != 32 then + print ======data51=$data51 + goto loop4_4 +endi + +print step4_5 + +sql create database test4_5 vgroups 4; +sql use test4_5; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stable streamt4_5(ts timestamp,a varchar(10),b tinyint,c tinyint) tags(ta int,cc int,tc int); + +sql create stream streams4_5 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4_5(ts, b, a) TAGS(cc) SUBTABLE(concat(concat("tbn-", tbname), "_5")) as select _irowts, interp(a), _isfilled as a1 from st partition by tbname, b as cc every(1s) fill(value,1100); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,20000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791212000,10000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791215001,20,2,3); + +$loop_count = 0 +loop4_5: + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +sleep 300 + +print sql select a,b from streamt4_5; +sql select a,b from streamt4_5; + +if $rows != 6 then + print ======rows=$rows + goto loop4_5 +endi + +if $data00 != false then + print ======data00=$data00 + 
goto loop4_5 +endi + +if $data01 != 16 then + print ======data01=$data01 + goto loop4_5 +endi + +if $data10 != true then + print ======data10=$data10 + goto loop4_5 +endi + +if $data20 != true then + print ======data20=$data20 + goto loop4_5 +endi + +if $data50 != false then + print ======data50=$data50 + goto loop4_5 +endi + +if $data51 != 32 then + print ======data51=$data51 + goto loop4_5 +endi + +print step5 +print =============== create database +sql drop database if exists test5; +sql create database test5 vgroups 4 precision 'us'; +sql use test5; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams5 trigger at_once watermark 10s IGNORE EXPIRED 1 IGNORE UPDATE 0 into streamt as select interp(a), _isfilled as a1 from t1 every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791213000001,1,1,1,1.0) (1648791215000001,20,1,1,1.0) (1648791216000000,3,1,1,1.0); + +$loop_count = 0 +loop5: + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +sleep 300 + +print sql select cast(`_irowts` as bigint) from streamt order by 1; +sql select cast(`_irowts` as bigint) from streamt order by 1; + +if $rows != 3 then + print ======rows=$rows + goto loop5 +endi + +if $data00 != 1648791214000000 then + print ======data00=$data00 + goto loop5 +endi + +if $data10 != 1648791215000000 then + print ======data01=$data01 + goto loop5 +endi + +if $data20 != 1648791216000000 then + print ======data01=$data01 + goto loop5 +endi + +print step6 +print =============== create database +sql drop database if exists test6; +sql create database test6 vgroups 4 precision 'us'; +sql use test6; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams6 trigger at_once watermark 10s IGNORE EXPIRED 1 IGNORE UPDATE 0 into streamt as select interp(a), _isfilled as a1 from t1 every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 
values(1648791213000001,1,1,1,1.0) (1648791215000001,20,1,1,1.0) (1648791216000000,3,1,1,1.0); + +$loop_count = 0 +loop6: + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +sleep 300 + +print sql select cast(`_irowts` as bigint) from streamt order by 1; +sql select cast(`_irowts` as bigint) from streamt order by 1; + +if $rows != 3 then + print ======rows=$rows + goto loop6 +endi + +if $data00 != 1648791214000000 then + print ======data00=$data00 + goto loop6 +endi + +if $data10 != 1648791215000000 then + print ======data01=$data01 + goto loop6 +endi + +if $data20 != 1648791216000000 then + print ======data01=$data01 + goto loop6 +endi + +print step7 +print =============== create database +sql drop database if exists test7; +sql create database test7 vgroups 4 precision 'us'; +sql use test7; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams7 trigger at_once watermark 10s IGNORE EXPIRED 1 IGNORE UPDATE 0 into streamt as select interp(a), _isfilled as a1 from t1 every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791213000001,1,1,1,1.0) (1648791215000001,20,1,1,1.0) (1648791216000000,3,1,1,1.0); + +$loop_count = 0 +loop7: + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +sleep 300 + +print sql select cast(`_irowts` as bigint) from streamt order by 1; +sql select cast(`_irowts` as bigint) from streamt order by 1; + +if $rows != 3 then + print ======rows=$rows + goto loop7 +endi + +if $data00 != 1648791214000000 then + print ======data00=$data00 + goto loop7 +endi + +if $data10 != 1648791215000000 then + print ======data01=$data01 + goto loop7 +endi + +if $data20 != 1648791216000000 then + print ======data01=$data01 + goto loop7 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpOther2.sim b/tests/script/tsim/stream/streamInterpOther2.sim new file mode 100644 index 
00000000000..25d5171a5ce --- /dev/null +++ b/tests/script/tsim/stream/streamInterpOther2.sim @@ -0,0 +1,525 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step prev + +sql create database test1 vgroups 4; +sql use test1; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt1 as select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,0,0,3) (1648791212000,10,10,3) (1648791212001,11,NULL,3); + +sleep 500 + +sql insert into t1 values(1648791214001,20,NULL,3) (1648791213000,22,NULL,3); + +print sql select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname range(1648791212000, 1648791217000) every(1s) fill(prev); +sql select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname range(1648791212000, 1648791217000) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 +loop0: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select * from streamt1; +sql select * from streamt1; + +if $rows != 6 then + print ======rows=$rows + goto loop0 +endi + +if $data01 != 0 then + print ======data01=$data01 + goto loop0 +endi + +if $data02 != 10 then + print ======data02=$data02 + goto loop0 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop0 +endi + +if $data12 
!= 10 then + print ======data12=$data12 + goto loop0 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop0 +endi + +if $data22 != 10 then + print ======data22=$data22 + goto loop0 +endi + +if $data31 != 1 then + print ======data31=$data31 + goto loop0 +endi + +if $data32 != 10 then + print ======data32=$data32 + goto loop0 +endi + +if $data41 != 1 then + print ======data41=$data41 + goto loop0 +endi + +if $data42 != 10 then + print ======data42=$data42 + goto loop0 +endi + +if $data51 != 0 then + print ======data51=$data51 + goto loop0 +endi + +if $data52 != 0 then + print ======data52=$data52 + goto loop0 +endi + +print step next + +sql create database test2 vgroups 4; +sql use test2; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,11,11,3) (1648791212000,10,10,3) (1648791212001,11,NULL,3); + +sleep 500 + +sql insert into t1 values(1648791214001,20,NULL,3) (1648791213000,22,NULL,3); + +print sql select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname range(1648791212000, 1648791217000) every(1s) fill(next); +sql select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname range(1648791212000, 1648791217000) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 +loop1: + +$loop_count = $loop_count + 1 +if 
$loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select * from streamt2; +sql select * from streamt2; + +if $rows != 6 then + print ======rows=$rows + goto loop1 +endi + +if $data01 != 0 then + print ======data01=$data01 + goto loop1 +endi + +if $data02 != 10 then + print ======data02=$data02 + goto loop1 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop1 +endi + +if $data12 != 11 then + print ======data12=$data12 + goto loop1 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop1 +endi + +if $data22 != 11 then + print ======data22=$data22 + goto loop1 +endi + +if $data31 != 1 then + print ======data31=$data31 + goto loop1 +endi + +if $data32 != 11 then + print ======data32=$data32 + goto loop1 +endi + +if $data41 != 1 then + print ======data41=$data41 + goto loop1 +endi + +if $data42 != 11 then + print ======data42=$data42 + goto loop1 +endi + +if $data51 != 0 then + print ======data51=$data51 + goto loop1 +endi + +if $data52 != 11 then + print ======data52=$data52 + goto loop1 +endi + +print step value + +sql create database test3 vgroups 4; +sql use test3; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams3_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3_1 as select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname every(1s) fill(NULL); +sql create stream streams3_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3_2 as select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname every(1s) fill(value, 110); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,11,11,3) (1648791212000,10,10,3) (1648791212001,11,NULL,3); + +sleep 500 + +sql insert into t1 values(1648791214001,20,NULL,3) (1648791213000,22,NULL,3); + +print sql select _irowts, _isfilled as a1, interp(b, 1) from 
st partition by tbname range(1648791212000, 1648791217000) every(1s) fill(NULL); +sql select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname range(1648791212000, 1648791217000) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 +loop3: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select * from streamt3_1; +sql select * from streamt3_1; + +if $rows != 6 then + print ======rows=$rows + goto loop3 +endi + +if $data01 != 0 then + print ======data01=$data01 + goto loop3 +endi + +if $data02 != 10 then + print ======data02=$data02 + goto loop3 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop3 +endi + +if $data12 != NULL then + print ======data12=$data12 + goto loop3 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop3 +endi + +if $data22 != NULL then + print ======data22=$data22 + goto loop3 +endi + +if $data31 != 1 then + print ======data31=$data31 + goto loop3 +endi + +if $data32 != NULL then + print ======data32=$data32 + goto loop3 +endi + +if $data41 != 1 then + print ======data41=$data41 + goto loop3 +endi + +if $data42 != NULL then + print ======data42=$data42 + goto loop3 +endi + +if $data51 != 0 then + print ======data51=$data51 + goto loop3 +endi + +if $data52 != 11 then + print ======data52=$data52 + goto loop3 +endi + + +print sql select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname range(1648791212000, 1648791217000) every(1s) fill(value, 110); +sql select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname range(1648791212000, 1648791217000) every(1s) fill(value, 110); + +print $data00 
$data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 +loop3_2: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select * from streamt3_2; +sql select * from streamt3_2; + +if $rows != 6 then + print ======rows=$rows + goto loop3_2 +endi + +if $data01 != 0 then + print ======data01=$data01 + goto loop3_2 +endi + +if $data02 != 10 then + print ======data02=$data02 + goto loop3_2 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop3_2 +endi + +if $data12 != 110 then + print ======data12=$data12 + goto loop3_2 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop3_2 +endi + +if $data22 != 110 then + print ======data22=$data22 + goto loop3_2 +endi + +if $data31 != 1 then + print ======data31=$data31 + goto loop3_2 +endi + +if $data32 != 110 then + print ======data32=$data32 + goto loop3_2 +endi + +if $data41 != 1 then + print ======data41=$data41 + goto loop3_2 +endi + +if $data42 != 110 then + print ======data42=$data42 + goto loop3_2 +endi + +if $data51 != 0 then + print ======data51=$data51 + goto loop3_2 +endi + +if $data52 != 11 then + print ======data52=$data52 + goto loop3_2 +endi + +print step linear + +sql create database test4 vgroups 4; +sql use test4; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4 as select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 
values(1648791217000,11,55,3) (1648791212000,10,10,3) (1648791212001,11,NULL,3); + +sleep 500 + +sql insert into t1 values(1648791214001,20,NULL,3) (1648791213000,22,NULL,3); + +print sql select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname range(1648791212000, 1648791217000) every(1s) fill(linear); +sql select _irowts, _isfilled as a1, interp(b, 1) from st partition by tbname range(1648791212000, 1648791217000) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 +loop4: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 300 + +print sql select * from streamt4; +sql select * from streamt4; + +if $rows != 6 then + print ======rows=$rows + goto loop4 +endi + +if $data01 != 0 then + print ======data01=$data01 + goto loop4 +endi + +if $data02 != 10 then + print ======data02=$data02 + goto loop4 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop4 +endi + +if $data12 != 19 then + print ======data12=$data12 + goto loop4 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop4 +endi + +if $data22 != 28 then + print ======data22=$data22 + goto loop4 +endi + +if $data31 != 1 then + print ======data31=$data31 + goto loop4 +endi + +if $data32 != 37 then + print ======data32=$data32 + goto loop4 +endi + +if $data41 != 1 then + print ======data41=$data41 + goto loop4 +endi + +if $data42 != 46 then + print ======data42=$data42 + goto loop4 +endi + +if $data51 != 0 then + print ======data51=$data51 + goto loop4 +endi + +if $data52 != 55 then + print ======data52=$data52 + goto loop4 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git 
a/tests/script/tsim/stream/streamInterpPartitionBy0.sim b/tests/script/tsim/stream/streamInterpPartitionBy0.sim new file mode 100644 index 00000000000..6b222de228e --- /dev/null +++ b/tests/script/tsim/stream/streamInterpPartitionBy0.sim @@ -0,0 +1,592 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step prev +print =============== create database +sql create database test vgroups 1; +sql use test; +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); + +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create table t3 using st tags(2,2,2); + +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), _isfilled, tbname, b, c from st partition by tbname, b,c every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791212000,0,0,0,0.0) (1648791212001,1,0,0,1.0) (1648791217001,2,0,0,2.1) t2 values(1648791212000,0,1,1,0.0) (1648791212001,1,1,1,1.0) (1648791217001,2,1,1,2.1); + +sql insert into t3 values(1648791212000,0,2,2,0.0) (1648791212001,1,2,2,1.0) (1648791217001,2,2,2,2.1); + +print sql select _irowts, interp(a), _isfilled, b from st where b = 0 and c = 0 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(prev) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 0 and c = 0 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(prev) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +print sql select _irowts, interp(a), _isfilled, b from st 
where b = 1 and c =1 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(prev) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 1 and c = 1 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(prev) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +print sql select _irowts, interp(a), _isfilled, b from st where b = 2 and c = 2 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(prev) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 2 and c = 2 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(prev) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt where b = 0 and c = 0 order by 1; +sql select * from streamt where b = 0 and c = 0 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 0 then + 
print ======data01=$data01 + goto loop0 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop0 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop0 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop0 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop0 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop0 +endi + +if $data31 != 1 then + print ======data31=$data31 + goto loop0 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop0 +endi + +if $data41 != 1 then + print ======data41=$data41 + goto loop0 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop0 +endi + +if $data51 != 1 then + print ======data51=$data51 + goto loop0 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop0 +endi + +print 1 sql select * from streamt where b = 1 and c = 1 order by 1; +sql select * from streamt where b = 1 and c = 1 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop0 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop0 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop0 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop0 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop0 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop0 +endi + +if $data31 != 1 then + print ======data31=$data31 + goto loop0 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop0 +endi + +if $data41 != 1 then + print 
======data41=$data41 + goto loop0 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop0 +endi + +if $data51 != 1 then + print ======data51=$data51 + goto loop0 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop0 +endi + +print 2 sql select * from streamt where b = 2 and c = 2 order by 1; +sql select * from streamt where b = 2 and c = 2 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop0 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop0 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop0 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop0 +endi + +if $data21 != 1 then + print ======data21=$data21 + goto loop0 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop0 +endi + +if $data31 != 1 then + print ======data31=$data31 + goto loop0 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop0 +endi + +if $data41 != 1 then + print ======data41=$data41 + goto loop0 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop0 +endi + +if $data51 != 1 then + print ======data51=$data51 + goto loop0 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop0 +endi + +print step next +print =============== create database +sql create database test2 vgroups 1; +sql use test2; +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); + +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create table t3 using st 
tags(2,2,2); + +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _irowts, interp(a), _isfilled, tbname, b, c from st partition by tbname, b,c every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791212000,0,0,0,0.0) (1648791212001,1,0,0,1.0) (1648791217001,2,0,0,2.1) t2 values(1648791212000,0,1,1,0.0) (1648791212001,1,1,1,1.0) (1648791217001,2,1,1,2.1); + +sql insert into t3 values(1648791212000,0,2,2,0.0) (1648791212001,1,2,2,1.0) (1648791217001,2,2,2,2.1); + +print sql select _irowts, interp(a), _isfilled, b from st where b = 0 and c = 0 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(next) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 0 and c = 0 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(next) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +print sql select _irowts, interp(a), _isfilled, b from st where b = 1 and c =1 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(next) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 1 and c = 1 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(next) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +print sql select _irowts, interp(a), _isfilled, b from st where b = 2 and 
c = 2 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(next) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 2 and c = 2 partition by tbname, b,c range(1648791212000, 1648791217001) every(1s) fill(next) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt2 where b = 0 and c = 0 order by 1; +sql select * from streamt2 where b = 0 and c = 0 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop1 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop1 +endi + +if $data11 != 2 then + print ======data11=$data11 + goto loop1 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop1 +endi + +if $data21 != 2 then + print ======data21=$data21 + goto loop1 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop1 +endi + +if $data31 != 2 then + print ======data31=$data31 + goto loop1 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop1 +endi + +if $data41 != 2 then + print ======data41=$data41 + goto loop1 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop1 +endi + 
+if $data51 != 2 then + print ======data51=$data51 + goto loop1 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop1 +endi + +print 1 sql select * from streamt2 where b = 1 and c = 1 order by 1; +sql select * from streamt2 where b = 1 and c = 1 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop1 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop1 +endi + +if $data11 != 2 then + print ======data11=$data11 + goto loop1 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop1 +endi + +if $data21 != 2 then + print ======data21=$data21 + goto loop1 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop1 +endi + +if $data31 != 2 then + print ======data31=$data31 + goto loop1 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop1 +endi + +if $data41 != 2 then + print ======data41=$data41 + goto loop1 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop1 +endi + +if $data51 != 2 then + print ======data51=$data51 + goto loop1 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop1 +endi + +print 2 sql select * from streamt2 where b = 2 and c = 2 order by 1; +sql select * from streamt2 where b = 2 and c = 2 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 
$data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop1 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop1 +endi + +if $data11 != 2 then + print ======data11=$data11 + goto loop1 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop1 +endi + +if $data21 != 2 then + print ======data21=$data21 + goto loop1 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop1 +endi + +if $data31 != 2 then + print ======data31=$data31 + goto loop1 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop1 +endi + +if $data41 != 2 then + print ======data41=$data41 + goto loop1 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop1 +endi + +if $data51 != 2 then + print ======data51=$data51 + goto loop1 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop1 +endi + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpPartitionBy1.sim b/tests/script/tsim/stream/streamInterpPartitionBy1.sim new file mode 100644 index 00000000000..ecb5e0ee629 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpPartitionBy1.sim @@ -0,0 +1,592 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step NULL +print =============== create database +sql create database test vgroups 1; +sql use test; +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); + +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create table t3 using st tags(2,2,2); + +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), _isfilled, tbname, b, c from st partition by tbname, b,c every(1s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + +sql 
insert into t1 values(1648791212000,0,0,0,0.0) (1648791212001,1,0,0,1.0) (1648791217001,2,0,0,2.1) t2 values(1648791212000,0,1,1,0.0) (1648791212001,1,1,1,1.0) (1648791217001,2,1,1,2.1); + +sql insert into t3 values(1648791212000,0,2,2,0.0) (1648791212001,1,2,2,1.0) (1648791217001,2,2,2,2.1); + +print sql select _irowts, interp(a), _isfilled, b from st where b = 0 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(NULL) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 0 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(NULL) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +print sql select _irowts, interp(a), _isfilled, b from st where b = 1 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(NULL) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 1 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(NULL) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +print sql select _irowts, interp(a), _isfilled, b from st where b = 2 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(NULL) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 2 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(NULL) order by b, 1; + +print $data00 $data01 $data02 
$data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt where b = 0 and c = 0 order by 1; +sql select * from streamt where b = 0 and c = 0 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop0 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop0 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop0 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop0 +endi + +if $data21 != NULL then + print ======data21=$data21 + goto loop0 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop0 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop0 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop0 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop0 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop0 +endi + +if $data51 != NULL then + print ======data51=$data51 + goto loop0 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop0 +endi + +print 1 sql select * from streamt where b = 1 and c = 1 order by 1; +sql select * from streamt where b = 1 and c = 1 order by 1; + +print $data00 
$data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop0 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop0 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop0 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop0 +endi + +if $data21 != NULL then + print ======data21=$data21 + goto loop0 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop0 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop0 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop0 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop0 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop0 +endi + +if $data51 != NULL then + print ======data51=$data51 + goto loop0 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop0 +endi + +print 2 sql select * from streamt where b = 2 and c = 2 order by 1; +sql select * from streamt where b = 2 and c = 2 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop0 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop0 +endi + +if $data11 != NULL 
then + print ======data11=$data11 + goto loop0 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop0 +endi + +if $data21 != NULL then + print ======data21=$data21 + goto loop0 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop0 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop0 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop0 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop0 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop0 +endi + +if $data51 != NULL then + print ======data51=$data51 + goto loop0 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop0 +endi + +print step linear +print =============== create database +sql create database test2 vgroups 1; +sql use test2; +sql create stable st(ts timestamp,a int,b int,c int, d double) tags(ta int,tb int,tc int); + +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create table t3 using st tags(2,2,2); + +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _irowts, interp(a), _isfilled, tbname, b, c from st partition by tbname, b,c every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791212000,0,0,0,0.0) (1648791212001,10,0,0,1.0) (1648791217001,20,0,0,2.1) t2 values(1648791212000,0,1,1,0.0) (1648791212001,10,1,1,1.0) (1648791217001,20,1,1,2.1); + +sql insert into t3 values(1648791212000,0,2,2,0.0) (1648791212001,10,2,2,1.0) (1648791217001,20,2,2,2.1); + +print sql select _irowts, interp(a), _isfilled, b from st where b = 0 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(linear) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 0 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(linear) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 
+print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +$loop_count = 0 +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt2 where b = 0 and c = 0 order by 1; +sql select * from streamt2 where b = 0 and c = 0 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop1 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop1 +endi + +if $data11 != 11 then + print ======data11=$data11 + goto loop1 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop1 +endi + +if $data21 != 13 then + print ======data21=$data21 + goto loop1 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop1 +endi + +if $data31 != 15 then + print ======data31=$data31 + goto loop1 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop1 +endi + +if $data41 != 17 then + print ======data41=$data41 + goto loop1 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop1 +endi + +if $data51 != 19 then + print ======data51=$data51 + goto loop1 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop1 +endi + +print sql select _irowts, interp(a), _isfilled, b from st where b = 1 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(linear) order by b, 1; +sql select 
_irowts, interp(a), _isfilled, b from st where b = 1 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(linear) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +print 1 sql select * from streamt2 where b = 1 and c = 1 order by 1; +sql select * from streamt2 where b = 1 and c = 1 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop1 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop1 +endi + +if $data11 != 11 then + print ======data11=$data11 + goto loop1 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop1 +endi + +if $data21 != 13 then + print ======data21=$data21 + goto loop1 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop1 +endi + +if $data31 != 15 then + print ======data31=$data31 + goto loop1 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop1 +endi + +if $data41 != 17 then + print ======data41=$data41 + goto loop1 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop1 +endi + +if $data51 != 19 then + print ======data51=$data51 + goto loop1 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop1 +endi + +print sql select _irowts, interp(a), _isfilled, b from st where b = 2 partition by tbname, 
b, c range(1648791212000, 1648791217001) every(1s) fill(linear) order by b, 1; +sql select _irowts, interp(a), _isfilled, b from st where b = 2 partition by tbname, b, c range(1648791212000, 1648791217001) every(1s) fill(linear) order by b, 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + +print 2 sql select * from streamt2 where b = 2 and c = 2 order by 1; +sql select * from streamt2 where b = 2 and c = 2 order by 1; + +print $data00 $data01 $data02 $data03 $data04 $data05 +print $data10 $data11 $data12 $data13 $data14 $data15 +print $data20 $data21 $data22 $data23 $data24 $data25 +print $data30 $data31 $data32 $data33 $data34 $data35 +print $data40 $data41 $data42 $data43 $data44 $data45 +print $data50 $data51 $data52 $data53 $data54 $data55 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop1 +endi + +if $data02 != 0 then + print ======data02=$data02 + goto loop1 +endi + +if $data11 != 11 then + print ======data11=$data11 + goto loop1 +endi + +if $data12 != 1 then + print ======data12=$data12 + goto loop1 +endi + +if $data21 != 13 then + print ======data21=$data21 + goto loop1 +endi + +if $data22 != 1 then + print ======data22=$data22 + goto loop1 +endi + +if $data31 != 15 then + print ======data31=$data31 + goto loop1 +endi + +if $data32 != 1 then + print ======data32=$data32 + goto loop1 +endi + +if $data41 != 17 then + print ======data41=$data41 + goto loop1 +endi + +if $data42 != 1 then + print ======data41=$data41 + goto loop1 +endi + +if $data51 != 19 then + print ======data51=$data51 + goto loop1 +endi + +if $data52 != 1 then + print ======data51=$data51 + goto loop1 +endi + 
+ +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpPrev0.sim b/tests/script/tsim/stream/streamInterpPrev0.sim new file mode 100644 index 00000000000..86f7f95a5fb --- /dev/null +++ b/tests/script/tsim/stream/streamInterpPrev0.sim @@ -0,0 +1,434 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,1,1,1,1.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop0 +endi + + +sql insert into t1 values(1648791213001,2,2,2,1.1); +sql insert into t1 values(1648791213009,3,3,3,1.0); + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 
$data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop1 +endi + + +sql insert into t1 values(1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 3 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 3 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 3 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 3 then + print ======data41=$data41 + goto loop2 +endi + + +sql insert into t1 values(1648791215001,5,5,5,5.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), 
interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 3 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop3 +endi + +if $data11 != 3 then + print ======data11=$data11 + goto loop3 +endi + +if $data21 != 3 then + print ======data21=$data21 + goto loop3 +endi + +if $data31 != 5 then + print ======data31=$data31 + goto loop3 +endi + +if $data41 != 5 then + print ======data41=$data41 + goto loop3 +endi + +print step2 + +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212000,0,0,0,0.0) (1648791213001,1,1,1,1.0) (1648791213009,2,2,2,1.1) (1648791215001,5,5,5,5.1) (1648791217001,4,4,4,4.1); + + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), 
interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt2; +sql select * from streamt2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != 0 then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != 2 then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != 2 then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != 5 then + print ======data41=$data41 + goto loop4 +endi + +if $data51 != 5 then + print ======data51=$data51 + goto loop4 +endi + +print step3 +sql create database test3 vgroups 1; +sql use test3; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791210001,0,0,0,0.0) (1648791217001,4,4,4,4.1); + + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791210000, 1648791217000) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), 
interp(d) from t1 range(1648791210000, 1648791217000) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 + +$loop_count = 0 +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt3; +sql select * from streamt3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 + + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop5 +endi + +sql insert into t1 values(1648791213001,1,1,1,1.0) (1648791213009,2,2,2,1.1) (1648791215001,5,5,5,5.1) + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791210000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791210000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 + +$loop_count = 0 +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt3; +sql select * from streamt3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print 
$data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 + + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop6 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop6 +endi + +if $data11 != 0 then + print ======data11=$data11 + goto loop6 +endi + +if $data21 != 0 then + print ======data21=$data21 + goto loop6 +endi + +if $data31 != 2 then + print ======data31=$data31 + goto loop6 +endi + +if $data41 != 2 then + print ======data41=$data41 + goto loop6 +endi + +if $data51 != 5 then + print ======data51=$data51 + goto loop6 +endi + +if $data61 != 5 then + print ======data61=$data61 + goto loop6 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpPrev1.sim b/tests/script/tsim/stream/streamInterpPrev1.sim new file mode 100644 index 00000000000..0beeb3e9a76 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpPrev1.sim @@ -0,0 +1,404 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,1,1,1,1.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 
$data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop0 +endi + + +sql insert into t1 values(1648791213009,3,3,3,1.0) (1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 3 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 3 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 3 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 3 then + print ======data41=$data41 + goto loop2 +endi + + +print step2 + +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c 
int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791213000,1,1,1,1.0); + + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop3 +endi + + +sql insert into t1 values(1648791213009,3,3,3,1.0) (1648791217001,4,4,4,4.1) (1648791219000,5,5,5,5.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +$loop_count = 0 +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 
$data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != 3 then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != 3 then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != 3 then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != 3 then + print ======data41=$data41 + goto loop4 +endi + +if $data51 != 4 then + print ======data51=$data51 + goto loop4 +endi + +if $data61 != 5 then + print ======data61=$data61 + goto loop4 +endi + +print step3 + +sql create database test3 vgroups 1; +sql use test3; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791213001,1,1,1,1.0) (1648791219001,2,2,2,2.1) (1648791229001,3,3,3,3.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +$loop_count = 0 +loop5: + +sleep 300 + +print 
sql select * from streamt order by 1; +sql select * from streamt order by 1; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 16 then + print =====rows=$rows + goto loop5 +endi + +sql insert into t1 values(1648791215001,4,4,4,4.0) (1648791217001,5,5,5,5.1) (1648791222000,6,6,6,6.1) (1648791226000,7,7,7,7.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +$loop_count = 0 +loop6: + +sleep 300 + +print sql select * from streamt order by 1; +sql select * from streamt order by 1; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 16 then + goto loop6 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop6 +endi + +if $data11 != 1 then + print =====data11=$data11 + goto loop6 +endi + +if $data21 != 4 then + print =====data21=$data21 + goto loop6 +endi + +if $data31 != 4 then + print =====data31=$data31 + goto loop6 +endi + +if $data41 != 5 then + print =====data41=$data41 + goto loop6 +endi + +if $data51 != 5 then + print =====data51=$data51 + goto loop6 +endi + +if $data61 != 2 then + print =====data61=$data61 + goto loop6 +endi + +if $data71 != 2 then + print =====data71=$data71 + goto loop6 +endi + +if $data81 != 6 then + print =====data81=$data81 + goto loop6 +endi + +if $data91 != 6 then + print =====data91=$data91 + goto loop6 +endi + +if $data[10][1] != 6 then + print 
=====data[10][1]=$data[10][1] + goto loop6 +endi + +if $data[11][1] != 6 then + print =====data[11][1]=$data[11][1] + goto loop6 +endi + +if $data[12][1] != 7 then + print =====data[12][1]=$data[12][1] + goto loop6 +endi + +if $data[13][1] != 7 then + print =====data[13][1]=$data[13][1] + goto loop6 +endi + +if $data[14][1] != 7 then + print =====data[14][1]=$data[14][1] + goto loop6 +endi + +if $data[15][1] != 7 then + print =====data[15][1]=$data[15][1] + goto loop6 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpPrimaryKey0.sim b/tests/script/tsim/stream/streamInterpPrimaryKey0.sim new file mode 100644 index 00000000000..9edddff6dbf --- /dev/null +++ b/tests/script/tsim/stream/streamInterpPrimaryKey0.sim @@ -0,0 +1,452 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp,a int primary key,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(b) from st partition by tbname every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,9,9,9,9.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print 
======rows=$rows + goto loop0 +endi + + +sql insert into t1 values(1648791213000,10,10,10,10.0); +sql insert into t1 values(1648791213009,30,30,30,30.0); + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop1 +endi + + +sql insert into t1 values(1648791213009,20,20,20,20.0) (1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 
20 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 20 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 20 then + print ======data41=$data41 + goto loop2 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp,a int ,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt(ts, b primary key) as select _irowts, interp(b) from st partition by tbname every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,9,9,9,9.0); + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + +sql insert into t1 values(1648791213000,10,10,10,10.0); + +sql insert into t1 values(1648791213009,30,30,30,30.0); + +$loop_count = 0 + +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if 
$rows != 1 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 10 then + print ======data01=$data01 + goto loop4 +endi + +sql insert into t1 values(1648791217001,4,4,10,4.1); + +sleep 2000 + +sql insert into t1 values(1648791213009,20,20,10,20.0); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop5 +endi + +# row 0 +if $data01 != 10 then + print ======data01=$data01 + goto loop5 +endi + +if $data11 != 20 then + print ======data11=$data11 + goto loop5 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop5 +endi + +if $data31 != 20 then + print ======data31=$data31 + goto loop5 +endi + +if $data41 != 20 then + print ======data41=$data41 + goto loop5 +endi + +print step3 +print =============== create database +sql create database test3 vgroups 1; +sql use test3; + +sql create stable st(ts timestamp,a int primary key, b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream 
streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt(ts, b primary key) as select _irowts, interp(b) from st partition by tbname, c every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,9,9,10,9.0); + +$loop_count = 0 + +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop6 +endi + + +sql insert into t1 values(1648791213000,10,10,10,10.0); +sql insert into t1 values(1648791213009,30,30,10,30.0); + +$loop_count = 0 + +loop7: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop7 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop7 +endi + + +sql insert into t1 values(1648791217001,4,4,10,4.1); + +sleep 2000 + +sql insert into t1 values(1648791213009,20,20,10,20.0); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print 
$data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop8: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop8 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop8 +endi + +if $data11 != 20 then + print ======data11=$data11 + goto loop8 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop8 +endi + +if $data31 != 20 then + print ======data31=$data31 + goto loop8 +endi + +if $data41 != 20 then + print ======data41=$data41 + goto loop8 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpPrimaryKey1.sim b/tests/script/tsim/stream/streamInterpPrimaryKey1.sim new file mode 100644 index 00000000000..04a1f299be3 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpPrimaryKey1.sim @@ -0,0 +1,458 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp,a int primary key,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(b) from st partition by tbname every(1s) 
fill(next); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,10,10,10,10.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + + +sql insert into t1 values(1648791213000,9,9,9,9.0); +sql insert into t1 values(1648791213009,30,30,30,30.0); + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop1 +endi + + +sql insert into t1 values(1648791213009,20,20,20,20.0) (1648791217001,40,40,40,40.1); + +sleep 2000 + +sql insert into t1 values(1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 
+print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 4 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 4 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop2 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp,a int ,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt(ts, b primary key) as select _irowts, interp(b) from st partition by tbname every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,10,10,10,10.0); + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 
$data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + + +sql insert into t1 values(1648791213000,9,9,9,9.0); +sql insert into t1 values(1648791213009,30,30,30,30.0); + +$loop_count = 0 + +loop4: + +sleep 500 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop4 +endi + + +sql insert into t1 values(1648791213009,20,20,20,20.0) (1648791217001,40,40,40,40.1); + +sleep 2000 + +sql insert into t1 values(1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print 
======rows=$rows + goto loop5 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop5 +endi + +if $data11 != 4 then + print ======data11=$data11 + goto loop5 +endi + +if $data21 != 4 then + print ======data21=$data21 + goto loop5 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop5 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop5 +endi + +print step3 +print =============== create database +sql create database test3 vgroups 1; +sql use test3; + +sql create stable st(ts timestamp,a int primary key, b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt(ts, b primary key) as select _irowts, interp(b) from st partition by tbname, c every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791213000,10,10,10,10.0); + +$loop_count = 0 + +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop6 +endi + + +sql insert into t1 values(1648791213000,9,9,10,9.0); +sql insert into t1 values(1648791213009,30,30,10,30.0); + +$loop_count = 0 + +loop7: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 
$data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop7 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop7 +endi + + +sql insert into t1 values(1648791213009,20,20,10,20.0) (1648791217001,40,40,10,40.1); + +sleep 1000 + +sql insert into t1 values(1648791217001,4,4,10,4.1); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 + + +$loop_count = 0 +loop8: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop8 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop8 +endi + +if $data11 != 4 then + print ======data11=$data11 + goto loop8 +endi + +if $data21 != 4 then + print ======data21=$data21 + goto loop8 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop8 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop8 +endi + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git 
a/tests/script/tsim/stream/streamInterpPrimaryKey2.sim b/tests/script/tsim/stream/streamInterpPrimaryKey2.sim new file mode 100644 index 00000000000..f06e1ecd034 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpPrimaryKey2.sim @@ -0,0 +1,452 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp,a int primary key,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(b) from st partition by tbname every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,9,9,9,9.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + + +sql insert into t1 values(1648791213000,10,10,10,10.0); +sql insert into t1 values(1648791213009,30,30,30,30.0); + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 
$data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop1 +endi + + +sql insert into t1 values(1648791213009,20,20,20,20.0) (1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 16 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 12 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 8 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop2 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp,a int ,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 
using st tags(2,2,2); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt(ts, b primary key) as select _irowts, interp(b) from st partition by tbname every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,9,9,9,9.0); + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + +sql insert into t1 values(1648791213000,10,10,10,10.0); + +sql insert into t1 values(1648791213009,30,30,30,30.0); + +$loop_count = 0 + +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 10 then + print ======data01=$data01 + goto loop4 +endi + +sql insert into t1 values(1648791217001,4,4,10,4.1); + +sleep 2000 + +sql insert into t1 values(1648791213009,20,20,10,20.0); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 
$data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop5 +endi + +# row 0 +if $data01 != 10 then + print ======data01=$data01 + goto loop5 +endi + +if $data11 != 16 then + print ======data11=$data11 + goto loop5 +endi + +if $data21 != 12 then + print ======data21=$data21 + goto loop5 +endi + +if $data31 != 8 then + print ======data31=$data31 + goto loop5 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop5 +endi + +print step3 +print =============== create database +sql create database test3 vgroups 1; +sql use test3; + +sql create stable st(ts timestamp,a int primary key, b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt(ts, b primary key) as select _irowts, interp(b) from st partition by tbname, c every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,9,9,10,9.0); + +$loop_count = 0 + +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 
$data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop6 +endi + + +sql insert into t1 values(1648791213000,10,10,10,10.0); +sql insert into t1 values(1648791213009,30,30,10,30.0); + +$loop_count = 0 + +loop7: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop7 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop7 +endi + + +sql insert into t1 values(1648791217001,4,4,10,4.1); + +sleep 2000 + +sql insert into t1 values(1648791213009,20,20,10,20.0); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop8: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 
$data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop8 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop8 +endi + +if $data11 != 16 then + print ======data11=$data11 + goto loop8 +endi + +if $data21 != 12 then + print ======data21=$data21 + goto loop8 +endi + +if $data31 != 8 then + print ======data31=$data31 + goto loop8 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop8 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpPrimaryKey3.sim b/tests/script/tsim/stream/streamInterpPrimaryKey3.sim new file mode 100644 index 00000000000..725cf8d8503 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpPrimaryKey3.sim @@ -0,0 +1,452 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp,a int primary key,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(b) from st partition by tbname every(1s) fill(value,100); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,9,9,9,9.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print 
$data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + + +sql insert into t1 values(1648791213000,10,10,10,10.0); +sql insert into t1 values(1648791213009,30,30,30,30.0); + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop1 +endi + + +sql insert into t1 values(1648791213009,20,20,20,20.0) (1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 
+if $data01 != 9 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 100 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 100 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 100 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 100 then + print ======data41=$data41 + goto loop2 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp,a int ,b int,c int, d double) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt(ts, b primary key) as select _irowts, interp(b) from st partition by tbname every(1s) fill(value,100); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,9,9,9,9.0); + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + +sql insert into t1 values(1648791213000,10,10,10,10.0); + +sql insert into t1 values(1648791213009,30,30,30,30.0); + +$loop_count = 0 + +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 
$data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 10 then + print ======data01=$data01 + goto loop4 +endi + +sql insert into t1 values(1648791217001,4,4,10,4.1); + +sleep 2000 + +sql insert into t1 values(1648791213009,20,20,10,20.0); + +print sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop5 +endi + +# row 0 +if $data01 != 10 then + print ======data01=$data01 + goto loop5 +endi + +if $data11 != 100 then + print ======data11=$data11 + goto loop5 +endi + +if $data21 != 100 then + print ======data21=$data21 + goto loop5 +endi + +if $data31 != 100 then + print ======data31=$data31 + goto loop5 +endi + +if $data41 != 100 then + print ======data41=$data41 + goto loop5 +endi + +print step3 +print =============== create database +sql create database test3 vgroups 1; +sql use test3; + +sql create stable st(ts timestamp,a int primary key, b int,c int, d double) tags(ta int,tb int,tc 
int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt(ts, b primary key) as select _irowts, interp(b) from st partition by tbname, c every(1s) fill(value,100); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,9,9,10,9.0); + +$loop_count = 0 + +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop6 +endi + + +sql insert into t1 values(1648791213000,10,10,10,10.0); +sql insert into t1 values(1648791213009,30,30,10,30.0); + +$loop_count = 0 + +loop7: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop7 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop7 +endi + + +sql insert into t1 values(1648791217001,4,4,10,4.1); + +sleep 2000 + +sql insert into t1 values(1648791213009,20,20,10,20.0); + +print sql select _irowts,interp(b) from t1 range(1648791212000, 1648791217001) every(1s) fill(value,100); +sql select _irowts, interp(b) from t1 range(1648791212000, 1648791217001) every(1s) 
fill(value,100); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop8: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop8 +endi + +# row 0 +if $data01 != 9 then + print ======data01=$data01 + goto loop8 +endi + +if $data11 != 100 then + print ======data11=$data11 + goto loop8 +endi + +if $data21 != 100 then + print ======data21=$data21 + goto loop8 +endi + +if $data31 != 100 then + print ======data31=$data31 + goto loop8 +endi + +if $data41 != 100 then + print ======data41=$data41 + goto loop8 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpScalar.sim b/tests/script/tsim/stream/streamInterpScalar.sim new file mode 100644 index 00000000000..e4e280138b4 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpScalar.sim @@ -0,0 +1,417 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, now, floor(interp(d)), interp(floor(d)), a from t1 partition by 
tbname, a every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791217001,1,1,1,1.1); + +sleep 2000 + +sql insert into t1 values(1648791212009,1,3,3,3.3) (1648791214001,1,4,4,4.4) (1648791219001,1,5,5,5.5) (1648791220001,1,6,6,6.6); + +print sql select _irowts, now, floor(interp(d)), interp(floor(d)), a from t1 partition by tbname, a range(1648791213000, 1648791220001) every(1s) fill(prev); +sql select _irowts, now, floor(interp(d)), interp(floor(d)), a from t1 partition by tbname, a range(1648791213000, 1648791220001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt order by 1; +sql select * from streamt order by 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 8 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data02 != 3.000000000 then + print ======data02=$data02 + goto loop0 +endi + +if $data03 != 3.000000000 then + print ======data03=$data03 + goto loop0 +endi + +if $data12 != 3.000000000 then + print ======data12=$data12 + goto loop0 +endi + +if $data13 != 3.000000000 then + print ======data13=$data13 + goto loop0 +endi + +if $data22 != 4.000000000 then + print ======data22=$data22 + goto loop0 +endi + +if $data23 != 4.000000000 then + print ======data23=$data23 + goto loop0 +endi + +if $data32 != 4.000000000 then + print 
======data32=$data32 + goto loop0 +endi + +if $data33 != 4.000000000 then + print ======data33=$data33 + goto loop0 +endi + +if $data42 != 4.000000000 then + print ======data42=$data42 + goto loop0 +endi + +if $data43 != 4.000000000 then + print ======data43=$data43 + goto loop0 +endi + +if $data52 != 1.000000000 then + print ======data52=$data52 + goto loop0 +endi + +if $data53 != 1.000000000 then + print ======data53=$data53 + goto loop0 +endi + +if $data62 != 1.000000000 then + print ======data62=$data62 + goto loop0 +endi + +if $data63 != 1.000000000 then + print ======data63=$data63 + goto loop0 +endi + +if $data72 != 5.000000000 then + print ======data72=$data72 + goto loop0 +endi + +if $data73 != 5.000000000 then + print ======data73=$data73 + goto loop0 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, now, floor(interp(d)), interp(floor(d)), a from t1 partition by tbname, a every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791217001,1,1,1,1.1); + +sleep 2000 + +sql insert into t1 values(1648791212009,1,3,3,3.3) (1648791214001,1,4,4,4.4) (1648791219001,1,5,5,5.5) (1648791220001,1,6,6,6.6); + +print sql select _irowts, now, floor(interp(d)), interp(floor(d)), a from t1 partition by tbname, a range(1648791213000, 1648791220001) every(1s) fill(next); +sql select _irowts, now, floor(interp(d)), interp(floor(d)), a from t1 partition by tbname, a range(1648791213000, 1648791220001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 
$data54 + + +$loop_count = 0 +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt order by 1; +sql select * from streamt order by 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 8 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data02 != 4.000000000 then + print ======data02=$data02 + goto loop1 +endi + +if $data03 != 4.000000000 then + print ======data03=$data03 + goto loop1 +endi + +if $data12 != 4.000000000 then + print ======data12=$data12 + goto loop1 +endi + +if $data13 != 4.000000000 then + print ======data13=$data13 + goto loop1 +endi + +if $data22 != 1.000000000 then + print ======data22=$data22 + goto loop1 +endi + +if $data23 != 1.000000000 then + print ======data23=$data23 + goto loop1 +endi + +if $data32 != 1.000000000 then + print ======data32=$data32 + goto loop1 +endi + +if $data33 != 1.000000000 then + print ======data33=$data33 + goto loop1 +endi + +if $data42 != 1.000000000 then + print ======data42=$data42 + goto loop1 +endi + +if $data43 != 1.000000000 then + print ======data43=$data43 + goto loop1 +endi + +if $data52 != 5.000000000 then + print ======data52=$data52 + goto loop1 +endi + +if $data53 != 5.000000000 then + print ======data53=$data53 + goto loop1 +endi + +if $data62 != 5.000000000 then + print ======data62=$data62 + goto loop1 +endi + +if $data63 != 5.000000000 then + print ======data63=$data63 + goto loop1 +endi + +if $data72 != 6.000000000 then + print ======data72=$data72 + goto loop1 +endi + +if $data73 != 6.000000000 then + print ======data73=$data73 + goto loop1 +endi + +print step3 +print =============== create database +sql create database test3 vgroups 1; +sql use 
test3; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, now, floor(interp(d)), interp(floor(d)), a from t1 partition by tbname, a every(1s) fill(value, 100, 200); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791217001,1,1,1,1.1); + +sleep 2000 + +sql insert into t1 values(1648791212009,1,3,3,3.3) (1648791214001,1,4,4,4.4) (1648791219001,1,5,5,5.5) (1648791220001,1,6,6,6.6); + +print sql select _irowts, now, floor(interp(d)), interp(floor(d)), a from t1 partition by tbname, a range(1648791213000, 1648791220001) every(1s) fill(value, 100, 200); +sql select _irowts, now, floor(interp(d)), interp(floor(d)), a from t1 partition by tbname, a range(1648791213000, 1648791220001) every(1s) fill(value, 100, 200); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt order by 1; +sql select * from streamt order by 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 8 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data02 != 100.000000000 then + print ======data02=$data02 + goto loop2 +endi + +if $data03 != 200.000000000 then + print ======data03=$data03 + goto loop2 +endi + +if $data12 != 100.000000000 then + print ======data12=$data12 + goto loop2 +endi + +if $data13 
!= 200.000000000 then + print ======data13=$data13 + goto loop2 +endi + +if $data22 != 100.000000000 then + print ======data22=$data22 + goto loop2 +endi + +if $data23 != 200.000000000 then + print ======data23=$data23 + goto loop2 +endi + +if $data32 != 100.000000000 then + print ======data32=$data32 + goto loop2 +endi + +if $data33 != 200.000000000 then + print ======data33=$data33 + goto loop2 +endi + +if $data42 != 100.000000000 then + print ======data42=$data42 + goto loop2 +endi + +if $data43 != 200.000000000 then + print ======data43=$data43 + goto loop2 +endi + +if $data52 != 100.000000000 then + print ======data52=$data52 + goto loop2 +endi + +if $data53 != 200.000000000 then + print ======data53=$data53 + goto loop2 +endi + +if $data62 != 100.000000000 then + print ======data62=$data62 + goto loop2 +endi + +if $data63 != 200.000000000 then + print ======data63=$data63 + goto loop2 +endi + +if $data72 != 100.000000000 then + print ======data72=$data72 + goto loop2 +endi + +if $data73 != 200.000000000 then + print ======data73=$data73 + goto loop2 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpUpdate.sim b/tests/script/tsim/stream/streamInterpUpdate.sim new file mode 100644 index 00000000000..59a188c2a6c --- /dev/null +++ b/tests/script/tsim/stream/streamInterpUpdate.sim @@ -0,0 +1,551 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212001,1,1,1,1.0) (1648791215000,10,1,1,1.0) 
(1648791217001,4,1,1,1.0) + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop0 +endi + +if $data11 != 1 then + print ======data11=$data11 + goto loop0 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop0 +endi + +if $data31 != 10 then + print ======data31=$data31 + goto loop0 +endi + +if $data41 != 10 then + print ======data41=$data41 + goto loop0 +endi + +sql insert into t1 values(1648791212001,2,2,2,2.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 
$data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 2 then + print ======data01=$data01 + goto loop1 +endi + +if $data11 != 2 then + print ======data11=$data11 + goto loop1 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop1 +endi + +if $data31 != 10 then + print ======data31=$data31 + goto loop1 +endi + +if $data41 != 10 then + print ======data41=$data41 + goto loop1 +endi + + +sql insert into t1 values(1648791215000,20,20,20,20.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 
$data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 2 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 2 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 20 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 20 then + print ======data41=$data41 + goto loop2 +endi + +sql insert into t1 values(1648791217001,8,8,8,8.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 2 then + print ======data01=$data01 + goto loop3 +endi + +if $data11 != 2 then + print ======data11=$data11 + goto loop3 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop3 +endi + +if $data31 != 20 then + print ======data31=$data31 + goto loop3 +endi 
+ +if $data41 != 20 then + print ======data41=$data41 + goto loop3 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(next); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212001,1,1,1,1.0) (1648791215000,10,1,1,1.0) (1648791217001,4,1,1,1.0) + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 10 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != 10 then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != 4 
then + print ======data41=$data41 + goto loop4 +endi + +sql insert into t1 values(1648791212001,2,2,2,2.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop5 +endi + +# row 0 +if $data01 != 10 then + print ======data01=$data01 + goto loop5 +endi + +if $data11 != 10 then + print ======data11=$data11 + goto loop5 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop5 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop5 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop5 +endi + + +sql insert into t1 values(1648791215000,20,20,20,20.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 
$data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop6 +endi + +# row 0 +if $data01 != 20 then + print ======data01=$data01 + goto loop6 +endi + +if $data11 != 20 then + print ======data11=$data11 + goto loop6 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop6 +endi + +if $data31 != 4 then + print ======data31=$data31 + goto loop6 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop6 +endi + +sql insert into t1 values(1648791217001,8,8,8,8.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(next); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop7: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 
$data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop7 +endi + +# row 0 +if $data01 != 20 then + print ======data01=$data01 + goto loop7 +endi + +if $data11 != 20 then + print ======data11=$data11 + goto loop7 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop7 +endi + +if $data31 != 8 then + print ======data31=$data31 + goto loop7 +endi + +if $data41 != 8 then + print ======data41=$data41 + goto loop7 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpUpdate1.sim b/tests/script/tsim/stream/streamInterpUpdate1.sim new file mode 100644 index 00000000000..45f16af35de --- /dev/null +++ b/tests/script/tsim/stream/streamInterpUpdate1.sim @@ -0,0 +1,551 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212001,1,1,1,1.0) (1648791215000,10,1,1,1.0) (1648791217001,4,1,1,1.0) + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 
$data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != NULL then + print ======data01=$data01 + goto loop0 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop0 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop0 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop0 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop0 +endi + +sql insert into t1 values(1648791212001,2,2,2,2.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print 
$data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != NULL then + print ======data01=$data01 + goto loop1 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop1 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop1 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop1 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop1 +endi + + +sql insert into t1 values(1648791215000,20,20,20,20.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != NULL then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 
NULL then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop2 +endi + +sql insert into t1 values(1648791217001,8,8,8,8.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(prev); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != NULL then + print ======data01=$data01 + goto loop3 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop3 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop3 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop3 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop3 +endi + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) 
from t1 every(1s) fill(value, 100, 200, 300, 400); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212001,1,1,1,1.0) (1648791215000,10,1,1,1.0) (1648791217001,4,1,1,1.0) + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value, 100, 200, 300, 400); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value, 100, 200, 300, 400); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 100 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != 100 then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != 100 then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != 100 then + print ======data41=$data41 + goto loop4 +endi + +sql insert into t1 values(1648791212001,2,2,2,2.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value, 100, 200, 300, 400); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from 
t1 range(1648791212000, 1648791217001) every(1s) fill(value, 100, 200, 300, 400); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop5 +endi + +# row 0 +if $data01 != 100 then + print ======data01=$data01 + goto loop5 +endi + +if $data11 != 100 then + print ======data11=$data11 + goto loop5 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop5 +endi + +if $data31 != 100 then + print ======data31=$data31 + goto loop5 +endi + +if $data41 != 100 then + print ======data41=$data41 + goto loop5 +endi + + +sql insert into t1 values(1648791215000,20,20,20,20.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value, 100, 200, 300, 400); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value, 100, 200, 300, 400); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop6: + +sleep 300 + 
+$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop6 +endi + +# row 0 +if $data01 != 100 then + print ======data01=$data01 + goto loop6 +endi + +if $data11 != 100 then + print ======data11=$data11 + goto loop6 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop6 +endi + +if $data31 != 100 then + print ======data31=$data31 + goto loop6 +endi + +if $data41 != 100 then + print ======data41=$data41 + goto loop6 +endi + +sql insert into t1 values(1648791217001,8,8,8,8.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value, 100, 200, 300, 400); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value, 100, 200, 300, 400); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop7: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + 
+# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop7 +endi + +# row 0 +if $data01 != 100 then + print ======data01=$data01 + goto loop7 +endi + +if $data11 != 100 then + print ======data11=$data11 + goto loop7 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop7 +endi + +if $data31 != 100 then + print ======data31=$data31 + goto loop7 +endi + +if $data41 != 100 then + print ======data41=$data41 + goto loop7 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpUpdate2.sim b/tests/script/tsim/stream/streamInterpUpdate2.sim new file mode 100644 index 00000000000..2a71474dd78 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpUpdate2.sim @@ -0,0 +1,279 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(linear); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212001,1,1,1,1.0) (1648791215000,10,1,1,1.0) (1648791217001,4,1,1,1.0) + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = 
$loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 3 then + print ======data01=$data01 + goto loop0 +endi + +if $data11 != 6 then + print ======data11=$data11 + goto loop0 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop0 +endi + +if $data31 != 7 then + print ======data31=$data31 + goto loop0 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop0 +endi + +sql insert into t1 values(1648791212001,2,2,2,2.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto 
loop1 +endi + +# row 0 +if $data01 != 4 then + print ======data01=$data01 + goto loop1 +endi + +if $data11 != 7 then + print ======data11=$data11 + goto loop1 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop1 +endi + +if $data31 != 7 then + print ======data31=$data31 + goto loop1 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop1 +endi + + +sql insert into t1 values(1648791215000,20,20,20,20.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 7 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != 13 then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != 12 then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != 4 then + print ======data41=$data41 + goto loop2 +endi + +sql insert into t1 values(1648791217001,8,8,8,8.1); + +print sql select _irowts, 
interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(linear); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 7 then + print ======data01=$data01 + goto loop3 +endi + +if $data11 != 13 then + print ======data11=$data11 + goto loop3 +endi + +if $data21 != 20 then + print ======data21=$data21 + goto loop3 +endi + +if $data31 != 14 then + print ======data31=$data31 + goto loop3 +endi + +if $data41 != 8 then + print ======data41=$data41 + goto loop3 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpValue0.sim b/tests/script/tsim/stream/streamInterpValue0.sim new file mode 100644 index 00000000000..bce7f0ece66 --- /dev/null +++ b/tests/script/tsim/stream/streamInterpValue0.sim @@ -0,0 +1,754 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts 
timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(NULL); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(value, 10,20,30,40); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,1,1,1,1.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop0 +endi + + +sql insert into t1 values(1648791213001,2,2,2,1.1); +sql insert into t1 values(1648791213009,3,3,3,1.0); + +$loop_count = 0 + +loop1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 +print $data10 $data11 $data12 $data13 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop1 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop1 +endi + + +sql insert into t1 values(1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print 
$data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != NULL then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop2 +endi + + + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(value, 10,20,30,40); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(value, 10,20,30,40); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt2; +sql select * from streamt2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 
$data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2_1 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop2_1 +endi + +if $data11 != 10 then + print ======data11=$data11 + goto loop2_1 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop2_1 +endi + +if $data31 != 10 then + print ======data31=$data31 + goto loop2_1 +endi + +if $data41 != 10 then + print ======data41=$data41 + goto loop2_1 +endi + +sql insert into t1 values(1648791215001,5,5,5,5.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 3 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop3 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop3 +endi + +if $data21 != NULL then + print ======data21=$data21 + goto loop3 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop3 +endi 
+ +if $data41 != NULL then + print ======data41=$data41 + goto loop3 +endi + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(value, 10,20,30,40); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(value, 10,20,30,40); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop3_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 3 sql select * from streamt2; +sql select * from streamt2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop3_1 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop3_1 +endi + +if $data11 != 10 then + print ======data11=$data11 + goto loop3_1 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop3_1 +endi + +if $data31 != 10 then + print ======data31=$data31 + goto loop3_1 +endi + +if $data41 != 10 then + print ======data41=$data41 + goto loop3_1 +endi + +if $data12 != 20 then + print ======data12=$data12 + goto loop3_1 +endi + +if $data13 != 30 then + print ======data13=$data13 + goto loop3_1 +endi + +if $data14 != 40.000000000 then + print ======data14=$data14 + goto loop3_1 +endi + + + +print step2 + +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream 
streams2_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(NULL); +sql create stream streams2_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(value, 10,20,30,40); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791212000,0,0,0,0.0) (1648791213001,1,1,1,1.0) (1648791213009,2,2,2,1.1) (1648791215001,5,5,5,5.1) (1648791217001,4,4,4,4.1); + + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt3; +sql select * from streamt3; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != NULL then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != NULL then 
+ print ======data41=$data41 + goto loop4 +endi + +if $data51 != NULL then + print ======data51=$data51 + goto loop4 +endi + + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value, 10,20,30,40); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791212000, 1648791217001) every(1s) fill(value, 10,20,30,40); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop4_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt4; +sql select * from streamt4; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 6 then + print ======rows=$rows + goto loop4_1 +endi + +# row 0 +if $data01 != 0 then + print ======data01=$data01 + goto loop4_1 +endi + +if $data11 != 10 then + print ======data11=$data11 + goto loop4_1 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop4_1 +endi + +if $data31 != 10 then + print ======data31=$data31 + goto loop4_1 +endi + +if $data41 != 10 then + print ======data41=$data41 + goto loop4_1 +endi + +if $data51 != 10 then + print ======data51=$data51 + goto loop4_1 +endi + + + +print step3 +sql create database test3 vgroups 1; +sql use test3; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams3_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3_1 as select _irowts, interp(a), interp(b), 
interp(c), interp(d) from t1 every(1s) fill(NULL); +sql create stream streams3_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3_2 as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(value, 10,20,30,40); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791210001,0,0,0,0.0) (1648791217001,4,4,4,4.1); + + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217000) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217000) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop5: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt3_1; +sql select * from streamt3_1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop5 +endi + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217000) every(1s) fill(value, 10,20,30,40); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217000) every(1s) fill(value, 10,20,30,40); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 
$data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop5_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 1 sql select * from streamt3_2; +sql select * from streamt3_2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop5_1 +endi + +sql insert into t1 values(1648791213001,1,1,1,1.0) (1648791213009,2,2,2,1.1) (1648791215001,5,5,5,5.1) + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop6: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt3_1; +sql select * from streamt3_1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop6 +endi + +# row 0 +if $data01 != NULL then + print ======data01=$data01 + goto loop6 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop6 +endi + +if $data21 
!= NULL then + print ======data21=$data21 + goto loop6 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop6 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop6 +endi + +if $data51 != NULL then + print ======data51=$data51 + goto loop6 +endi + +if $data61 != NULL then + print ======data61=$data61 + goto loop6 +endi + + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217001) every(1s) fill(value, 10,20,30,40); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791211000, 1648791217001) every(1s) fill(value, 10,20,30,40); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop6_1: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt3_2; +sql select * from streamt3_2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop6_1 +endi + +# row 0 +if $data01 != 10 then + print ======data01=$data01 + goto loop6_1 +endi + +if $data11 != 10 then + print ======data11=$data11 + goto loop6_1 +endi + +if $data21 != 10 then + print ======data21=$data21 + goto loop6_1 +endi + +if $data31 != 10 then + print ======data31=$data31 + goto loop6_1 +endi + +if $data41 != 10 then + print ======data41=$data41 + goto loop6_1 +endi + +if $data51 != 10 then + print ======data51=$data51 + goto loop6_1 +endi + +if $data61 != 10 then + print 
======data61=$data61 + goto loop6_1 +endi + +print end +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpValue1.sim b/tests/script/tsim/stream/streamInterpValue1.sim new file mode 100644 index 00000000000..84a0e28300c --- /dev/null +++ b/tests/script/tsim/stream/streamInterpValue1.sim @@ -0,0 +1,477 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + + +sql insert into t1 values(1648791213000,1,1,1,1.0); + +$loop_count = 0 + +loop0: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop0 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop0 +endi + + +sql insert into t1 values(1648791213009,3,3,3,1.0) (1648791217001,4,4,4,4.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791217001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 
$data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows != 5 then + print ======rows=$rows + goto loop2 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop2 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop2 +endi + +if $data21 != NULL then + print ======data21=$data21 + goto loop2 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop2 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop2 +endi + + +print step2 + +sql create database test2 vgroups 1; +sql use test2; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791213000,1,1,1,1.0); + + +$loop_count = 0 + +loop3: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 0 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 
$data53 $data54 + +# row 0 +if $rows != 1 then + print ======rows=$rows + goto loop3 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop3 +endi + + +sql insert into t1 values(1648791213009,3,3,3,1.0) (1648791217001,4,4,4,4.1) (1648791219000,5,5,5,5.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791219000) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791219000) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +$loop_count = 0 +loop4: + +sleep 300 + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +print 2 sql select * from streamt; +sql select * from streamt; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + +# row 0 +if $rows != 7 then + print ======rows=$rows + goto loop4 +endi + +# row 0 +if $data01 != 1 then + print ======data01=$data01 + goto loop4 +endi + +if $data11 != NULL then + print ======data11=$data11 + goto loop4 +endi + +if $data21 != NULL then + print ======data21=$data21 + goto loop4 +endi + +if $data31 != NULL then + print ======data31=$data31 + goto loop4 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop4 +endi + +if $data51 != NULL then + print 
======data51=$data51 + goto loop4 +endi + +if $data61 != 5 then + print ======data61=$data61 + goto loop4 +endi + +print step3 + +sql create database test3 vgroups 1; +sql use test3; + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams3 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 every(1s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791213001,1,1,1,1.0) (1648791219001,2,2,2,2.1) (1648791229001,3,3,3,3.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +$loop_count = 0 +loop5: + +sleep 300 + +print sql select * from streamt order by 1; +sql select * from streamt order by 1; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 16 then + print =====rows=$rows + goto loop5 +endi + +sql insert into t1 values(1648791215001,4,4,4,4.0) (1648791217001,5,5,5,5.1) (1648791222000,6,6,6,6.1) (1648791226000,7,7,7,7.1); + +print sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(NULL); +sql select _irowts, interp(a), interp(b), interp(c), interp(d) from t1 range(1648791213000, 1648791229001) every(1s) fill(NULL); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 
+print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 +print $data60 $data61 $data62 $data63 $data64 +print $data70 $data71 $data72 $data73 $data74 + + +$loop_count = 0 +loop6: + +sleep 300 + +print sql select * from streamt order by 1; +sql select * from streamt order by 1; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 16 then + goto loop6 +endi + +if $data01 != NULL then + print =====data01=$data01 + goto loop6 +endi + +if $data11 != NULL then + print =====data11=$data11 + goto loop6 +endi + +if $data21 != NULL then + print =====data21=$data21 + goto loop6 +endi + +if $data31 != NULL then + print =====data31=$data31 + goto loop6 +endi + +if $data41 != NULL then + print =====data41=$data41 + goto loop6 +endi + +if $data51 != NULL then + print =====data51=$data51 + goto loop6 +endi + +if $data61 != NULL then + print =====data61=$data61 + goto loop6 +endi + +if $data71 != NULL then + print =====data71=$data71 + goto loop6 +endi + +if $data81 != 6 then + print =====data81=$data81 + goto loop6 +endi + +if $data91 != NULL then + print =====data91=$data91 + goto loop6 +endi + +if $data[10][1] != NULL then + print =====data[10][1]=$data[10][1] + goto loop6 +endi + +if $data[11][1] != NULL then + print =====data[11][1]=$data[11][1] + goto loop6 +endi + +if $data[12][1] != 7 then + print =====data[12][1]=$data[12][1] + goto loop6 +endi + +if $data[13][1] != NULL then + print =====data[13][1]=$data[13][1] + goto loop6 +endi + +if $data[14][1] != NULL then + print =====data[14][1]=$data[14][1] + goto loop6 +endi + +if $data[15][1] != NULL then + print =====data[15][1]=$data[15][1] + goto loop6 +endi + + +print step4 + +sql create database test4 vgroups 1; +sql use test4; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); 
+sql create table t2 using st tags(2,2,2); + +sql create stream streams4 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4 as select _irowts, interp(a) as b, _isfilled as a from st partition by tbname, b as cc every(1s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(1648791217000,20000,2,3); + +sleep 2000 + +sql insert into t1 values(1648791212000,10000,2,3) (1648791215001,20,2,3); + +$loop_count = 0 +loop7: + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +sleep 300 + +print sql select a,b from streamt4; +sql select a,b from streamt4; + +if $rows != 6 then + print ======rows=$rows + goto loop7 +endi + +if $data00 != 0 then + print ======data00=$data00 + goto loop7 +endi + +if $data01 != 10000 then + print ======data01=$data01 + goto loop7 +endi + +if $data10 != 1 then + print ======data10=$data10 + goto loop7 +endi + +if $data20 != 1 then + print ======data20=$data20 + goto loop7 +endi + +if $data41 != NULL then + print ======data41=$data41 + goto loop7 +endi + +if $data50 != 0 then + print ======data50=$data50 + goto loop7 +endi + +if $data51 != 20000 then + print ======data51=$data51 + goto loop7 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamTwaError.sim b/tests/script/tsim/stream/streamTwaError.sim new file mode 100644 index 00000000000..cda5fa9c4bf --- /dev/null +++ b/tests/script/tsim/stream/streamTwaError.sim @@ -0,0 +1,36 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into 
streamt as select _wstart, twa(a) from st partition by tbname,ta interval(2s) fill(prev); + +sql_error create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _wstart, twa(a) from st partition by tbname,ta interval(2s) fill(prev); +sql_error create stream streams3 trigger window_close IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt3 as select _wstart, twa(a) from st partition by tbname,ta interval(2s) fill(prev); +sql_error create stream streams4 trigger max_delay 5s IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt4 as select _wstart, twa(a) from st partition by tbname,ta interval(2s) fill(prev); + +sql_error create stream streams5 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt5 as select _wstart, twa(a) from st interval(2s) fill(prev); +sql_error create stream streams6 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt6 as select last(ts), twa(a) from st partition by tbname,ta; +sql_error create stream streams7 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt7 as select _wstart, twa(a) from st partition by tbname,ta session(ts, 2s); +sql_error create stream streams8 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt8 as select _wstart, twa(a) from st partition by tbname,ta state_window(a); + +sql_error create stream streams9 trigger at_once IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt9 as select _wstart, elapsed(ts) from st partition by tbname,ta interval(2s) fill(prev); + +sql_error create stream streams10 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt10 as select _wstart, sum(a) from st partition by tbname,ta interval(2s) SLIDING(1s); +sql create stream streams11 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt11 as select _wstart, avg(a) from st partition by tbname,ta interval(2s) SLIDING(2s); + +sql_error create stream streams10 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into 
streamt10 as select _wstart, sum(a) from st interval(2s); + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamTwaFwcFill.sim b/tests/script/tsim/stream/streamTwaFwcFill.sim new file mode 100644 index 00000000000..6a742a31789 --- /dev/null +++ b/tests/script/tsim/stream/streamTwaFwcFill.sim @@ -0,0 +1,278 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _wstart, twa(a), twa(b), elapsed(ts), now ,timezone(), ta from st partition by tbname,ta interval(2s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3s,1,1,1) (now + 4s,10,1,1) (now + 7s,20,2,2) (now + 8s,30,3,3); +sql insert into t2 values(now + 4s,1,1,1) (now + 5s,10,1,1) (now + 8s,20,2,2) (now + 9s,30,3,3); + + +print sql select * from t1; +sql select * from t1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select * from t2; +sql select * from t2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop0: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 
20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1; +sql select * from streamt where ta == 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop0 +endi + +$loop_count = 0 +loop1: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 2; +sql select * from streamt where ta == 2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop1 +endi + + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams2 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _wstart, twa(a), twa(b), elapsed(ts), now ,timezone(), ta from st partition by tbname interval(2s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3s,1,1,1) (now + 4s,10,1,1) (now + 7s,20,2,2) (now + 8s,30,3,3); +sql insert into t2 values(now + 4s,1,1,1) (now + 5s,10,1,1) (now + 8s,20,2,2) (now + 9s,30,3,3); + + +print sql select * from t1; +sql select * from t1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print 
$data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select * from t2; +sql select * from t2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop2: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1; +sql select * from streamt where ta == 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop2 +endi + +$loop_count = 0 +loop3: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 2; +sql select * from streamt where ta == 2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop3 +endi + +print step3 +print =============== create database +sql create database test3 vgroups 1; +sql use test3; + +sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream streams3 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt 
as select _wstart, twa(a), twa(b), elapsed(ts), now ,timezone(), ta from st partition by tbname interval(2s) fill(value,100,200,300); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3s,1,1,1) (now + 4s,10,1,1) (now + 7s,20,2,2) (now + 8s,30,3,3); +sql insert into t2 values(now + 4s,1,1,1) (now + 5s,10,1,1) (now + 8s,20,2,2) (now + 9s,30,3,3); + + +print sql select * from t1; +sql select * from t1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select * from t2; +sql select * from t2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop4: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1; +sql select * from streamt where ta == 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop4 +endi + +$loop_count = 0 +loop5: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 2; +sql select * from streamt where ta == 2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 
$data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 5 then + print ======rows=$rows + goto loop5 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamTwaFwcFillPrimaryKey.sim b/tests/script/tsim/stream/streamTwaFwcFillPrimaryKey.sim new file mode 100644 index 00000000000..4282518c9c8 --- /dev/null +++ b/tests/script/tsim/stream/streamTwaFwcFillPrimaryKey.sim @@ -0,0 +1,222 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp, a int primary key, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _wstart, twa(b), count(*),ta from st partition by tbname, ta interval(2s) fill(prev); + +run tsim/stream/checkTaskStatus.sim + +sql select now; + +sql insert into t1 values(now + 3s,1,1,1) (now + 3s,2,10,10) (now + 3s,3,30,30); +sql insert into t2 values(now + 4s,1,1,1) (now + 4s,2,10,10) (now + 4s,3,30,30); + + +print sql select _wstart, twa(b), count(*),ta from t1 partition by tbname, ta interval(2s); +sql select _wstart, twa(b), count(*),ta from t1 partition by tbname, ta interval(2s); + +$query1_data = $data01 + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select _wstart, twa(b), count(*),ta from t2 partition by tbname, ta interval(2s); +sql select _wstart, twa(b), 
count(*),ta from t2 partition by tbname, ta interval(2s); + +$query2_data = $data01 + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop0: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1; +sql select * from streamt where ta == 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 6 then + print ======rows=$rows + goto loop0 +endi + +if $data01 != $query1_data then + print ======data01=$data01 + return -1 +endi + + +$loop_count = 0 +loop1: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 2; +sql select * from streamt where ta == 2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 6 then + print ======rows=$rows + goto loop1 +endi + + +if $data01 != $query2_data then + print ======data01=$data01 + return -1 +endi + + +print step2 +print =============== create database +sql create database test2 vgroups 1; +sql use test2; + +sql create stable st(ts timestamp, a int primary key, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); +sql create stream 
streams2 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _wstart, twa(b), ta from st partition by tbname, ta interval(2s) fill(NULL); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3s,1,1,1) (now + 3s,2,10,10) (now + 3s,3,30,30); +sql insert into t2 values(now + 4s,1,1,1) (now + 4s,2,10,10) (now + 4s,3,30,30); + + +print sql select _wstart, twa(b), count(*),ta from t1 partition by tbname, ta interval(2s); +sql select _wstart, twa(b), count(*),ta from t1 partition by tbname, ta interval(2s); + +$query1_data = $data01 + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select _wstart, twa(b), count(*),ta from t2 partition by tbname, ta interval(2s); +sql select _wstart, twa(b), count(*),ta from t2 partition by tbname, ta interval(2s); + +$query2_data = $data01 + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop2: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1; +sql select * from streamt where ta == 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 6 then + print ======rows=$rows + goto loop2 +endi + +if $data01 != $query1_data then + print ======data01=$data01 + 
return -1 +endi + +$loop_count = 0 +loop3: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 2; +sql select * from streamt where ta == 2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 6 then + print ======rows=$rows + goto loop3 +endi + + +if $data01 != $query2_data then + print ======data01=$data01 + return -1 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamTwaFwcInterval.sim b/tests/script/tsim/stream/streamTwaFwcInterval.sim new file mode 100644 index 00000000000..86406503107 --- /dev/null +++ b/tests/script/tsim/stream/streamTwaFwcInterval.sim @@ -0,0 +1,294 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _wstart, twa(a), ta from st partition by tbname,ta interval(2s); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3000a,1,1,1) (now + 3100a,5,10,10) (now + 3200a,5,10,10) (now + 5100a,20,1,1) (now + 5200a,30,10,10) (now + 5300a,40,10,10); +sql insert into t2 values(now + 3000a,1,1,1) (now + 3100a,2,10,10) (now + 3200a,30,10,10) (now + 5100a,10,1,1) (now + 5200a,40,10,10) (now + 5300a,7,10,10); + + +print sql select _wstart, twa(a) from t1 interval(2s); +sql select 
_wstart, twa(a) from t1 interval(2s); + +$query1_data01 = $data01 +$query1_data11 = $data11 + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +print sql select _wstart, twa(a) from t2 interval(2s); +sql select _wstart, twa(a) from t2 interval(2s); + +$query2_data01 = $data01 +$query2_data11 = $data11 + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop0: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1; +sql select * from streamt where ta == 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop0 +endi + +if $data01 != $query1_data01 then + print ======data01========$data01 + print ======query1_data01=$query1_data01 + return -1 +endi + +if $data11 != $query1_data11 then + print ======data11========$data11 + print ======query1_data11=$query1_data11 + goto loop0 +endi + +$loop_count = 0 +loop1: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 2; +sql select * from streamt where ta == 2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 
$data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop1 +endi + +if $data01 != $query2_data01 then + print ======data01======$data01 + print ====query2_data01=$query2_data01 + return -1 +endi + +if $data11 != $query2_data11 then + print ======data11======$data11 + print ====query2_data11=$query2_data11 + goto loop1 +endi + + +print step2 +print =============== create database +sql create database test2 vgroups 4; +sql use test2; + +sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams2 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _wstart, count(*), ta from st partition by tbname,ta interval(2s); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3000a,1,1,1) (now + 3100a,3,10,10) (now + 3200a,5,10,10) (now + 5100a,20,1,1) (now + 5200a,30,10,10) (now + 5300a,40,10,10); +sql insert into t2 values(now + 3000a,1,1,1) (now + 3100a,3,10,10) (now + 3200a,5,10,10) (now + 5100a,10,1,1) (now + 5200a,40,10,10) (now + 5300a,7,10,10); + + +print sql select _wstart, count(*) from t1 interval(2s) order by 1; +sql select _wstart, count(*) from t1 interval(2s) order by 1; + +$query1_data01 = $data01 +$query1_data11 = $data11 + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop2: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1 order by 1; +sql select * from 
streamt where ta == 1 order by 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $data01 != $query1_data01 then + print ======data01======$data01 + print ====query1_data01=$query1_data01 + goto loop2 +endi + +if $data11 != $query1_data11 then + print ======data11========$data11 + print ======query1_data11=$query1_data11 + goto loop2 +endi + +sql insert into t1 values(now + 3000a,1,1,1) (now + 3100a,3,10,10) (now + 3200a,5,10,10) (now + 5100a,20,1,1) (now + 5200a,30,10,10) (now + 5300a,40,10,10); +sql insert into t2 values(now + 3000a,1,1,1) (now + 3100a,3,10,10) (now + 3200a,5,10,10) (now + 5100a,10,1,1) (now + 5200a,40,10,10) (now + 5300a,7,10,10); + +print sql select _wstart, count(*) from t1 interval(2s) order by 1; +sql select _wstart, count(*) from t1 interval(2s) order by 1; + +$query1_data21 = $data21 +$query1_data31 = $data31 + + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop3: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1 order by 1; +sql select * from streamt where ta == 1 order by 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $data21 != $query1_data21 then + print ======data21======$data21 + print 
====query1_data21=$query1_data21 + goto loop3 +endi + +if $data31 != $query1_data31 then + print ======data31========$data31 + print ======query1_data31=$query1_data31 + goto loop3 +endi + + +sql insert into t1 values(now + 3000a,1,1,1) (now + 3100a,3,10,10) (now + 3200a,5,10,10) (now + 5100a,20,1,1) (now + 5200a,30,10,10) (now + 5300a,40,10,10); +sql insert into t2 values(now + 3000a,1,1,1) (now + 3100a,3,10,10) (now + 3200a,5,10,10) (now + 5100a,10,1,1) (now + 5200a,40,10,10) (now + 5300a,7,10,10); + +print sql select _wstart, count(*) from t1 interval(2s) order by 1; +sql select _wstart, count(*) from t1 interval(2s) order by 1; + +$query1_data41 = $data41 +$query1_data51 = $data51 + + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +$loop_count = 0 +loop4: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1 order by 1; +sql select * from streamt where ta == 1 order by 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $data41 != $query1_data41 then + print ======data41======$data41 + print ====query1_data41=$query1_data41 + goto loop4 +endi + +if $data51 != $query1_data51 then + print ======data51========$data51 + print ======query1_data51=$query1_data51 + goto loop4 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamTwaFwcIntervalPrimaryKey.sim b/tests/script/tsim/stream/streamTwaFwcIntervalPrimaryKey.sim new file mode 100644 index 
00000000000..b015a5955a0 --- /dev/null +++ b/tests/script/tsim/stream/streamTwaFwcIntervalPrimaryKey.sim @@ -0,0 +1,109 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 1; +sql use test; + +sql create stable st(ts timestamp, a int primary key, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt as select _wstart, count(*), ta from st partition by tbname,ta interval(2s); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3s,1,1,1) (now + 3s,2,10,10) (now + 3s,3,30,30) (now + 11s,1,1,1) (now + 11s,2,10,10); +sql insert into t2 values(now + 4s,1,1,1) (now + 4s,2,10,10) (now + 4s,3,30,30) (now + 12s,1,1,1) (now + 12s,2,10,10); + + +print sql select _wstart, count(*) from st partition by tbname,ta interval(2s); +sql select _wstart, count(*) from st partition by tbname,ta interval(2s); + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +$loop_count = 0 +loop0: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 1; +sql select * from streamt where ta == 1; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto 
loop0 +endi + +if $data01 != 3 then + print ======data01=$data01 + return -1 +endi + +if $data11 != 2 then + print ======data11=$data11 + goto loop0 +endi + +$loop_count = 0 +loop1: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select * from streamt where ta == 2; +sql select * from streamt where ta == 2; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop1 +endi + +if $data01 != 3 then + print ======data01=$data01 + return -1 +endi + +if $data11 != 2 then + print ======data11=$data11 + goto loop1 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamTwaInterpFwc.sim b/tests/script/tsim/stream/streamTwaInterpFwc.sim new file mode 100644 index 00000000000..2073378e92a --- /dev/null +++ b/tests/script/tsim/stream/streamTwaInterpFwc.sim @@ -0,0 +1,114 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print step1 +print =============== create database +sql create database test vgroups 4; +sql use test; + +sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt1 as select _wstart, count(a), sum(b), now, timezone(), ta from st partition by tbname,ta interval(2s) fill(value, 100, 200); +sql create stream streams2 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2 as select _wstart, count(a), twa(a), sum(b), now, timezone(), ta from st partition by 
tbname,ta interval(2s) fill(prev); +sql create stream streams3 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt3 as select _irowts, interp(a), interp(b), interp(c), now, timezone(), ta from st partition by tbname,ta every(2s) fill(value, 100, 200, 300); +sql create stream streams4 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt4 as select _irowts, interp(a), interp(b), interp(c), now, timezone(), ta from st partition by tbname,ta every(2s) fill(prev); +sql create stream streams5 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt5 as select _wstart, count(a), sum(b), now, timezone(), ta from st partition by tbname,ta interval(2s); + +run tsim/stream/checkTaskStatus.sim + +$loop_count = 0 + +_data: + +sql insert into t1 values(now + 3000a,1,1,1) (now + 3100a,5,10,10) (now + 3200a,5,10,10) (now + 5100a,20,1,1) (now + 5200a,30,10,10) (now + 5300a,40,10,10); +sql insert into t2 values(now + 3000a,1,1,1) (now + 3100a,2,10,10) (now + 3200a,30,10,10) (now + 5100a,10,1,1) (now + 5200a,40,10,10) (now + 5300a,7,10,10); + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count < 10 then + goto _data +endi + +print sql select _wstart, count(a), sum(b), now, timezone(), ta from st partition by tbname,ta interval(2s) order by 1, 2; +sql select _wstart, count(a), sum(b), now, timezone(), ta from st partition by tbname,ta interval(2s) order by 1, 2; +$query1_rows = $rows +print ======query1_rows=$query1_rows + +$query1_data01 = $data01 +print ======query1_data01=$query1_data01 + +print select last(*) from (select _wstart, count(a), sum(b), now, timezone(), ta from st partition by tbname,ta interval(2s)) order by 1,2 desc; +sql select _wstart, count(a), sum(b), now, timezone(), ta from st partition by tbname,ta interval(2s) order by 1,2 desc; +print $data00 $data01 $data02 $data03 $data04 + +loop0: + +sleep 2000 + +print sql select * from streamt1 order by 1, 2; +sql select * from streamt1 order by 1, 2; 
+print ======streamt1=rows=$rows + +if $rows < $query1_rows then + goto loop0 +endi + +if $data01 != $query1_data01 then + print =============data01=$data01 + print ======query1_data01=$query1_data01 + return -1 +endi + +print sql select * from streamt2 order by 1, 2; +sql select * from streamt2 order by 1, 2; +print ======streamt2=rows=$rows + +if $rows < $query1_rows then + goto loop0 +endi + +if $data01 != $query1_data01 then + print =============data01=$data01 + print ======query1_data01=$query1_data01 + return -1 +endi + +print sql select * from streamt3 order by 1, 2; +sql select * from streamt3 order by 1, 2; +print ======streamt3=rows=$rows + +if $rows < $query1_rows then + goto loop0 +endi + +print sql select * from streamt4 order by 1, 2; +sql select * from streamt4 order by 1, 2; +print ======streamt4=rows=$rows + +if $rows < $query1_rows then + goto loop0 +endi + +print sql select * from streamt5 order by 1, 2; +sql select * from streamt5 order by 1, 2; +print ======streamt5=rows=$rows + +if $rows < $query1_rows then + return -1 +endi + +if $data01 != $query1_data01 then + print =============data01=$data01 + print ======query1_data01=$query1_data01 + return -1 +endi + +print end + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/testsuit.sim b/tests/script/tsim/testsuit.sim index c208a07488d..ec52b8c234b 100644 --- a/tests/script/tsim/testsuit.sim +++ b/tests/script/tsim/testsuit.sim @@ -102,6 +102,7 @@ run tsim/stream/triggerInterval0.sim run tsim/stream/triggerSession0.sim run tsim/stream/distributeIntervalRetrive0.sim run tsim/stream/basic0.sim +run tsim/stream/snodeCheck.sim run tsim/stream/session0.sim run tsim/stream/schedSnode.sim run tsim/stream/partitionby.sim @@ -110,6 +111,7 @@ run tsim/stream/distributeInterval0.sim run tsim/stream/distributeSession0.sim run tsim/stream/state0.sim run tsim/stream/basic2.sim +run tsim/stream/concurrentcheckpt.sim run tsim/insert/basic1.sim run tsim/insert/commit-merge0.sim run 
tsim/insert/basic0.sim diff --git a/tests/script/win-test-file b/tests/script/win-test-file index ff69e919672..ef99442214e 100644 --- a/tests/script/win-test-file +++ b/tests/script/win-test-file @@ -325,6 +325,7 @@ ./test.sh -f tsim/compress/compress.sim ./test.sh -f tsim/compress/compress_col.sim ./test.sh -f tsim/compress/uncompress.sim +./test.sh -f tsim/compress/compressDisable.sim ./test.sh -f tsim/compute/avg.sim ./test.sh -f tsim/compute/block_dist.sim ./test.sh -f tsim/compute/bottom.sim diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py index 9ba3bd0d2ff..7c3eb48fe1d 100644 --- a/tests/system-test/0-others/compatibility.py +++ b/tests/system-test/0-others/compatibility.py @@ -32,7 +32,7 @@ def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300 stt_trigger 1; ;use deldata; + self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 100 stt_trigger 1; ;use deldata; create table deldata.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int); create table deldata.ct1 using deldata.stb1 tags ( 1 ); insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); diff --git a/tests/system-test/0-others/compatibility_coverage.py b/tests/system-test/0-others/compatibility_coverage.py index 6eccf78c5a0..bf768927773 100644 --- a/tests/system-test/0-others/compatibility_coverage.py +++ 
b/tests/system-test/0-others/compatibility_coverage.py @@ -30,7 +30,7 @@ def init(self, conn, logSql, replicaVar=1): self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300 stt_trigger 1; ;use deldata; + self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 100 stt_trigger 1; ;use deldata; create table deldata.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int); create table deldata.ct1 using deldata.stb1 tags ( 1 ); insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index 01e416bb26d..aa548d4e598 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -222,7 +222,7 @@ def ins_columns_check(self): tdSql.query("select * from information_schema.ins_columns where db_name ='information_schema'") tdLog.info(len(tdSql.queryResult)) - tdSql.checkEqual(True, len(tdSql.queryResult) in range(280, 281)) + tdSql.checkEqual(True, len(tdSql.queryResult) in range(281, 282)) tdSql.query("select * from information_schema.ins_columns where db_name ='performance_schema'") tdSql.checkEqual(56, len(tdSql.queryResult)) diff --git a/tests/system-test/0-others/sysinfo.py b/tests/system-test/0-others/sysinfo.py index 43a0400f188..35e574739bd 100644 --- a/tests/system-test/0-others/sysinfo.py +++ b/tests/system-test/0-others/sysinfo.py @@ -39,7 +39,7 @@ def 
check_version(self): taos_list = ['server','client'] for i in taos_list: tdSql.query(f'select {i}_version()') - version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char version"', shell=True,capture_output=True).stdout.decode('utf8')).split('"')[1] + version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char td_version"', shell=True,capture_output=True).stdout.decode('utf8')).split('"')[1] tdSql.checkData(0,0,version_info) def get_server_status(self): diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py index 88d0d420f73..829a8aec273 100644 --- a/tests/system-test/0-others/udfTest.py +++ b/tests/system-test/0-others/udfTest.py @@ -61,7 +61,7 @@ def prepare_udf_so(self): def prepare_data(self): tdSql.execute("drop database if exists db ") - tdSql.execute("create database if not exists db duration 300") + tdSql.execute("create database if not exists db duration 100") tdSql.execute("use db") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/0-others/udf_cfg1.py b/tests/system-test/0-others/udf_cfg1.py index 913e5fcca14..a92f3bce31f 100644 --- a/tests/system-test/0-others/udf_cfg1.py +++ b/tests/system-test/0-others/udf_cfg1.py @@ -63,7 +63,7 @@ def prepare_udf_so(self): def prepare_data(self): tdSql.execute("drop database if exists db ") - tdSql.execute("create database if not exists db duration 300") + tdSql.execute("create database if not exists db duration 100") tdSql.execute("use db") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/0-others/udf_cfg2.py b/tests/system-test/0-others/udf_cfg2.py index b535b4f626c..89c40309778 100644 --- a/tests/system-test/0-others/udf_cfg2.py +++ b/tests/system-test/0-others/udf_cfg2.py @@ -63,7 +63,7 @@ def prepare_udf_so(self): def prepare_data(self): tdSql.execute("drop database if exists db ") - tdSql.execute("create database if not exists db duration 300") + tdSql.execute("create database if not exists db 
duration 100") tdSql.execute("use db") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/0-others/udf_cluster.py b/tests/system-test/0-others/udf_cluster.py index 9253be4ea37..c41412c10d1 100644 --- a/tests/system-test/0-others/udf_cluster.py +++ b/tests/system-test/0-others/udf_cluster.py @@ -64,7 +64,7 @@ def prepare_udf_so(self): def prepare_data(self): tdSql.execute("drop database if exists db") - tdSql.execute("create database if not exists db replica 1 duration 300") + tdSql.execute("create database if not exists db replica 1 duration 100") tdSql.execute("use db") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/0-others/udf_create.py b/tests/system-test/0-others/udf_create.py index 6071561035d..9038d99ff95 100644 --- a/tests/system-test/0-others/udf_create.py +++ b/tests/system-test/0-others/udf_create.py @@ -73,7 +73,7 @@ def prepare_udf_so(self): def prepare_data(self): tdSql.execute("drop database if exists db ") - tdSql.execute("create database if not exists db duration 300") + tdSql.execute("create database if not exists db duration 100") tdSql.execute("use db") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/0-others/udf_restart_taosd.py b/tests/system-test/0-others/udf_restart_taosd.py index 61b6a4ea684..c99e864e713 100644 --- a/tests/system-test/0-others/udf_restart_taosd.py +++ b/tests/system-test/0-others/udf_restart_taosd.py @@ -60,7 +60,7 @@ def prepare_udf_so(self): def prepare_data(self): tdSql.execute("drop database if exists db ") - tdSql.execute("create database if not exists db duration 300") + tdSql.execute("create database if not exists db duration 100") tdSql.execute("use db") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/2-query/abs.py b/tests/system-test/2-query/abs.py index 0eabd915350..db841af0398 100644 --- a/tests/system-test/2-query/abs.py +++ b/tests/system-test/2-query/abs.py @@ -127,7 +127,7 @@ def prepare_datas(self, dbname="db"): def 
prepare_tag_datas(self, dbname="testdb"): # prepare datas tdSql.execute( - f"create database if not exists {dbname} keep 3650 duration 1000 replica {self.replicaVar} ") + f"create database if not exists {dbname} keep 3650 duration 100 replica {self.replicaVar} ") tdSql.execute(" use testdb ") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/and_or_for_byte.py b/tests/system-test/2-query/and_or_for_byte.py index 5b2fb519985..ca9c1f2bef8 100644 --- a/tests/system-test/2-query/and_or_for_byte.py +++ b/tests/system-test/2-query/and_or_for_byte.py @@ -128,7 +128,7 @@ def prepare_datas(self, dbname="db"): def prepare_tag_datas(self, dbname="testdb"): # prepare datas tdSql.execute( - f"create database if not exists {dbname} keep 3650 duration 1000") + f"create database if not exists {dbname} keep 3650 duration 100") tdSql.execute(f" use {dbname} ") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/countAlwaysReturnValue.py b/tests/system-test/2-query/countAlwaysReturnValue.py index bced89456e6..a6a064ddfd6 100644 --- a/tests/system-test/2-query/countAlwaysReturnValue.py +++ b/tests/system-test/2-query/countAlwaysReturnValue.py @@ -18,7 +18,7 @@ def init(self, conn, logSql, replicaVar=1): def prepare_data(self, dbname="db"): tdSql.execute( - f"create database if not exists {dbname} keep 3650 duration 1000") + f"create database if not exists {dbname} keep 3650 duration 100") tdSql.execute(f"use {dbname} ") tdSql.execute( f"create table {dbname}.tb (ts timestamp, c0 int)" diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py index 1964cea51f5..ee6b517061f 100644 --- a/tests/system-test/2-query/db.py +++ b/tests/system-test/2-query/db.py @@ -57,11 +57,35 @@ def case2(self): tdSql.checkData(0, 2, 0) tdSql.query("show dnode 1 variables like '%debugFlag'") - tdSql.checkRows(24) + tdSql.checkRows(25) tdSql.query("show dnode 1 variables like '____debugFlag'") tdSql.checkRows(2) + 
tdSql.query("show dnode 1 variables like 's3MigrateEnab%'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 's3MigrateEnabled') + tdSql.checkData(0, 2, 0) + + tdSql.query("show dnode 1 variables like 's3MigrateIntervalSec%'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 's3MigrateIntervalSec') + tdSql.checkData(0, 2, 3600) + + tdSql.query("show dnode 1 variables like 's3PageCacheSize%'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 's3PageCacheSize') + tdSql.checkData(0, 2, 4096) + + tdSql.query("show dnode 1 variables like 's3UploadDelaySec%'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 's3UploadDelaySec') + tdSql.checkData(0, 2, 60) + def threadTest(self, threadID): print(f"Thread {threadID} starting...") tdsqln = tdCom.newTdSql() diff --git a/tests/system-test/2-query/distribute_agg_apercentile.py b/tests/system-test/2-query/distribute_agg_apercentile.py index 23ca0b9fae2..0a2f7ce45ff 100644 --- a/tests/system-test/2-query/distribute_agg_apercentile.py +++ b/tests/system-test/2-query/distribute_agg_apercentile.py @@ -18,7 +18,7 @@ def init(self, conn, logSql, replicaVar=1): def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 s3_keeplocal 3000 vgroups 5") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 s3_keeplocal 3000 vgroups 5") tdSql.execute(f" use {dbname} ") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/distribute_agg_avg.py b/tests/system-test/2-query/distribute_agg_avg.py index 1cd24103f88..497c3e9facf 100644 --- a/tests/system-test/2-query/distribute_agg_avg.py +++ b/tests/system-test/2-query/distribute_agg_avg.py @@ -35,7 +35,7 @@ def check_avg_functions(self, tbname , col_name): def prepare_datas_of_distribute(self, 
dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 vgroups 5") tdSql.execute(f" use {dbname} ") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/distribute_agg_count.py b/tests/system-test/2-query/distribute_agg_count.py index 7d131cd77d7..fdcf2704022 100644 --- a/tests/system-test/2-query/distribute_agg_count.py +++ b/tests/system-test/2-query/distribute_agg_count.py @@ -36,7 +36,7 @@ def check_count_functions(self, tbname , col_name): def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 vgroups 5") tdSql.execute(f" use {dbname} ") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/distribute_agg_max.py b/tests/system-test/2-query/distribute_agg_max.py index fb91216c3ea..53379ecbb38 100644 --- a/tests/system-test/2-query/distribute_agg_max.py +++ b/tests/system-test/2-query/distribute_agg_max.py @@ -38,7 +38,7 @@ def check_max_functions(self, tbname , col_name): def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 vgroups 5") tdSql.execute(f" use {dbname} ") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/distribute_agg_min.py b/tests/system-test/2-query/distribute_agg_min.py index 26677986401..01bc3da4a0f 100644 --- a/tests/system-test/2-query/distribute_agg_min.py +++ 
b/tests/system-test/2-query/distribute_agg_min.py @@ -37,7 +37,7 @@ def check_min_functions(self, tbname , col_name): def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 vgroups 5") tdSql.execute(f" use {dbname} ") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/distribute_agg_spread.py b/tests/system-test/2-query/distribute_agg_spread.py index 0247a91861a..8dc91f712a2 100644 --- a/tests/system-test/2-query/distribute_agg_spread.py +++ b/tests/system-test/2-query/distribute_agg_spread.py @@ -37,7 +37,7 @@ def check_spread_functions(self, tbname , col_name): def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 vgroups 5") tdSql.execute(f" use {dbname}") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/distribute_agg_stddev.py b/tests/system-test/2-query/distribute_agg_stddev.py index 80bab3082d3..f5383739ff6 100644 --- a/tests/system-test/2-query/distribute_agg_stddev.py +++ b/tests/system-test/2-query/distribute_agg_stddev.py @@ -46,7 +46,7 @@ def check_stddev_functions(self, tbname , col_name): def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 vgroups 5") tdSql.execute(f" use {dbname}") tdSql.execute( f'''create table {dbname}.stb1 diff --git 
a/tests/system-test/2-query/distribute_agg_sum.py b/tests/system-test/2-query/distribute_agg_sum.py index da26fd58f93..fbe0221dd67 100644 --- a/tests/system-test/2-query/distribute_agg_sum.py +++ b/tests/system-test/2-query/distribute_agg_sum.py @@ -35,7 +35,7 @@ def check_sum_functions(self, tbname , col_name): def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 vgroups 5") tdSql.execute(f" use {dbname}") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/fill_with_group.py b/tests/system-test/2-query/fill_with_group.py index 2139bbbfb35..3847f1dec44 100644 --- a/tests/system-test/2-query/fill_with_group.py +++ b/tests/system-test/2-query/fill_with_group.py @@ -237,11 +237,186 @@ def test_fill_with_order_by2(self): tdSql.checkData(12, 1, None) tdSql.checkData(13, 1, None) + def test_fill_with_complex_expr(self): + sql = "SELECT _wstart, _wstart + 1d, count(*), now, 1+1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' INTERVAL(5m) FILL(NULL)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(12) + for i in range(0, 12, 2): + tdSql.checkData(i, 2, 10) + for i in range(1, 12, 2): + tdSql.checkData(i, 2, None) + for i in range(0, 12): + firstCol = tdSql.getData(i, 0) + secondCol = tdSql.getData(i, 1) + tdLog.debug(f"firstCol: {firstCol}, secondCol: {secondCol}, secondCol - firstCol: {secondCol - firstCol}") + if secondCol - firstCol != timedelta(days=1): + tdLog.exit(f"query error: secondCol - firstCol: {secondCol - firstCol}") + nowCol = tdSql.getData(i, 3) + if nowCol is None: + tdLog.exit(f"query error: nowCol: {nowCol}") + constCol = tdSql.getData(i, 4) + if constCol != 2: + tdLog.exit(f"query error: constCol: {constCol}") + + sql = "SELECT _wstart + 1d, 
count(*), last(ts) + 1a, timediff(_wend, last(ts)) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' INTERVAL(5m) FILL(NULL)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(12) + for i in range(0, 12, 2): + tdSql.checkData(i, 1, 10) + tdSql.checkData(i, 3, 300000) + for i in range(1, 12, 2): + tdSql.checkData(i, 1, None) + tdSql.checkData(i, 2, None) + tdSql.checkData(i, 3, None) + + sql = "SELECT count(*), tbname FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname INTERVAL(5m) FILL(NULL)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(120) + + sql = "SELECT * from (SELECT count(*), timediff(_wend, last(ts)) + t1, tbname FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) LIMIT 1) order by tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(10) + j = 0 + for i in range(0, 10): + tdSql.checkData(i, 1, 300000 + j) + j = j + 1 + if j == 5: + j = 0 + + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, tbname,t1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) ORDER BY timediff(last(ts), _wstart)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(120) + + sql = "SELECT 1+1, count(*), timediff(_wend, last(ts)) + t1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) HAVING(timediff(last(ts), _wstart)+ t1 >= 1) ORDER BY timediff(last(ts), _wstart)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(48) + + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) HAVING(timediff(last(ts), _wstart) + t1 
>= 1) ORDER BY timediff(last(ts), _wstart), tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(48) + + sql = "SELECT count(*) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) HAVING(timediff(last(ts), _wstart) >= 0)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(60) + + sql = "SELECT count(*) + 1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(NULL) HAVING(count(*) > 1)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(0) + + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(value, 0, 0) HAVING(timediff(last(ts), _wstart) + t1 >= 1) ORDER BY timediff(last(ts), _wstart), tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(48) + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(value, 0, 0) HAVING(count(*) >= 0) ORDER BY timediff(last(ts), _wstart), tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(120) + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname, t1 INTERVAL(5m) FILL(value, 0, 0) HAVING(count(*) > 0) ORDER BY timediff(last(ts), _wstart), tbname" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(60) + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, 
concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname INTERVAL(5m) FILL(linear) HAVING(count(*) >= 0 and t1 <= 1) ORDER BY timediff(last(ts), _wstart), tbname, t1" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(44) + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname INTERVAL(5m) FILL(prev) HAVING(count(*) >= 0 and t1 > 1) ORDER BY timediff(last(ts), _wstart), tbname, t1" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(72) + + sql = "SELECT 1+1, count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname INTERVAL(5m) FILL(linear) ORDER BY tbname, _wstart;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(120) + for i in range(11, 120, 12): + tdSql.checkData(i, 1, None) + for i in range(0, 120): + tdSql.checkData(i, 0, 2) + + sql = "SELECT count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1, concat(to_char(_wstart, 'HH:MI:SS__'), tbname) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY tbname INTERVAL(5m) FILL(linear) HAVING(count(*) >= 0) ORDER BY tbname;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(110) + for i in range(0, 110, 11): + lastCol = tdSql.getData(i, 3) + tdLog.debug(f"lastCol: {lastCol}") + if lastCol[-1:] != str(i//11): + tdLog.exit(f"query error: lastCol: {lastCol}") + + sql = "SELECT 1+1, count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1,t1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY t1 
INTERVAL(5m) FILL(linear) ORDER BY t1, _wstart;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(60) + + sql = "SELECT 1+1, count(*), timediff(_wend, last(ts)) + t1, timediff('2018-09-20 01:00:00', _wstart) + t1,t1 FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY t1 INTERVAL(5m) FILL(linear) HAVING(count(*) > 0) ORDER BY t1, _wstart;" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(55) + + sql = "SELECT count(*), timediff(_wend, last(ts)), timediff('2018-09-20 01:00:00', _wstart) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY concat(tbname, 'asd') INTERVAL(5m) having(concat(tbname, 'asd') like '%asd');" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(60) + + sql = "SELECT count(*), timediff(_wend, last(ts)), timediff('2018-09-20 01:00:00', _wstart) FROM meters WHERE ts >= '2018-09-20 00:00:00.000' AND ts < '2018-09-20 01:00:00.000' PARTITION BY concat(tbname, 'asd') INTERVAL(5m) having(concat(tbname, 'asd') like 'asd%');" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(0) + + sql = "SELECT c1 FROM meters PARTITION BY c1 HAVING c1 > 0 slimit 2 limit 10" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(20) + + sql = "SELECT t1 FROM meters PARTITION BY t1 HAVING(t1 = 1)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(20000) + + sql = "SELECT concat(t2, 'asd') FROM meters PARTITION BY t2 HAVING(t2 like '%5')" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(10000) + tdSql.checkData(0, 0, 'tb5asd') + + sql = "SELECT concat(t2, 'asd') FROM meters PARTITION BY concat(t2, 'asd') HAVING(concat(t2, 'asd')like '%5%')" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(10000) + tdSql.checkData(0, 0, 'tb5asd') + + sql = "SELECT avg(c1) FROM meters PARTITION BY tbname, t1 HAVING(t1 = 1)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(2) + + sql = "SELECT count(*) FROM meters PARTITION BY concat(tbname, 'asd') HAVING(concat(tbname, 'asd') like '%asd')" + 
tdSql.query(sql, queryTimes=1) + tdSql.checkRows(10) + + sql = "SELECT count(*), concat(tbname, 'asd') FROM meters PARTITION BY concat(tbname, 'asd') HAVING(concat(tbname, 'asd') like '%asd')" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(10) + + sql = "SELECT count(*) FROM meters PARTITION BY t1 HAVING(t1 < 4) order by t1 +1" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(4) + + sql = "SELECT count(*), t1 + 100 FROM meters PARTITION BY t1 HAVING(t1 < 4) order by t1 +1" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(4) + + sql = "SELECT count(*), t1 + 100 FROM meters PARTITION BY t1 INTERVAL(1d) HAVING(t1 < 4) order by t1 +1 desc" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(280) + + sql = "SELECT count(*), concat(t3, 'asd') FROM meters PARTITION BY concat(t3, 'asd') INTERVAL(1d) HAVING(concat(t3, 'asd') like '%5asd' and count(*) = 118)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(1) + + sql = "SELECT count(*), concat(t3, 'asd') FROM meters PARTITION BY concat(t3, 'asd') INTERVAL(1d) HAVING(concat(t3, 'asd') like '%5asd' and count(*) != 118)" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(69) + + sql = "SELECT count(*), concat(t3, 'asd') FROM meters PARTITION BY concat(t3, 'asd') INTERVAL(1d) HAVING(concat(t3, 'asd') like '%5asd') order by count(*) asc limit 10" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(10) + + sql = "SELECT count(*), concat(t3, 'asd') FROM meters PARTITION BY concat(t3, 'asd') INTERVAL(1d) HAVING(concat(t3, 'asd') like '%5asd' or concat(t3, 'asd') like '%3asd') order by count(*) asc limit 10000" + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(140) + + def run(self): self.prepareTestEnv() self.test_partition_by_with_interval_fill_prev_new_group_fill_error() self.test_fill_with_order_by() self.test_fill_with_order_by2() + self.test_fill_with_complex_expr() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/function_null.py b/tests/system-test/2-query/function_null.py index 
e5056b7c563..712c98d48bf 100644 --- a/tests/system-test/2-query/function_null.py +++ b/tests/system-test/2-query/function_null.py @@ -23,7 +23,7 @@ def init(self, conn, logSql, replicaVar=1): def prepare_tag_datas(self, dbname="testdb"): # prepare datas tdSql.execute( - f"create database if not exists {dbname} keep 3650 duration 1000") + f"create database if not exists {dbname} keep 3650 duration 100") tdSql.execute(f"use {dbname} ") tdSql.execute( f'''create table {dbname}.stb1 @@ -249,4 +249,4 @@ def stop(self): tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py index 384df02e8d3..7ee528841c5 100644 --- a/tests/system-test/2-query/group_partition.py +++ b/tests/system-test/2-query/group_partition.py @@ -420,7 +420,23 @@ def test_error(self): tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 where t2 = 1") tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 interval(1d)") - + def test_TS5567(self): + tdSql.query(f"select const_col from (select 1 as const_col from {self.dbname}.{self.stable}) t group by const_col") + tdSql.checkRows(50) + tdSql.query(f"select const_col from (select 1 as const_col from {self.dbname}.{self.stable}) t partition by const_col") + tdSql.checkRows(50) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) group by const_col") + tdSql.checkRows(10) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) partition by const_col") + tdSql.checkRows(10) + tdSql.query(f"select const_col as c_c from (select 1 as const_col from {self.dbname}.{self.stable}) t group by c_c") + tdSql.checkRows(50) + tdSql.query(f"select const_col as c_c from (select 1 as 
const_col from {self.dbname}.{self.stable}) t partition by c_c") + tdSql.checkRows(50) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) group by 1") + tdSql.checkRows(10) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) partition by 1") + tdSql.checkRows(10) def run(self): tdSql.prepare() self.prepare_db() @@ -453,6 +469,7 @@ def run(self): self.test_window(nonempty_tb_num) self.test_event_window(nonempty_tb_num) + self.test_TS5567() ## test old version before changed # self.test_groupby('group', 0, 0) diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py index 82841541f04..69aa7f19faa 100644 --- a/tests/system-test/2-query/irate.py +++ b/tests/system-test/2-query/irate.py @@ -78,7 +78,7 @@ def insert_datas_and_check_irate(self ,tbnums , rownums , time_step ): def prepare_tag_datas(self, dbname="testdb"): # prepare datas tdSql.execute( - f"create database if not exists {dbname} keep 3650 duration 1000") + f"create database if not exists {dbname} keep 3650 duration 100") tdSql.execute(f"use {dbname} ") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py index c5c8f6c7308..1c303b6d961 100644 --- a/tests/system-test/2-query/join.py +++ b/tests/system-test/2-query/join.py @@ -370,7 +370,7 @@ def run(self): tdLog.printNoPrefix("==========step4:cross db check") dbname1 = "db1" - tdSql.execute(f"create database {dbname1} duration 432000m") + tdSql.execute(f"create database {dbname1} duration 172800m") tdSql.execute(f"use {dbname1}") self.__create_tb(dbname=dbname1) self.__insert_data(dbname=dbname1) diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py index 395c754aa6f..15341830564 100644 --- a/tests/system-test/2-query/last_row.py +++ b/tests/system-test/2-query/last_row.py @@ -61,7 +61,7 
@@ def insert_datas_and_check_abs(self, tbnums, rownums, time_step, cache_value, db def prepare_datas(self ,cache_value, dbname="db"): tdSql.execute(f"drop database if exists {dbname} ") - create_db_sql = f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel {cache_value}" + create_db_sql = f"create database if not exists {dbname} keep 3650 duration 100 cachemodel {cache_value}" tdSql.execute(create_db_sql) tdSql.execute(f"use {dbname}") @@ -129,7 +129,7 @@ def prepare_tag_datas(self,cache_value, dbname="testdb"): tdSql.execute(f"drop database if exists {dbname} ") # prepare datas - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel {cache_value}") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 cachemodel {cache_value}") tdSql.execute(f"use {dbname} ") @@ -871,7 +871,7 @@ def basic_query(self): def initLastRowDelayTest(self, dbname="db"): tdSql.execute(f"drop database if exists {dbname} ") - create_db_sql = f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel 'NONE' REPLICA 1" + create_db_sql = f"create database if not exists {dbname} keep 3650 duration 100 cachemodel 'NONE' REPLICA 1" tdSql.execute(create_db_sql) time.sleep(3) diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py index ba6ab53fc7c..56490558384 100644 --- a/tests/system-test/2-query/max.py +++ b/tests/system-test/2-query/max.py @@ -117,7 +117,7 @@ def check_max_functions(self, tbname , col_name): def support_distributed_aggregate(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 vgroups 5") tdSql.execute(f"use {dbname} ") tdSql.execute( f'''create table {dbname}.stb1 diff --git a/tests/system-test/2-query/sample.py 
b/tests/system-test/2-query/sample.py index a43c2e635eb..efead7735b4 100644 --- a/tests/system-test/2-query/sample.py +++ b/tests/system-test/2-query/sample.py @@ -611,7 +611,7 @@ def check_sample(self , sample_query , origin_query ): def basic_sample_query(self, dbname="db"): tdSql.execute(f" drop database if exists {dbname} ") - tdSql.execute(f" create database if not exists {dbname} duration 300d ") + tdSql.execute(f" create database if not exists {dbname} duration 120d ") tdSql.execute( f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) diff --git a/tests/system-test/2-query/tsma.py b/tests/system-test/2-query/tsma.py index f05398600b0..a1638ae4cbc 100644 --- a/tests/system-test/2-query/tsma.py +++ b/tests/system-test/2-query/tsma.py @@ -693,7 +693,7 @@ def init_data(self, db: str = 'test', ctb_num: int = 10, rows_per_ctb: int = 100 "======== prepare test env include database, stable, ctables, and insert data: ") paraDict = {'dbName': db, 'dropFlag': 1, - 'vgroups': 2, + 'vgroups': 4, 'stbName': 'meters', 'colPrefix': 'c', 'tagPrefix': 't', @@ -1273,6 +1273,21 @@ def wait_query(self, sql: str, expected_row_num: int, timeout_in_seconds: float, else: tdLog.debug(f'wait query succeed: {sql} to return {expected_row_num}, got: {tdSql.getRows()}') + def wait_query_err(self, sql: str, timeout_in_seconds: float, err): + timeout = timeout_in_seconds + while timeout > 0: + try: + tdSql.query(sql, queryTimes=1) + time.sleep(1) + timeout = timeout - 1 + except: + tdSql.error(sql, err); + break + if timeout <= 0: + tdLog.exit(f'failed to wait query: {sql} to return error timeout: {timeout_in_seconds}s') + else: + tdLog.debug(f'wait query error succeed: {sql}') + def test_drop_tsma(self): function_name = sys._getframe().f_code.co_name tdLog.debug(f'-----{function_name}------') @@ -1338,15 +1353,15 @@ def test_tb_ddl_with_created_tsma(self): 
self.create_tsma('tsma1', 'test', 'meters', ['avg(c1)', 'avg(c2)'], '5m') tdSql.execute('alter table test.t0 ttl 2', queryTimes=1) tdSql.execute('flush database test') - self.wait_query('show test.tables like "%t0"', 0, wait_query_seconds) + res_tb = TSMAQCBuilder().md5('1.test.tsma1_t0') + self.wait_query_err(f'desc test.`{res_tb}`', wait_query_seconds, -2147473917) # test drop multi tables tdSql.execute('drop table test.t3, test.t4') - self.wait_query('show test.tables like "%t3"', 0, wait_query_seconds) - self.wait_query('show test.tables like "%t4"', 0, wait_query_seconds) - - tdSql.query('show test.tables like "%tsma%"') - tdSql.checkRows(0) + res_tb = TSMAQCBuilder().md5('1.test.tsma1_t3') + self.wait_query_err(f'desc test.`{res_tb}`', wait_query_seconds, -2147473917) + res_tb = TSMAQCBuilder().md5('1.test.tsma1_t4') + self.wait_query_err(f'desc test.`{res_tb}`', wait_query_seconds, -2147473917) # test drop stream tdSql.error('drop stream tsma1', -2147471088) ## TSMA must be dropped first diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py index 16b9779fa85..ebd439fd099 100644 --- a/tests/system-test/2-query/twa.py +++ b/tests/system-test/2-query/twa.py @@ -22,7 +22,7 @@ def init(self, conn, logSql, replicaVar=1): def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 100 vgroups 5") tdSql.execute( f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp,c11 int UNSIGNED, c12 bigint UNSIGNED, c13 smallint UNSIGNED, c14 tinyint UNSIGNED) diff --git a/tests/system-test/6-cluster/5dnode1mnode.py b/tests/system-test/6-cluster/5dnode1mnode.py index 61451f03b19..ae093ffb908 100644 --- 
a/tests/system-test/6-cluster/5dnode1mnode.py +++ b/tests/system-test/6-cluster/5dnode1mnode.py @@ -110,7 +110,7 @@ def five_dnode_one_mnode(self): tdSql.error("drop mnode on dnode 1;") tdSql.execute("drop database if exists db") - tdSql.execute("create database if not exists db replica 1 duration 300") + tdSql.execute("create database if not exists db replica 1 duration 100") tdSql.execute("use db") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode2mnode.py b/tests/system-test/6-cluster/5dnode2mnode.py index ca7d6a58d54..aa9c3fc053e 100644 --- a/tests/system-test/6-cluster/5dnode2mnode.py +++ b/tests/system-test/6-cluster/5dnode2mnode.py @@ -84,7 +84,7 @@ def five_dnode_two_mnode(self): # fisrt add data : db\stable\childtable\general table tdSql.execute("drop database if exists db2") - tdSql.execute("create database if not exists db2 replica 1 duration 300") + tdSql.execute("create database if not exists db2 replica 1 duration 100") tdSql.execute("use db2") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py b/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py index f0f9c955663..e2cf0d3dd3e 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py +++ b/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py @@ -73,8 +73,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti, 20) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeDrop.py 
b/tests/system-test/6-cluster/5dnode3mnodeDrop.py index aefa7a09f89..0ac28b2d167 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeDrop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeDrop.py @@ -58,7 +58,7 @@ def insert_data(self,count): # fisrt add data : db\stable\childtable\general table for couti in count: tdSql.execute("drop database if exists db%d" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py b/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py index db183d80c16..26ead3dc2b8 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py +++ b/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py @@ -78,8 +78,8 @@ def createDbTbale(self,dbcountStart,dbcountStop,stbname,chilCount): for couti in range(dbcountStart,dbcountStop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table %s diff --git a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py index 7af5982decb..2941a643fd4 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py @@ -72,8 +72,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists 
db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py index 16916034725..1d2644c65f6 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py @@ -73,8 +73,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeRoll.py b/tests/system-test/6-cluster/5dnode3mnodeRoll.py index 11a153c48f3..4816f976c6e 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRoll.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRoll.py @@ -37,7 +37,7 @@ def init(self, conn, logSql, replicaVar=1): tdSql.init(conn.cursor()) self.host = socket.gethostname() self.replicaVar = int(replicaVar) - self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300;use deldata; + self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 100;use deldata; create table deldata.stb1 (ts timestamp, c1 
int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int); create table deldata.ct1 using deldata.stb1 tags ( 1 ); insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); @@ -140,8 +140,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py index fb62110b14c..e89285c3274 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py @@ -71,8 +71,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) 
tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py index 7eaf756737e..77892a17001 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py @@ -72,8 +72,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeRCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeRCreateDb.py index 27b15d4c990..c7af2d162f3 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeRCreateDb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeRCreateDb.py @@ -71,8 +71,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git 
a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py index 9395dd2a2b7..3e20721838e 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py @@ -71,8 +71,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py index 2fb196635fb..adc8e8a3136 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py @@ -71,8 +71,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeSepVnodeStopDnodeCreateUser.py 
b/tests/system-test/6-cluster/5dnode3mnodeSepVnodeStopDnodeCreateUser.py index bcc7edf5cbb..04526971d7e 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSepVnodeStopDnodeCreateUser.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSepVnodeStopDnodeCreateUser.py @@ -73,8 +73,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py b/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py index 9d2430506fd..374381dc18a 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py @@ -77,8 +77,8 @@ def insert_data(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - tdLog.debug("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + tdLog.debug("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootAlterRep1-3.py b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootAlterRep1-3.py index 0d3b920bb48..c583149ce6a 100644 --- 
a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootAlterRep1-3.py +++ b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootAlterRep1-3.py @@ -70,8 +70,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDatarRebootAlterRep1-3.py b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDatarRebootAlterRep1-3.py index 0d3b920bb48..c583149ce6a 100644 --- a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDatarRebootAlterRep1-3.py +++ b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDatarRebootAlterRep1-3.py @@ -70,8 +70,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py index 9ab47764c8f..c817756edc4 
100644 --- a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py +++ b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py @@ -70,8 +70,8 @@ def insertData(self,countstart,countstop): for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) - print("create database if not exists db%d replica 1 duration 300" %couti) - tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + print("create database if not exists db%d replica 1 duration 100" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 100" %couti) tdSql.execute("use db%d" %couti) tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py index 52d675208ba..fb00fc08460 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py @@ -83,7 +83,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py index 9cc97543add..51923f56a93 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py @@ -88,7 +88,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if 
exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py index 4ea00ff2e28..6567b1024cd 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py @@ -89,7 +89,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py index 51da6fc7239..db45582c3b6 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py @@ -88,7 +88,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py index a111e0bab5c..64809a269b1 100644 --- 
a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py @@ -89,7 +89,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py index 66eca7143da..3d061d4f639 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py @@ -91,7 +91,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py index db9139dca2d..b573d8eafa5 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py @@ -91,7 +91,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists 
test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") time.sleep(3) tdSql.execute("use test") tdSql.execute( diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py index 4fc4507c3fc..049464b5393 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py @@ -91,7 +91,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") time.sleep(3) tdSql.execute( diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py index eb77c6d0032..b5db868e687 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py @@ -97,7 +97,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py 
b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py index 9079bedb7cb..31b8fd23261 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py @@ -97,7 +97,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py index 35cbceb2689..d7a161263e3 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py @@ -97,7 +97,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py index bf2ebadd06b..82c9dbf86c5 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py +++ 
b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py @@ -180,7 +180,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py index 25aba29235b..7f8c75fa036 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py @@ -89,7 +89,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py index 45ceb73059d..4f3b2e2defd 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py @@ -88,7 +88,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git 
a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py index 3f72f33951b..e136517a4ff 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py @@ -91,7 +91,7 @@ def check_setup_cluster_status(self): def create_db_check_vgroups(self): tdSql.execute("drop database if exists test") - tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("create database if not exists test replica 1 duration 100") tdSql.execute("use test") tdSql.execute( '''create table stb1 diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py index 4e90aefe7ca..5047ada1d10 100644 --- a/tests/system-test/7-tmq/tmq_taosx.py +++ b/tests/system-test/7-tmq/tmq_taosx.py @@ -131,14 +131,14 @@ def checkData(self): tdSql.checkData(0, 2, 1) tdSql.query("select * from ct3 order by c1 desc") - tdSql.checkRows(2) + tdSql.checkRows(5) tdSql.checkData(0, 1, 51) tdSql.checkData(0, 4, 940) tdSql.checkData(1, 1, 23) tdSql.checkData(1, 4, None) tdSql.query("select * from st1 order by ts") - tdSql.checkRows(8) + tdSql.checkRows(14) tdSql.checkData(0, 1, 1) tdSql.checkData(1, 1, 3) tdSql.checkData(4, 1, 4) @@ -180,7 +180,7 @@ def checkData(self): tdSql.checkData(6, 8, None) tdSql.query("select * from ct1") - tdSql.checkRows(4) + tdSql.checkRows(7) tdSql.query("select * from ct2") tdSql.checkRows(0) diff --git a/tests/system-test/7-tmq/ts-4674.py b/tests/system-test/7-tmq/ts-4674.py index 0b3dc1b077d..79379aaaed7 100644 --- a/tests/system-test/7-tmq/ts-4674.py +++ b/tests/system-test/7-tmq/ts-4674.py @@ -35,10 +35,11 @@ def get_leader(self): def balance_vnode(self): leader_before = self.get_leader() - tdSql.query("balance vgroup leader") + while True: leader_after = -1 tdLog.debug("balancing vgroup leader") + 
tdSql.execute("balance vgroup leader") while True: tdLog.debug("get new vgroup leader") leader_after = self.get_leader() @@ -51,6 +52,7 @@ def balance_vnode(self): break else : time.sleep(1) + tdLog.debug("leader not changed") def consume_TS_4674_Test(self): diff --git a/tests/system-test/8-stream/force_window_close_interp.py b/tests/system-test/8-stream/force_window_close_interp.py new file mode 100644 index 00000000000..f39ad82ed76 --- /dev/null +++ b/tests/system-test/8-stream/force_window_close_interp.py @@ -0,0 +1,615 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + + +class TDTestCase: + updatecfgDict = {"debugFlag": 135, "asynclog": 0} + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def get_stream_first_ts(self, table_name1, table_name2): + tdSql.query( + f'select * from {table_name1}{table_name2} order by 1 ' + ) + res_ts = tdSql.getData(0, 0) + return res_ts + + def force_window_close( + self, + interval, + partition="tbname", + funciton_name="", + funciton_name_alias="", + delete=False, + fill_value=None, + fill_history_value=None, + case_when=None, + ignore_expired=1, + ignore_update=1, + ): + # partition must be tbname, and not NONE. 
+ tdLog.info( + f"*** testing stream force_window_close+interp+every: every: {interval}, partition: {partition}, fill_history: {fill_history_value}, fill: {fill_value}, delete: {delete}, case_when: {case_when} ***" + ) + self.tdCom.subtable = False + col_value_type = "Incremental" if partition == "c1" else "random" + custom_col_index = 1 if partition == "c1" else None + self.tdCom.custom_col_val = 0 + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data( + interval=interval, + fill_history_value=fill_history_value, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f"{self.stb_name}{self.tdCom.des_table_suffix}" + + self.ctb_stream_des_table = f"{self.ctb_name}{self.tdCom.des_table_suffix}" + self.tb_stream_des_table = f"{self.tb_name}{self.tdCom.des_table_suffix}" + if partition == "tbname": + partition_elm_alias = self.tdCom.partition_tbname_alias + + elif partition == "c1": + partition_elm_alias = self.tdCom.partition_col_alias + elif partition == "abs(c1)": + partition_elm_alias = self.tdCom.partition_expression_alias + elif partition is None: + partition_elm_alias = '"no_partition"' + else: + partition_elm_alias = self.tdCom.partition_tag_alias + + if partition: + partition_elm = f"partition by {partition} {partition_elm_alias}" + else: + partition_elm = "" + if fill_value: + if "value" in fill_value.lower(): + fill_value = "VALUE,1" + + # create error stream + tdLog.info("create error stream") + sleep(10) + tdSql.error( + f"create stream itp_force_error_1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 0 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) 
fill(prev) ;" + ) + tdSql.error( + f"create stream itp_force_error_1 trigger force_window_close IGNORE EXPIRED 0 IGNORE UPDATE 1 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;" + ) + tdSql.error( + f"create stream itp_force_error_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 1 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;" + ) + tdSql.error( + f"create stream itp_force_error_1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 0 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c11,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;" + ) + + # function name : interp + trigger_mode = "force_window_close" + + # # subtable is true + # create stream add :subtable_value=stb_subtable_value or subtable_value=ctb_subtable_value + + # no subtable + # create stream super table and child table + tdLog.info("create stream super table and child table") + self.tdCom.create_stream( + stream_name=f"{self.stb_name}{self.tdCom.stream_suffix}", + des_table=self.stb_stream_des_table, + source_sql=f'select _irowts as irowts,tbname as table_name, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {partition_elm} every({self.tdCom.dataDict["interval"]}s)', + trigger_mode=trigger_mode, + fill_value=fill_value, + fill_history_value=fill_history_value, + ignore_expired=ignore_expired, + ignore_update=ignore_update, + ) + self.tdCom.create_stream( + stream_name=f"{self.ctb_name}{self.tdCom.stream_suffix}", + des_table=self.ctb_stream_des_table, + source_sql=f'select _irowts as irowts, tbname as table_name, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.ctb_name} {partition_elm} every({self.tdCom.dataDict["interval"]}s)', + trigger_mode=trigger_mode, + fill_value=fill_value, + fill_history_value=fill_history_value, + 
ignore_expired=ignore_expired, + ignore_update=ignore_update, + ) + + # creat stream set filter of tag and tbname + tdLog.info("create stream with tag and tbname filter") + tag_t1_value = self.tdCom.tag_value_list[0] + where_tag = f"where t1 = {tag_t1_value}" + where_tbname = f'where tbname="{self.ctb_name}"' + # print(f"tag: {tag_t1_value}") + + self.stb_stream_des_where_tag_table = ( + f"{self.stb_name}_where_tag{self.tdCom.des_table_suffix}" + ) + self.tdCom.create_stream( + stream_name=f"{self.stb_name}_where_tag{self.tdCom.stream_suffix}", + des_table=self.stb_stream_des_where_tag_table, + source_sql=f'select _irowts as irowts,tbname as table_name, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {where_tag} {partition_elm} every({self.tdCom.dataDict["interval"]}s)', + trigger_mode=trigger_mode, + fill_value=fill_value, + fill_history_value=fill_history_value, + ignore_expired=ignore_expired, + ignore_update=ignore_update, + ) + self.stb_stream_des_where_tbname_table = ( + f"{self.stb_name}_where_tbname{self.tdCom.des_table_suffix}" + ) + self.tdCom.create_stream( + stream_name=f"{self.stb_name}_where_tbname{self.tdCom.stream_suffix}", + des_table=self.stb_stream_des_where_tbname_table, + source_sql=f'select _irowts as irowts,tbname as table_name, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {where_tbname} {partition_elm} every({self.tdCom.dataDict["interval"]}s)', + trigger_mode=trigger_mode, + fill_value=fill_value, + fill_history_value=fill_history_value, + ignore_expired=ignore_expired, + ignore_update=ignore_update, + ) + + # set partition by tag and column + self.stb_stream_des_partition_tag_table = ( + f"{self.stb_name}_partition_tag{self.tdCom.des_table_suffix}" + ) + self.stb_stream_des_partition_column1_table = ( + f"{self.stb_name}_partition_column1{self.tdCom.des_table_suffix}" + ) + self.stb_stream_des_partition_column2_table = ( + 
f"{self.stb_name}_partition_column2{self.tdCom.des_table_suffix}" + ) + if partition: + tdLog.info("create stream with partition by tag and tbname ") + partition_elm_new = f"partition by {partition}, t1" + self.tdCom.create_stream( + stream_name=f"{self.stb_name}_partition_tag{self.tdCom.stream_suffix}", + des_table=self.stb_stream_des_partition_tag_table, + source_sql=f'select _irowts as irowts, tbname as table_name, t1 as t_t1, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {partition_elm_new} every({self.tdCom.dataDict["interval"]}s)', + trigger_mode=trigger_mode, + fill_value=fill_value, + fill_history_value=fill_history_value, + ignore_expired=ignore_expired, + ignore_update=ignore_update, + ) + partition_elm_new = f"partition by {partition}, c1" + self.tdCom.create_stream( + stream_name=f"{self.stb_name}_partition_column1{self.tdCom.stream_suffix}", + des_table=f"{self.stb_name}_partition_column1{self.tdCom.des_table_suffix}", + source_sql=f'select _irowts as irowts, tbname as table_name, c1 as c_c1, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {partition_elm_new} every({self.tdCom.dataDict["interval"]}s)', + trigger_mode=trigger_mode, + fill_value=fill_value, + fill_history_value=fill_history_value, + ignore_expired=ignore_expired, + ignore_update=ignore_update, + ) + partition_elm_new = f"partition by {partition}, c2" + self.tdCom.create_stream( + stream_name=f"{self.stb_name}_partition_column2{self.tdCom.stream_suffix}", + des_table=f"{self.stb_name}_partition_column2{self.tdCom.des_table_suffix}", + source_sql=f'select _irowts as irowts, tbname as table_name, c2 as c_c2, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.stb_name} {partition_elm_new} every({self.tdCom.dataDict["interval"]}s)', + trigger_mode=trigger_mode, + fill_value=fill_value, + fill_history_value=fill_history_value, + ignore_expired=ignore_expired, + ignore_update=ignore_update, + ) + 
+ if fill_value: + if "value" in fill_value.lower(): + fill_value = "VALUE,1" + + # create stream general table + tdLog.info("create stream general table") + self.tdCom.create_stream( + stream_name=f"{self.tb_name}{self.tdCom.stream_suffix}", + des_table=self.tb_stream_des_table, + source_sql=f'select _irowts as irowts,tbname as table_name, _isfilled as isfilled, {funciton_name} as {funciton_name_alias} from {self.tb_name} every({self.tdCom.dataDict["interval"]}s)', + trigger_mode=trigger_mode, + fill_value=fill_value, + fill_history_value=fill_history_value, + ignore_expired=ignore_expired, + ignore_update=ignore_update, + ) + + # wait and check stream_task status is ready + time.sleep(self.tdCom.dataDict["interval"]) + tdSql.query("show streams") + tdLog.info(f"tdSql.queryResult:{tdSql.queryResult},tdSql.queryRows:{tdSql.queryRows}") + localQueryResult = tdSql.queryResult + for stream_number in range(tdSql.queryRows): + stream_name = localQueryResult[stream_number][0] + tdCom.check_stream_task_status( + stream_name=stream_name, vgroups=2, stream_timeout=20,check_wal_info=False + ) + time.sleep(self.tdCom.dataDict["interval"]) + time.sleep(30) + + # insert data + self.tdCom.date_time = self.tdCom.genTs(precision=self.tdCom.precision)[0] + start_time = self.tdCom.date_time + time.sleep(1) + tdSql.query("select 1;") + start_force_ts = str(0) + for i in range(self.tdCom.range_count): + cur_time = str(self.tdCom.date_time + self.tdCom.dataDict["interval"]) + ts_value = ( + cur_time + f"+{i*10 + 30}s" + ) + # print(ts_value) + if start_force_ts == "0": + start_force_ts = cur_time + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows( + tbname=self.tdCom.ctb_name, + ts_value=ts_value, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + if i % 2 == 0: + self.tdCom.sinsert_rows( + tbname=self.tdCom.ctb_name, + ts_value=ts_value, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + if self.delete 
and i % 2 != 0: + self.tdCom.sdelete_rows( + tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value + ) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows( + tbname=self.tdCom.tb_name, + ts_value=ts_value, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + if i % 2 == 0: + self.tdCom.sinsert_rows( + tbname=self.tdCom.tb_name, + ts_value=ts_value, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + if self.delete and i % 2 != 0: + self.tdCom.sdelete_rows( + tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value + ) + self.tdCom.date_time += 1 + + if self.tdCom.subtable: + for tname in [self.stb_name, self.ctb_name]: + group_id = self.tdCom.get_group_id_from_stb(f"{tname}_output") + tdSql.query(f"select * from {self.ctb_name}") + ptn_counter = 0 + for c1_value in tdSql.queryResult: + if partition == "c1": + tbname = self.tdCom.get_subtable_wait( + f"{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}" + ) + tdSql.query(f"select count(*) from `{tbname}`") + elif partition is None: + tbname = self.tdCom.get_subtable_wait( + f"{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}" + ) + tdSql.query(f"select count(*) from `{tbname}`") + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tbname = self.tdCom.get_subtable_wait( + f"{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}" + ) + tdSql.query(f"select count(*) from `{tbname}`") + elif partition == "tbname" and ptn_counter == 0: + tbname = self.tdCom.get_subtable_wait( + f"{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}_{tname}_output_{group_id}" + ) + tdSql.query(f"select count(*) from `{tbname}`") + ptn_counter += 1 + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + group_id = self.tdCom.get_group_id_from_stb(f"{self.tb_name}_output") + tdSql.query(f"select * from {self.tb_name}") + ptn_counter = 0 + for c1_value in 
tdSql.queryResult: + if partition == "c1": + tbname = self.tdCom.get_subtable_wait( + f"{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}" + ) + tdSql.query(f"select count(*) from `{tbname}`") + elif partition is None: + tbname = self.tdCom.get_subtable_wait( + f"{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}" + ) + tdSql.query(f"select count(*) from `{tbname}`") + elif partition == "abs(c1)": + abs_c1_value = abs(c1_value[1]) + tbname = self.tdCom.get_subtable_wait( + f"{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}" + ) + tdSql.query(f"select count(*) from `{tbname}`") + elif partition == "tbname" and ptn_counter == 0: + tbname = self.tdCom.get_subtable_wait( + f"{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}_{self.tb_name}_output_{group_id}" + ) + tdSql.query(f"select count(*) from `{tbname}`") + ptn_counter += 1 + + tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) + if fill_value: + end_date_time = self.tdCom.date_time + final_range_count = self.tdCom.range_count + history_ts = ( + str(start_time) + + f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + ) + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = ( + str(end_date_time) + + f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + ) + end_ts = self.tdCom.time_cast(future_ts) + tdSql.query("select 2;") + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + self.tdCom.date_time = start_time + # update + history_ts = ( + str(start_time) + + f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + ) + start_ts = self.tdCom.time_cast(history_ts, "-") + future_ts = ( + str(end_date_time) + + 
f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s' + ) + end_ts = self.tdCom.time_cast(future_ts) + tdSql.query("select 3;") + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts) + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts) + + # get query time range using interval count windows + tdSql.query( + f'select _wstart, _wend ,last(ts) from {self.stb_name} where ts >= {start_force_ts} and ts <= {end_ts} partition by tbname interval({self.tdCom.dataDict["interval"]}s)fill ({fill_value}) ' + ) + # getData don't support negative index + end_new_ts = tdSql.getData(tdSql.queryRows - 1, 1) + end_last_but_one_ts = tdSql.getData(tdSql.queryRows - 2, 1) + # source data include that fill valuse is null and "_isfilled" column of the stream output is false + tdSql.execute( + f'insert into {self.ctb_name} (ts,c1) values("{end_new_ts}",-102) ' + ) + tdSql.execute( + f'insert into {self.tb_name} (ts,c1) values("{end_new_ts}",-51) ' + ) + tdSql.execute( + f'insert into {self.ctb_name} (ts,c1) values("{end_last_but_one_ts}",NULL) ' + ) + + tdSql.query("select 4;") + for i in range(self.tdCom.range_count): + ts_value = ( + str(self.tdCom.date_time + self.tdCom.dataDict["interval"]) + + f"+{i*10+30}s" + ) + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + self.tdCom.date_time += 1 + if self.delete: + self.tdCom.sdelete_rows( + tbname=self.ctb_name, + start_ts=self.tdCom.time_cast(start_time), + end_ts=ts_cast_delete_value, + ) + self.tdCom.sdelete_rows( + tbname=self.tb_name, + start_ts=self.tdCom.time_cast(start_time), + end_ts=ts_cast_delete_value, + ) + + # wait for the stream to process the data + # 
print(self.tdCom.dataDict["interval"]*(final_range_count+2)) + time.sleep(self.tdCom.dataDict["interval"] * (final_range_count + 2)) + + # check the data + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + tdLog.info(f"tbname:{tbname}") + tdSql.query( + f'select _wstart, _wend ,last(ts) from {tbname} where ts >= {start_force_ts} and ts <= {end_ts} partition by tbname interval({self.tdCom.dataDict["interval"]}s)fill ({fill_value}) ' + ) + start_new_ts = tdSql.getData(0, 1) + ragne_start_ts = start_new_ts + if tbname == self.ctb_name: + if partition == "tbname": + # check data for child table + tdLog.info("check data for child table ") + if fill_value != "PREV": + ragne_start_ts = self.get_stream_first_ts(tbname, self.tdCom.des_table_suffix) + self.tdCom.check_query_data( + f'select irowts, table_name, isfilled, {funciton_name_alias} from {tbname}{self.tdCom.des_table_suffix} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by irowts', + f'select _irowts as irowts ,tb1 as table_name, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from ( select *, tbname as tb1 from {tbname} where ts >= {start_force_ts} ) partition by tb1 range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by irowts', + fill_value=fill_value, + ) + elif tbname == self.stb_name: + if partition == "tbname": + # check data for super table + tdLog.info("check data for super table") + if fill_value != "PREV": + ragne_start_ts = self.get_stream_first_ts(tbname, self.tdCom.des_table_suffix) + self.tdCom.check_query_data( + f'select irowts, table_name, isfilled, {funciton_name_alias} from {tbname}{self.tdCom.des_table_suffix} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by irowts', + f'select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from {tbname} where ts >= {start_force_ts} partition by {partition} 
range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by irowts', + fill_value=fill_value, + ) + # tag and tbname filter + tdLog.info("check data for tag and tbname filter") + if fill_value != "PREV": + ragne_start_ts = self.get_stream_first_ts(self.stb_stream_des_where_tag_table, '') + self.tdCom.check_query_data( + f'select irowts, table_name, isfilled, {funciton_name_alias} from {self.stb_stream_des_where_tag_table} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by irowts', + f'select _irowts as irowts ,tbname as table_name, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from {tbname} {where_tag} and ts >= {start_force_ts} partition by {partition} range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by irowts', + fill_value=fill_value, + ) + if fill_value != "PREV": + ragne_start_ts = self.get_stream_first_ts(self.stb_stream_des_where_tbname_table, '') + self.tdCom.check_query_data( + f'select irowts, table_name, isfilled, {funciton_name_alias} from {self.stb_stream_des_where_tbname_table} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by irowts', + f'select _irowts as irowts ,tb1 as table_name, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from ( select *, tbname as tb1 from {tbname} {where_tbname} and ts >= {start_force_ts} ) partition by tb1 range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by irowts', + fill_value=fill_value, + ) + # check partition by tag and column(c1 or c2) + tdLog.info("check data for partition by tag and column") + if fill_value != "PREV": + ragne_start_ts = self.get_stream_first_ts(self.stb_stream_des_partition_tag_table, '') + self.tdCom.check_query_data( + f'select irowts, table_name, t_t1, isfilled, {funciton_name_alias} from {self.stb_stream_des_partition_tag_table} where irowts >= 
{start_force_ts} and irowts <= "{end_new_ts}" order by t_t1, irowts', + f'select _irowts as irowts ,tb1 as table_name, t1 as t_t1, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from ( select *, tbname as tb1 from {tbname} {where_tbname} and ts >= {start_force_ts} ) partition by tb1,t1 range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by t_t1, irowts', + fill_value=fill_value, + ) + if fill_value == "PREV": + self.tdCom.check_query_data( + f'select irowts, c_c1, isfilled, {funciton_name_alias} from {self.stb_stream_des_partition_column1_table} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by c_c1, irowts', + f'select _irowts as irowts , c1 as c_c1, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from {tbname} {where_tbname} and ts >= {start_force_ts} partition by {partition},c1 range("{start_new_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by c_c1, irowts', + fill_value=fill_value, + ) + self.tdCom.check_query_data( + f'select irowts, c_c2, isfilled, {funciton_name_alias} from {self.stb_stream_des_partition_column2_table} where irowts >= {start_force_ts} and irowts <= "{end_new_ts}" order by c_c2, irowts', + f'select _irowts as irowts , c2 as c_c2, _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from {tbname} {where_tbname} and ts >= {start_force_ts} partition by {partition},c2 range("{start_new_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by c_c2, irowts', + fill_value=fill_value, + ) + else: + if partition == "tbname": + # check data for general table + if fill_value != "PREV": + ragne_start_ts = self.get_stream_first_ts(self.stb_stream_des_partition_tag_table, '') + self.tdCom.check_query_data( + f'select irowts, isfilled, {funciton_name_alias} from {tbname}{self.tdCom.des_table_suffix} where irowts >= {start_force_ts} and irowts <= 
"{end_new_ts}" order by irowts', + f'select _irowts as irowts , _isfilled as isfilled , {funciton_name} as {funciton_name_alias} from ( select *, tbname as tb1 from {tbname} where ts >= {start_force_ts} ) partition by tb1 range("{ragne_start_ts}","{end_new_ts}") every({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by irowts', + fill_value=fill_value, + ) + + # Recreate a sub-table that meets the filtering "where_tag" and check if the streaming results are automatically included within it." + where_tag_ctbname = f"{self.ctb_name}_where_tag" + where_tag_ctbname_other_tag = f"{self.ctb_name}_where_tag_1" + tag_t1_value_other = abs(tag_t1_value)-1 + tdSql.execute( + f"create table {where_tag_ctbname} using {self.stb_name} (t1) tags({tag_t1_value}) " + ) + tdSql.execute( + f"create table {where_tag_ctbname_other_tag} using {self.stb_name} (t1) tags({tag_t1_value_other}) " + ) + where_tag_timestamp = self.tdCom.genTs(precision=self.tdCom.precision)[0] + where_tag_ts_start_value = str(where_tag_timestamp) + "+2s" + tdSql.query("select 5;") + self.tdCom.sinsert_rows( + tbname=where_tag_ctbname, ts_value=where_tag_ts_start_value + ) + self.tdCom.sinsert_rows( + tbname=where_tag_ctbname_other_tag, ts_value=where_tag_ts_start_value + ) + time.sleep(self.tdCom.dataDict["interval"]) + for _ in range(self.tdCom.dataDict["interval"]): + tdSql.query( + f"select distinct(table_name) from {self.stb_stream_des_where_tag_table} where table_name=\"{where_tag_ctbname}\"" + ) + if tdSql.queryRows > 0: + if tdSql.checkDataNotExit(0,0, where_tag_ctbname): + break + else: + time.sleep(1) + + if self.delete: + self.tdCom.sdelete_rows( + tbname=self.ctb_name, start_ts=start_ts, end_ts=ts_cast_delete_value + ) + self.tdCom.sdelete_rows( + tbname=self.tb_name, start_ts=start_ts, end_ts=ts_cast_delete_value + ) + for tbname in [self.stb_name, self.ctb_name, self.tb_name]: + if tbname != self.tb_name: + if "value" in fill_value.lower(): + fill_value = ( + 
"VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11" + ) + if partition == "tbname": + self.tdCom.check_query_data( + f"select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart", + f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', + fill_value=fill_value, + ) + else: + self.tdCom.check_query_data( + f"select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`", + f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', + fill_value=fill_value, + ) + + else: + if "value" in fill_value.lower(): + fill_value = "VALUE,1,2,3,6,7,8,9,10,11" + if partition == "tbname": + self.tdCom.check_query_data( + f"select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart", + f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', + fill_value=fill_value, + ) + else: + self.tdCom.check_query_data( + f"select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`", + f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is 
not Null order by wstart,`min(c1)`', + fill_value=fill_value, + ) + + def run(self): + for fill_value in ["PREV", "VALUE","NULL"]: + self.force_window_close( + interval=10, + partition="tbname", + funciton_name="interp(c1)", + funciton_name_alias="intp_c1", + delete=False, + ignore_update=1, + fill_value=fill_value, + ) + self.force_window_close( + interval=8, + partition="tbname", + funciton_name="interp(c1)", + funciton_name_alias="intp_c1", + delete=False, + ignore_update=1, + fill_value="PREV", + ) + # self.force_window_close(interval=random.randint(10, 15), partition="c1", ignore_update=1) + # self.force_window_close(interval=random.randint(10, 15), partition="abs(c1)", ignore_update=1) + # self.force_window_close(interval=random.randint(10, 15), partition=None, delete=True) + # self.force_window_close(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end') + # self.force_window_close(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL") + # for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + # # for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: + # self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value) + # self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +event = threading.Event() + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/8-stream/force_window_close_interval.py b/tests/system-test/8-stream/force_window_close_interval.py new file mode 100644 index 00000000000..b75579d2201 --- /dev/null +++ 
b/tests/system-test/8-stream/force_window_close_interval.py @@ -0,0 +1,300 @@ +import sys +import threading +from util.log import * +from util.sql import * +from util.cases import * +from util.common import * + + +class TDTestCase: + updatecfgDict = {"debugFlag": 135, "asynclog": 0} + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tdCom = tdCom + + def get_source_firt_ts(self, table_name1): + tdSql.query( + f'select cast(first(ts) as bigint) from {table_name1} order by 1' + ) + # getData don't support negative index + res_ts = tdSql.getData(0, 0) + return res_ts + + def get_source_last_ts(self, table_name1): + tdSql.query( + f'select cast(last(ts) as bigint) from {table_name1} order by 1' + ) + # getData don't support negative index + res_ts = tdSql.getData(0, 0) + return res_ts + + def get_stream_first_win_ts(self, table_name1): + tdSql.query( + f'select _wstart, count(*) from {table_name1} interval({self.tdCom.dataDict["interval"]}s) order by 1' + ) + res_ts = tdSql.getData(0, 0) + return res_ts + + def insert_data(self, custom_col_index, col_value_type): + self.tdCom.date_time = self.tdCom.genTs(precision=self.tdCom.precision)[0] + time.sleep(1) + min_new_ts = 0 + for i in range(self.tdCom.range_count): + cur_time = str(self.tdCom.date_time + self.tdCom.dataDict["interval"]) + ts_value = ( + cur_time + f"+{i * 5 + 30}s" + ) + if min_new_ts == 0: + min_new_ts = ts_value + + ts_cast_delete_value = self.tdCom.time_cast(ts_value) + self.tdCom.sinsert_rows( + tbname=self.tdCom.ctb_name, + ts_value=ts_value, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + if i % 2 == 0 and min_new_ts != ts_value: + self.tdCom.sinsert_rows( + tbname=self.tdCom.ctb_name, + ts_value=ts_value, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + if self.delete and i % 2 != 0: + self.tdCom.sdelete_rows( + 
tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value + ) + self.tdCom.date_time += 1 + self.tdCom.sinsert_rows( + tbname=self.tdCom.tb_name, + ts_value=ts_value, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + if i % 2 == 0 and min_new_ts != ts_value: + self.tdCom.sinsert_rows( + tbname=self.tdCom.tb_name, + ts_value=ts_value, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + if self.delete and i % 2 != 0: + self.tdCom.sdelete_rows( + tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value + ) + self.tdCom.date_time += 1 + cur_time = str(self.tdCom.date_time + self.tdCom.dataDict["interval"]) + max_new_ts = (cur_time + f"+{self.tdCom.range_count * 10 + 30}s") + self.tdCom.sinsert_rows( + tbname=self.tdCom.ctb_name, + ts_value=max_new_ts, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + self.tdCom.sinsert_rows( + tbname=self.tdCom.tb_name, + ts_value=max_new_ts, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + return (min_new_ts, max_new_ts) + + def insert_disorder_data(self, custom_col_index, col_value_type): + min_ts = self.get_source_firt_ts(self.tb_name) + max_ts = self.get_source_last_ts(self.tb_name) + min_ts_str = str(min_ts) + f"-10000s" + max_ts_str = str(max_ts) + f"+10000s" + self.tdCom.sinsert_rows( + tbname=self.tdCom.ctb_name, + ts_value=min_ts_str, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + self.tdCom.sinsert_rows( + tbname=self.tdCom.tb_name, + ts_value=min_ts_str, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + self.tdCom.sinsert_rows( + tbname=self.tdCom.ctb_name, + ts_value=max_ts_str, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + self.tdCom.sinsert_rows( + tbname=self.tdCom.tb_name, + ts_value=max_ts_str, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + + def force_window_close( + self, + interval, + 
partition="tbname", + delete=False, + fill_value=None, + filter=None + ): + # partition must be tbname, and not NONE. + tdLog.info( + f"*** testing stream force_window_close + interval + fill. partition: {partition}, interval: {interval}, fill: {fill_value}, delete: {delete} ***" + ) + fwc_downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "twa(c7)", "count(c8)", "elapsed(ts)", "timediff(1, 0, 1h)", "timezone()","min(t1)", "max(t2)", "sum(t3)", + "twa(t7)", "count(t8)"] + fwc_stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', fwc_downsampling_function_list))) + fwc_tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', fwc_downsampling_function_list[0:7]))) + fwc_stb_source_select_str = ','.join(fwc_downsampling_function_list) + fwc_tb_source_select_str = ','.join(fwc_downsampling_function_list[0:7]) + + fill_history_value = 0 + ignore_expired = 1 + ignore_update = 1 + self.tdCom.subtable = False + col_value_type = "Incremental" if partition == "c1" else "random" + custom_col_index = 1 if partition == "c1" else None + self.tdCom.custom_col_val = 0 + self.delete = delete + self.tdCom.case_name = sys._getframe().f_code.co_name + self.tdCom.prepare_data( + interval=interval, + custom_col_index=custom_col_index, + col_value_type=col_value_type, + ) + self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "") + self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "") + self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "") + self.stb_stream_des_table = f"{self.stb_name}{self.tdCom.des_table_suffix}" + + self.ctb_stream_des_table = f"{self.ctb_name}{self.tdCom.des_table_suffix}" + self.tb_stream_des_table = f"{self.tb_name}{self.tdCom.des_table_suffix}" + + if partition: + partition_elm = f"partition by {partition}" + else: + partition_elm = "" + + query_partition_elm = partition_elm + + if fill_value: + if "value" in fill_value.lower(): + stb_fill_value='VALUE,1,2,3,4,5,6,1,2,3,4,5' + 
tb_fill_value='VALUE,1,2,3,4,5,6' + else: + stb_fill_value=fill_value + tb_fill_value=fill_value + query_stb_fill_elm = f"fill({stb_fill_value})" + query_tb_fill_elm = f"fill({tb_fill_value})" + else: + query_stb_fill_elm = "" + query_tb_fill_elm = "" + stb_fill_value = None + tb_fill_value=None + + where_elm = "where 1=1" + if filter: + where_elm = f" and {filter}" + + trigger_mode = "force_window_close" + + # no subtable + # create stream super table and child table + tdLog.info("create stream super table and child table") + self.tdCom.create_stream( + stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', + des_table=self.stb_stream_des_table, + source_sql=f'select _wstart AS wstart, {fwc_stb_source_select_str} from {self.stb_name} {where_elm} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', + trigger_mode=trigger_mode, + fill_value=stb_fill_value, + fill_history_value=fill_history_value, + ignore_expired=ignore_expired, + ignore_update=ignore_update, + ) + + self.tdCom.create_stream( + stream_name=f"{self.tb_name}{self.tdCom.stream_suffix}", + des_table=self.tb_stream_des_table, + source_sql=f'select _wstart AS wstart, {fwc_tb_source_select_str} from {self.tb_name} {where_elm} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', + trigger_mode=trigger_mode, + fill_value=tb_fill_value, + fill_history_value=fill_history_value, + ignore_expired=ignore_expired, + ignore_update=ignore_update, + ) + + # wait and check stream_task status is ready + tdSql.query("show streams") + tdLog.info(f"tdSql.queryResult:{tdSql.queryResult},tdSql.queryRows:{tdSql.queryRows}") + localQueryResult = tdSql.queryResult + for stream_number in range(tdSql.queryRows): + stream_name = localQueryResult[stream_number][0] + tdCom.check_stream_task_status( + stream_name=stream_name, vgroups=2, stream_timeout=20,check_wal_info=False + ) + time.sleep(self.tdCom.dataDict["interval"]) + time.sleep(20) + + # insert data + tdLog.info("insert data") + start_new_ts, temp = 
self.insert_data(custom_col_index, col_value_type) + time.sleep(self.tdCom.dataDict["interval"] * 2) + tdLog.info("insert data") + temp, end_new_ts = self.insert_data(custom_col_index, col_value_type) + + #history and future + self.insert_disorder_data(custom_col_index, col_value_type) + + time.sleep(self.tdCom.dataDict["interval"] * 6 * 2) + + tdLog.info("check data") + # check the data + where_elm = f'{where_elm} and ts >= {start_new_ts} and ts <= {end_new_ts}' + for tbname in [self.stb_name, self.tb_name]: + if fill_value: + query_first_win_ts = self.get_stream_first_win_ts(tbname) + query_where_elm = f'where wstart >= "{query_first_win_ts}"' + stream_where_elm = f'where wstart <= {end_new_ts}' + else: + query_where_elm = "" + stream_where_elm = "" + + # check data + tdLog.info(f"check data for table {tbname}") + if tbname == self.stb_name: + self.tdCom.check_query_data( + f'select wstart, {fwc_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} {stream_where_elm} order by wstart', + f'select * from (select _wstart AS wstart, {fwc_stb_source_select_str} from {tbname} {where_elm} {query_partition_elm} interval({self.tdCom.dataDict["interval"]}s) {query_stb_fill_elm} order by wstart) {query_where_elm}', + sorted=True + ) + else: + self.tdCom.check_query_data( + f'select wstart, {fwc_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} {stream_where_elm} order by wstart', + f'select * from (select _wstart AS wstart, {fwc_tb_source_select_str} from {tbname} {where_elm} {query_partition_elm} interval({self.tdCom.dataDict["interval"]}s) {query_tb_fill_elm} order by wstart) {query_where_elm}', + sorted=True + ) + + def run(self): + for fill_value in ["VALUE", "NULL", "PREV", None]: + self.force_window_close( + interval=5, + partition="tbname", + delete=True, + fill_value=fill_value, + ) + + def stop(self): + tdLog.info("stop========================================") + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + 
+event = threading.Event() + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 9defcd083aa..0d40544be8e 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -24,6 +24,8 @@ import socket import threading import importlib +print(f"Python version: {sys.version}") +print(f"Version info: {sys.version_info}") import toml sys.path.append("../pytest") @@ -689,4 +691,7 @@ def runOnPreviousCluster(host, config, fileName): if asan: # tdDnodes.StopAllSigint() tdLog.info("Address sanitizer mode finished") + else: + tdDnodes.stopAll() + tdLog.info("stop all td process finished") sys.exit(0) diff --git a/tests/taosc_test/CMakeLists.txt b/tests/taosc_test/CMakeLists.txt index 3ea6964462e..c16fe592718 100644 --- a/tests/taosc_test/CMakeLists.txt +++ b/tests/taosc_test/CMakeLists.txt @@ -5,15 +5,15 @@ FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64 /usr/local/taos/driver/) FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64 /usr/local/taos/driver/) -IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) +IF(HEADER_GTEST_INCLUDE_DIR AND(LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) MESSAGE(STATUS "gTest library found, build os test") INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) - ENDIF() aux_source_directory(src OS_SRC) + # taoscTest add_executable(taoscTest "taoscTest.cpp") target_link_libraries(taoscTest taos os gtest_main) @@ -25,4 +25,3 @@ add_test( NAME taoscTest COMMAND taoscTest ) - diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 87630b773b3..5e93be695d7 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -218,3 +218,75 @@ ELSE() ) ENDIF() ENDIF() + +IF(TD_BUILD_KEEPER) + MESSAGE("") + 
MESSAGE("${Green} build taoskeeper, current platform is ${PLATFORM_ARCH_STR} ${ColourReset}") + + EXECUTE_PROCESS( + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/keeper + COMMAND git rev-parse HEAD + OUTPUT_VARIABLE taoskeeper_commit_sha1 + ) + + IF("${taoskeeper_commit_sha1}" STREQUAL "") + SET(taoskeeper_commit_sha1 "unknown") + ELSE() + STRING(STRIP "${taoskeeper_commit_sha1}" taoskeeper_commit_sha1) + ENDIF() + + SET(taos_version ${TD_VER_NUMBER}) + MESSAGE("${Green} taoskeeper will use ${taos_version} and commit ${taoskeeper_commit_sha1} as version ${ColourReset}") + MESSAGE(" current source dir is ${CMAKE_CURRENT_SOURCE_DIR}") + + IF(TD_WINDOWS) + MESSAGE("Building taoskeeper on Windows") + INCLUDE(ExternalProject) + ExternalProject_Add(taoskeeper + PREFIX "taoskeeper" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper + BUILD_ALWAYS off + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND + COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + INSTALL_COMMAND + COMMAND cmake -E echo "Compressing taoskeeper.exe" + COMMAND cmake -E time upx taoskeeper.exe + COMMAND cmake -E echo "Copy taoskeeper.exe" + COMMAND cmake -E copy taoskeeper.exe ${CMAKE_BINARY_DIR}/build/bin/taoskeeper.exe + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taoskeeper.toml" + COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/ + ) + ELSE() + IF(TD_DARWIN) + MESSAGE("Building taoskeeper on macOS") + ELSE() + MESSAGE("Building taoskeeper on Linux") + ENDIF() + + INCLUDE(ExternalProject) + ExternalProject_Add(taoskeeper + PREFIX "taoskeeper" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper + 
BUILD_ALWAYS off + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND + COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + INSTALL_COMMAND + COMMAND cmake -E echo "Copy taoskeeper" + COMMAND cmake -E copy taoskeeper ${CMAKE_BINARY_DIR}/build/bin + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taoskeeper.toml" + COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taoskeeper.service" + COMMAND cmake -E copy ./taoskeeper.service ${CMAKE_BINARY_DIR}/test/cfg/ + ) + ENDIF() +ENDIF() diff --git a/tools/keeper/.dockerignore b/tools/keeper/.dockerignore new file mode 100644 index 00000000000..cff5a58d80a --- /dev/null +++ b/tools/keeper/.dockerignore @@ -0,0 +1 @@ +!taoskeeper diff --git a/tools/keeper/.gitignore b/tools/keeper/.gitignore new file mode 100644 index 00000000000..2cba3f06c8b --- /dev/null +++ b/tools/keeper/.gitignore @@ -0,0 +1,22 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +*.html +*.data + +# Dependency directories (remove the comment below to include it) +vendor +/debug/ +/.idea/ +/taoskeeper +/test_data +/.vscode diff --git a/tools/keeper/CHANGELOG.md b/tools/keeper/CHANGELOG.md new file mode 100644 index 00000000000..6775343b0f5 --- /dev/null +++ b/tools/keeper/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +All notable changes to this project will be documented in this file. 
+ +The format is based on [Conventional Changelog](https://www.conventionalcommits.org/en/v1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Footnote + +This changelog is automatically generated. diff --git a/tools/keeper/Dockerfile b/tools/keeper/Dockerfile new file mode 100644 index 00000000000..c38bdc1acbd --- /dev/null +++ b/tools/keeper/Dockerfile @@ -0,0 +1,16 @@ +FROM golang:1.18.6-alpine as builder +LABEL maintainer = "Linhe Huo " + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct +RUN go mod tidy && go build + +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +RUN chmod u+rw /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] diff --git a/tools/keeper/DockerfileCloud b/tools/keeper/DockerfileCloud new file mode 100644 index 00000000000..11137f61c23 --- /dev/null +++ b/tools/keeper/DockerfileCloud @@ -0,0 +1,24 @@ +FROM golang:1.18.6-alpine as builder +LABEL maintainer = "TDengine" + +ARG latestv +ARG gitinfo +ARG buildinfo + +RUN apk --no-cache add upx && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct + +RUN echo "$latestv $gitinfo $buildinfo" +RUN go mod tidy && go build -ldflags="-s -w -X 'github.com/taosdata/taoskeeper/version.Version=${latestv}' -X 'github.com/taosdata/taoskeeper/version.Gitinfo=${gitinfo}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${buildinfo}'" -o taoskeeper . 
&& upx -9 taoskeeper +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +RUN chmod u+rw /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] diff --git a/tools/keeper/README-CN.md b/tools/keeper/README-CN.md new file mode 100644 index 00000000000..770e9513c1a --- /dev/null +++ b/tools/keeper/README-CN.md @@ -0,0 +1,267 @@ +# TaosKeeper + +taosKeeper 是 TDengine 各项监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。并且 taosKeeper 企业版支持多种收集器,可以方便进行监控数据的展示。 + +taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。 + +## 构建 + +### 获取源码 + +从 GitHub 克隆源码: + +```sh +git clone https://github.com/taosdata/TDengine +cd TDengine/tools/keeper +``` + +### 编译 + +taosKeeper 使用 `GO` 语言编写,在构建前需要配置好 `GO` 语言开发环境。 + +```sh +go mod tidy +go build +``` + +## 安装 + +如果是自行构建的项目,仅需要拷贝 `taoskeeper` 文件到你的 `PATH` 中。 + +```sh +sudo install taoskeeper /usr/bin/ +``` + +## 启动 + +在启动前,应该做好如下配置: +在 `/etc/taos/taoskeeper.toml` 配置 TDengine 连接参数以及监控指标前缀等其他信息。 + +```toml +# gin 框架是否启用 debug +debug = false + +# 服务监听端口, 默认为 6043 +port = 6043 + +# 日志级别,包含 panic、error、info、debug、trace等 +loglevel = "info" + +# 程序中使用协程池的大小 +gopoolsize = 50000 + +# 查询 TDengine 监控数据轮询间隔 +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" + +# 需要被监控的 taosAdapter +[taosAdapter] +address = ["127.0.0.1:6041"] + +[metrics] +# 监控指标前缀 +prefix = "taos" + +# 存放监控数据的数据库 +database = "log" + +# 指定需要监控的普通表 +tables = [] + +[environment] +# 是否在容器中运行,影响 taosKeeper 自身的监控数据 +incgroup = false +``` + +现在可以启动服务,输入: + +```sh +taoskeeper +``` + +如果你使用 `systemd`,复制 `taoskeeper.service` 到 `/lib/systemd/system/`,并启动服务。 + +```sh +sudo cp taoskeeper.service /lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start taoskeeper +``` + +让 taosKeeper 随系统开机自启动。 + +```sh +sudo systemctl enable taoskeeper +``` + +如果使用 `systemd`,你可以使用如下命令完成安装。 + +```sh +go mod tidy +go build +sudo 
install taoskeeper /usr/bin/ +sudo cp taoskeeper.service /lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start taoskeeper +sudo systemctl enable taoskeeper +``` + +## Docker + +如下介绍了如何在 docker 中构建 taosKeeper: + +在构建前请配置好 `./config/taoskeeper.toml` 中合适的参数,并编辑 Dockerfile ,示例如下。 + +```dockerfile +FROM golang:1.18.6-alpine as builder + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct +RUN go mod tidy && go build + +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] +``` + +如果已经有 taosKeeper 可执行文件,在配置好 `taoskeeper.toml` 后你可以使用如下方式构建: + +```dockerfile +FROM ubuntu:18.04 +RUN mkdir -p /etc/taos +COPY ./taoskeeper /usr/bin/ +COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] +``` + +## 使用(**企业版**) + +### Prometheus (by scrape) + +taosKeeper 可以像 `node-exporter` 一样向 Prometheus 提供监控指标。\ +在 `/etc/prometheus/prometheus.yml` 添加配置: + +```yml +global: + scrape_interval: 5s + +scrape_configs: + - job_name: "taoskeeper" + static_configs: + - targets: ["taoskeeper:6043"] +``` + +现在使用 PromQL 查询即可以显示结果,比如要查看指定主机(通过 FQDN 正则匹配表达式筛选)硬盘使用百分比: + +```promql +taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"} +``` + +你可以使用 `docker-compose` 测试完整的链路。 +`docker-compose.yml`示例: + +```yml +version: "3.7" + +services: + tdengine: + image: tdengine/tdengine + environment: + TAOS_FQDN: tdengine + volumes: + - taosdata:/var/lib/taos + taoskeeper: + build: ./ + depends_on: + - tdengine + environment: + TDENGINE_HOST: tdengine + TDENGINE_PORT: 6041 + volumes: + - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml + ports: + - 6043:6043 + prometheus: + image: prom/prometheus + volumes: + - ./prometheus/:/etc/prometheus/ + ports: + - 9090:9090 +volumes: + taosdata: +``` + +启动: + +```sh +docker-compose up -d +``` + +现在通过访问 来查询结果。访问[simple 
dashboard](https://grafana.com/grafana/dashboards/15164) 来查看TaosKeeper + Prometheus + Grafana 监控 TDengine 的快速启动实例。 + +### Telegraf + +如果使用 telegraf 来收集各个指标,仅需要在配置中增加: + +```toml +[[inputs.prometheus]] +## An array of urls to scrape metrics from. +urls = ["http://taoskeeper:6043/metrics"] +``` + +可以通过 `docker-compose` 来测试 + +```sh +docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper +``` + +由于可以在 `telegraf.conf` 设置日志为标准输出: + +```toml +[[outputs.file]] +files = ["stdout"] +``` + +所以你可以通过 `docker-compose logs` 在标准输出中追踪 TDengine 各项指标。 + +```sh +docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf +``` + +### Zabbix + +1. 导入 zabbix 模板文件 `zbx_taos_keeper_templates.xml`。 +2. 使用 `TDengine` 模板来创建主机,修改宏 `{$TAOSKEEPER_HOST}` 和 `{$COLLECTION_INTERVAL}`。 +3. 等待并查看到自动创建的条目。 + +### 常见问题 + +* 启动报错,显示connection refused + + **解析**:taosKeeper 依赖 restful 接口查询数据,请检查 taosAdapter 是否正常运行或 taoskeeper.toml 中 taosAdapter 地址是否正确。 + +* taosKeeper 监控不同 TDengine 显示的检测指标数目不一致? + + **解析**:如果 TDengine 中未创建某项指标,taoskeeper 不能获取对应的检测结果。 + +* 不能接收到 TDengine 的监控日志。 + + **解析**: 修改 `/etc/taos/taos.cfg` 文件并增加如下参数: + + ```cfg + monitor 1 // 启用monitor + monitorInterval 30 // 发送间隔 (s) + monitorFqdn localhost // 接收消息的FQDN,默认为空 + monitorPort 6043 // 接收消息的端口号 + monitorMaxLogs 100 // 每个监控间隔缓存的最大日志数量 + ``` diff --git a/tools/keeper/README.md b/tools/keeper/README.md new file mode 100644 index 00000000000..18e351f1603 --- /dev/null +++ b/tools/keeper/README.md @@ -0,0 +1,273 @@ +# TaosKeeper + +TDengine Metrics Exporter for Kinds of Collectors, you can obtain the running status of TDengine by performing several simple configurations. + +This tool uses TDengine RESTful API, so you could just build it without TDengine client. 
+ +## Build + +### Get the source codes + +```sh +git clone https://github.com/taosdata/TDengine +cd TDengine/tools/keeper +``` + +### compile + +```sh +go mod tidy +go build +``` + +## Install + +If you build the tool by your self, just copy the `taoskeeper` binary to your `PATH`. + +```sh +sudo install taoskeeper /usr/bin/ +``` + +## Start + +Before start, you should configure some options like database ip, port or the prefix and others for exported metrics. + +in `/etc/taos/taoskeeper.toml`. + +```toml +# Start with debug middleware for gin +debug = false + +# Listen port, default is 6043 +port = 6043 + +# log level +loglevel = "info" + +# go pool size +gopoolsize = 50000 + +# interval for TDengine metrics +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" + +# list of taosAdapter that need to be monitored +[taosAdapter] +address = ["127.0.0.1:6041"] + +[metrics] +# metrics prefix in metrics names. +prefix = "taos" + +# database for storing metrics data +database = "log" + +# export some tables that are not super table +tables = [] + +[environment] +# Whether running in cgroup. +incgroup = false +``` + +Now you could run the tool: + +```sh +taoskeeper +``` + +If you use `systemd`, copy the `taoskeeper.service` to `/lib/systemd/system/` and start the service. 
+ +```sh +sudo cp taoskeeper.service /lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start taoskeeper +``` + +To start taoskeeper whenever os rebooted, you should enable the systemd service: + +```sh +sudo systemctl enable taoskeeper +``` + +So if use `systemd`, you'd better install it with these lines all-in-one: + +```sh +go mod tidy +go build +sudo install taoskeeper /usr/bin/ +sudo cp taoskeeper.service /lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start taoskeeper +sudo systemctl enable taoskeeper +``` + +## Docker + +Here is an example to show how to build this tool in docker: + +Before building, you should configure `./config/taoskeeper.toml` with proper parameters and edit Dockerfile. Take following as example. + +```dockerfile +FROM golang:1.18.2 as builder + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct +RUN go mod tidy && go build + +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] +``` + +If you already have taosKeeper binary file, you can build this tool like: + +```dockerfile +FROM ubuntu:18.04 +RUN mkdir -p /etc/taos +COPY ./taoskeeper /usr/bin/ +COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] +``` + +## Usage (**Enterprise Edition**) + +### Prometheus (by scrape) + +It's now act as a prometheus exporter like `node-exporter`. 
+ +Here's how to add this in scrape configs of `/etc/prometheus/prometheus.yml`: + +```yml +global: + scrape_interval: 5s + +scrape_configs: + - job_name: "taoskeeper" + static_configs: + - targets: [ "taoskeeper:6043" ] +``` + +Now PromQL query will show the right result, for example, to show disk used percent in an specific host with FQDN regex +match expression: + +```promql +taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"} +``` + +You can use `docker-compose` with the current `docker-compose.yml` to test the whole stack. + +Here is the `docker-compose.yml`: + +```yml +version: "3.7" + +services: + tdengine: + image: tdengine/tdengine + environment: + TAOS_FQDN: tdengine + volumes: + - taosdata:/var/lib/taos + taoskeeper: + build: ./ + depends_on: + - tdengine + environment: + TDENGINE_HOST: tdengine + TDENGINE_PORT: 6041 + volumes: + - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml + ports: + - 6043:6043 + prometheus: + image: prom/prometheus + volumes: + - ./prometheus/:/etc/prometheus/ + ports: + - 9090:9090 +volumes: + taosdata: + +``` + +Start the stack: + +```sh +docker-compose up -d +``` + +Now you point to (if you have not started a prometheus server by yourself) and query. + +For a quick demo with TaosKeeper + Prometheus + Grafana, we provide +a [simple dashboard](https://grafana.com/grafana/dashboards/15164) to monitor TDengine. + +### Telegraf + +If you are using telegraf to collect metrics, just add inputs like this: + +```toml +[[inputs.prometheus]] + ## An array of urls to scrape metrics from. 
+ urls = ["http://taoskeeper:6043/metrics"] +``` + +You can test it with `docker-compose`: + +```sh +docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper +``` + +Since we have set an stdout file output in `telegraf.conf`: + +```toml +[[outputs.file]] + files = ["stdout"] +``` + +So you can track with TDengine metrics in standard output with `docker-compose logs`: + +```sh +docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf +``` + +### Zabbix + +1. Import the zabbix template file `zbx_taos_keeper_templates.xml`. +2. Use the template `TDengine` to create the host and modify the macros `{$TAOSKEEPER_HOST}` + and `{$COLLECTION_INTERVAL}`. +3. Waiting for monitoring items to be created automatically. + +### FAQ + +* Error occurred: Connection refused, while taosKeeper was starting + + **Answer**: taoskeeper relies on restful interfaces to query data. Check whether the taosAdapter is running or whether + the taosAdapter address in taoskeeper.toml is correct. + +* Why detection metrics displayed by different TDengine's inconsistent with taoskeeper monitoring? + + **Answer**: If a metric is not created in TDengine, taoskeeper cannot get the corresponding test results. + +* Cannot receive log from TDengine server. 
+ + **Answer**: Modify `/etc/taos/taos.cfg` file and add parameters like: + + ```cfg + monitor 1 // start monitor + monitorInterval 30 // send log interval (s) + monitorFqdn localhost + monitorPort 6043 // taosKeeper port + monitorMaxLogs 100 + ``` diff --git a/tools/keeper/api/adapter2.go b/tools/keeper/api/adapter2.go new file mode 100644 index 00000000000..645b9a176b5 --- /dev/null +++ b/tools/keeper/api/adapter2.go @@ -0,0 +1,260 @@ +package api + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var adapterLog = log.GetLogger("ADP") + +type adapterReqType int + +const ( + rest adapterReqType = iota // 0 - rest + ws // 1 - ws +) + +type Adapter struct { + username string + password string + host string + port int + usessl bool + conn *db.Connector + db string + dbOptions map[string]interface{} +} + +func NewAdapter(c *config.Config) *Adapter { + return &Adapter{ + username: c.TDengine.Username, + password: c.TDengine.Password, + host: c.TDengine.Host, + port: c.TDengine.Port, + usessl: c.TDengine.Usessl, + db: c.Metrics.Database.Name, + dbOptions: c.Metrics.Database.Options, + } +} + +func (a *Adapter) Init(c gin.IRouter) error { + if err := a.createDatabase(); err != nil { + return fmt.Errorf("create database error:%s", err) + } + if err := a.initConnect(); err != nil { + return fmt.Errorf("init db connect error:%s", err) + } + if err := a.createTable(); err != nil { + return fmt.Errorf("create table error:%s", err) + } + c.POST("/adapter_report", a.handleFunc()) + return nil +} + +func (a *Adapter) handleFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + adapterLog := adapterLog.WithFields( + 
logrus.Fields{config.ReqIDKey: qid}, + ) + + if a.conn == nil { + adapterLog.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + adapterLog.Errorf("get adapter report data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get adapter report data error. %s", err)}) + return + } + if adapterLog.Logger.IsLevelEnabled(logrus.TraceLevel) { + adapterLog.Tracef("received adapter report data:%s", string(data)) + } + + var report AdapterReport + if err = json.Unmarshal(data, &report); err != nil { + adapterLog.Errorf("parse adapter report data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse adapter report data error: %s", err)}) + return + } + sql := a.parseSql(report) + adapterLog.Debugf("adapter report sql:%s", sql) + + if _, err = a.conn.Exec(context.Background(), sql, qid); err != nil { + adapterLog.Errorf("adapter report error, msg:%s", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func (a *Adapter) initConnect() error { + conn, err := db.NewConnectorWithDb(a.username, a.password, a.host, a.port, a.db, a.usessl) + if err != nil { + adapterLog.Dup().Errorf("init db connect error, msg:%s", err) + return err + } + a.conn = conn + return nil +} + +func (a *Adapter) parseSql(report AdapterReport) string { + // reqType: 0: rest, 1: websocket + restTbName := a.tableName(report.Endpoint, rest) + wsTbName := a.tableName(report.Endpoint, ws) + ts := time.Unix(report.Timestamp, 0).Format(time.RFC3339) + metric := report.Metric + return fmt.Sprintf("insert into %s using adapter_requests tags ('%s', %d) "+ + "values('%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d) "+ + "%s using adapter_requests tags ('%s', %d) "+ + "values('%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, 
%d, %d)", + restTbName, report.Endpoint, rest, ts, metric.RestTotal, metric.RestQuery, metric.RestWrite, metric.RestOther, + metric.RestInProcess, metric.RestSuccess, metric.RestFail, metric.RestQuerySuccess, metric.RestQueryFail, + metric.RestWriteSuccess, metric.RestWriteFail, metric.RestOtherSuccess, metric.RestOtherFail, + metric.RestQueryInProcess, metric.RestWriteInProcess, + wsTbName, report.Endpoint, ws, ts, metric.WSTotal, + metric.WSQuery, metric.WSWrite, metric.WSOther, metric.WSInProcess, metric.WSSuccess, metric.WSFail, + metric.WSQuerySuccess, metric.WSQueryFail, metric.WSWriteSuccess, metric.WSWriteFail, metric.WSOtherSuccess, + metric.WSOtherFail, metric.WSQueryInProcess, metric.WSWriteInProcess) +} + +func (a *Adapter) tableName(endpoint string, reqType adapterReqType) string { + var tbname string + if reqType == rest { + tbname = fmt.Sprintf("adapter_req_%s_%s", endpoint, "rest") + } else { + tbname = fmt.Sprintf("adapter_req_%s_%s", endpoint, "ws") + } + + if len(tbname) <= util.MAX_TABLE_NAME_LEN { + return util.ToValidTableName(tbname) + } else { + sum := md5.Sum([]byte(fmt.Sprintf("%s%d", endpoint, reqType))) + return fmt.Sprintf("adapter_req_%s", hex.EncodeToString(sum[:])) + } +} + +func (a *Adapter) createDatabase() error { + qid := util.GetQidOwn() + + adapterLog := adapterLog.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + conn, err := db.NewConnector(a.username, a.password, a.host, a.port, a.usessl) + if err != nil { + return fmt.Errorf("connect to database error, msg:%s", err) + } + defer func() { _ = conn.Close() }() + sql := a.createDBSql() + adapterLog.Infof("create database, sql:%s", sql) + _, err = conn.Exec(context.Background(), sql, util.GetQidOwn()) + if err != nil { + adapterLog.Errorf("create database error, msg:%s", err) + return err + } + + return err +} + +func (a *Adapter) createDBSql() string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("create database if not exists %s ", a.db)) + + for k, v := range 
a.dbOptions { + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + + return buf.String() +} + +var adapterTableSql = "create stable if not exists `adapter_requests` (" + + "`ts` timestamp, " + + "`total` int unsigned, " + + "`query` int unsigned, " + + "`write` int unsigned, " + + "`other` int unsigned, " + + "`in_process` int unsigned, " + + "`success` int unsigned, " + + "`fail` int unsigned, " + + "`query_success` int unsigned, " + + "`query_fail` int unsigned, " + + "`write_success` int unsigned, " + + "`write_fail` int unsigned, " + + "`other_success` int unsigned, " + + "`other_fail` int unsigned, " + + "`query_in_process` int unsigned, " + + "`write_in_process` int unsigned ) " + + "tags (`endpoint` varchar(32), `req_type` tinyint unsigned )" + +func (a *Adapter) createTable() error { + if a.conn == nil { + return errNoConnection + } + _, err := a.conn.Exec(context.Background(), adapterTableSql, util.GetQidOwn()) + return err +} + +type AdapterReport struct { + Timestamp int64 `json:"ts"` + Metric AdapterMetrics `json:"metrics"` + Endpoint string `json:"endpoint"` +} + +type AdapterMetrics struct { + RestTotal int `json:"rest_total"` + RestQuery int `json:"rest_query"` + RestWrite int `json:"rest_write"` + RestOther int `json:"rest_other"` + RestInProcess int `json:"rest_in_process"` + RestSuccess int `json:"rest_success"` + RestFail int `json:"rest_fail"` + RestQuerySuccess int `json:"rest_query_success"` + RestQueryFail int `json:"rest_query_fail"` + RestWriteSuccess int `json:"rest_write_success"` + RestWriteFail int `json:"rest_write_fail"` + RestOtherSuccess int `json:"rest_other_success"` + RestOtherFail int `json:"rest_other_fail"` + RestQueryInProcess int `json:"rest_query_in_process"` + RestWriteInProcess int `json:"rest_write_in_process"` + WSTotal int `json:"ws_total"` + WSQuery int `json:"ws_query"` + WSWrite int 
`json:"ws_write"` + WSOther int `json:"ws_other"` + WSInProcess int `json:"ws_in_process"` + WSSuccess int `json:"ws_success"` + WSFail int `json:"ws_fail"` + WSQuerySuccess int `json:"ws_query_success"` + WSQueryFail int `json:"ws_query_fail"` + WSWriteSuccess int `json:"ws_write_success"` + WSWriteFail int `json:"ws_write_fail"` + WSOtherSuccess int `json:"ws_other_success"` + WSOtherFail int `json:"ws_other_fail"` + WSQueryInProcess int `json:"ws_query_in_process"` + WSWriteInProcess int `json:"ws_write_in_process"` +} diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go new file mode 100644 index 00000000000..e6fd263c43f --- /dev/null +++ b/tools/keeper/api/adapter2_test.go @@ -0,0 +1,98 @@ +package api + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func TestAdapter2(t *testing.T) { + c := &config.Config{ + InstanceID: 64, + Port: 6043, + TDengine: config.TDengineRestful{ + Host: "127.0.0.1", + Port: 6041, + Username: "root", + Password: "taosdata", + Usessl: false, + }, + Metrics: config.MetricsConfig{ + Database: config.Database{ + Name: "adapter_report_test", + Options: map[string]interface{}{}, + }, + }, + } + a := NewAdapter(c) + err := a.Init(router) + assert.NoError(t, err) + + w := httptest.NewRecorder() + body := strings.NewReader(" {\"ts\": 1696928323, \"metrics\": {\"rest_total\": 10, \"rest_query\": 2, " + + "\"rest_write\": 5, \"rest_other\": 3, \"rest_in_process\": 1, \"rest_fail\": 5, \"rest_success\": 3, " + + "\"rest_query_success\": 1, \"rest_query_fail\": 2, \"rest_write_success\": 2, \"rest_write_fail\": 3, " + + "\"rest_other_success\": 1, \"rest_other_fail\": 2, \"rest_query_in_process\": 1, \"rest_write_in_process\": 2, " + + "\"ws_total\": 10, \"ws_query\": 2, \"ws_write\": 3, \"ws_other\": 5, 
\"ws_in_process\": 1, \"ws_success\": 3, " + + "\"ws_fail\": 3, \"ws_query_success\": 1, \"ws_query_fail\": 1, \"ws_write_success\": 2, \"ws_write_fail\": 2, " + + "\"ws_other_success\": 1, \"ws_other_fail\": 2, \"ws_query_in_process\": 1, \"ws_write_in_process\": 2 }, " + + "\"endpoint\": \"adapter-1:6041\"}") + req, _ := http.NewRequest(http.MethodPost, "/adapter_report", body) + req.Header.Set("X-QID", "0x1234567890ABCD00") + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + conn, err := db.NewConnectorWithDb(c.TDengine.Username, c.TDengine.Password, c.TDengine.Host, c.TDengine.Port, c.Metrics.Database.Name, c.TDengine.Usessl) + defer func() { + _, _ = conn.Query(context.Background(), "drop database if exists adapter_report_test", util.GetQidOwn()) + }() + + assert.NoError(t, err) + data, err := conn.Query(context.Background(), "select * from adapter_report_test.adapter_requests where req_type=0", util.GetQidOwn()) + + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, uint32(10), data.Data[0][1]) + assert.Equal(t, uint32(2), data.Data[0][2]) + assert.Equal(t, uint32(5), data.Data[0][3]) + assert.Equal(t, uint32(3), data.Data[0][4]) + assert.Equal(t, uint32(1), data.Data[0][5]) + assert.Equal(t, uint32(3), data.Data[0][6]) + assert.Equal(t, uint32(5), data.Data[0][7]) + assert.Equal(t, uint32(1), data.Data[0][8]) + assert.Equal(t, uint32(2), data.Data[0][9]) + assert.Equal(t, uint32(2), data.Data[0][10]) + assert.Equal(t, uint32(3), data.Data[0][11]) + assert.Equal(t, uint32(1), data.Data[0][12]) + assert.Equal(t, uint32(2), data.Data[0][13]) + assert.Equal(t, uint32(1), data.Data[0][14]) + assert.Equal(t, uint32(2), data.Data[0][15]) + + data, err = conn.Query(context.Background(), "select * from adapter_report_test.adapter_requests where req_type=1", util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, uint32(10), data.Data[0][1]) + assert.Equal(t, uint32(2), data.Data[0][2]) + 
assert.Equal(t, uint32(3), data.Data[0][3]) + assert.Equal(t, uint32(5), data.Data[0][4]) + assert.Equal(t, uint32(1), data.Data[0][5]) + assert.Equal(t, uint32(3), data.Data[0][6]) + assert.Equal(t, uint32(3), data.Data[0][7]) + assert.Equal(t, uint32(1), data.Data[0][8]) + assert.Equal(t, uint32(1), data.Data[0][9]) + assert.Equal(t, uint32(2), data.Data[0][10]) + assert.Equal(t, uint32(2), data.Data[0][11]) + assert.Equal(t, uint32(1), data.Data[0][12]) + assert.Equal(t, uint32(2), data.Data[0][13]) + assert.Equal(t, uint32(1), data.Data[0][14]) + assert.Equal(t, uint32(2), data.Data[0][15]) + + conn.Exec(context.Background(), "drop database "+c.Metrics.Database.Name, util.GetQidOwn()) +} diff --git a/tools/keeper/api/audit.go b/tools/keeper/api/audit.go new file mode 100644 index 00000000000..fd9fc4f6670 --- /dev/null +++ b/tools/keeper/api/audit.go @@ -0,0 +1,336 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var auditLogger = log.GetLogger("AUD") + +const MAX_DETAIL_LEN = 50000 + +type Audit struct { + username string + password string + host string + port int + usessl bool + conn *db.Connector + db string + dbOptions map[string]interface{} +} + +type AuditInfo struct { + Timestamp string `json:"timestamp"` + ClusterID string `json:"cluster_id"` + User string `json:"user"` + Operation string `json:"operation"` + Db string `json:"db"` + Resource string `json:"resource"` + ClientAdd string `json:"client_add"` // client address + Details string `json:"details"` +} + +type AuditArrayInfo struct { + Records []AuditInfo `json:"records"` +} + +type AuditInfoOld struct { + Timestamp int64 `json:"timestamp"` + ClusterID string 
`json:"cluster_id"` + User string `json:"user"` + Operation string `json:"operation"` + Db string `json:"db"` + Resource string `json:"resource"` + ClientAdd string `json:"client_add"` // client address + Details string `json:"details"` +} + +func NewAudit(c *config.Config) (*Audit, error) { + a := Audit{ + username: c.TDengine.Username, + password: c.TDengine.Password, + host: c.TDengine.Host, + port: c.TDengine.Port, + usessl: c.TDengine.Usessl, + db: c.Audit.Database.Name, + dbOptions: c.Audit.Database.Options, + } + if a.db == "" { + a.db = "audit" + } + return &a, nil +} + +func (a *Audit) Init(c gin.IRouter) error { + if err := a.createDatabase(); err != nil { + return fmt.Errorf("create database error, msg:%s", err) + } + if err := a.initConnect(); err != nil { + return fmt.Errorf("init db connect error, msg:%s", err) + } + if err := a.createSTables(); err != nil { + return fmt.Errorf("create stable error, msg:%s", err) + } + c.POST("/audit", a.handleFunc()) + c.POST("/audit_v2", a.handleFunc()) + c.POST("/audit-batch", a.handleBatchFunc()) + return nil +} + +func (a *Audit) handleBatchFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + auditLogger := auditLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if a.conn == nil { + auditLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + auditLogger.Errorf("get audit data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get audit data error. 
%s", err)}) + return + } + + if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + auditLogger.Tracef("receive audit request, data:%s", string(data)) + } + var auditArray AuditArrayInfo + + if err := json.Unmarshal(data, &auditArray); err != nil { + auditLogger.Errorf("parse audit data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)}) + return + } + + if len(auditArray.Records) == 0 { + if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + auditLogger.Trace("handle request successfully (no records)") + } + c.JSON(http.StatusOK, gin.H{}) + return + } + + err = handleBatchRecord(auditArray.Records, a.conn, qid) + + if err != nil { + auditLogger.Errorf("process records error, error:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. %s", err)}) + return + } + + c.JSON(http.StatusOK, gin.H{}) + } +} + +func (a *Audit) handleFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + auditLogger := auditLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if a.conn == nil { + auditLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + auditLogger.Errorf("get audit data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get audit data error. 
%s", err)}) + return + } + if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + auditLogger.Tracef("receive audit request, data:%s", string(data)) + } + sql := "" + + isStrTime, _ := regexp.MatchString(`"timestamp"\s*:\s*"[^"]*"`, string(data)) + if isStrTime { + var audit AuditInfo + if err := json.Unmarshal(data, &audit); err != nil { + auditLogger.Errorf("parse audit data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)}) + return + } + + sql = parseSql(audit) + } else { + var audit AuditInfoOld + if err := json.Unmarshal(data, &audit); err != nil { + auditLogger.Errorf("parse old audit error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)}) + return + } + + sql = parseSqlOld(audit) + } + + if _, err = a.conn.Exec(context.Background(), sql, qid); err != nil { + auditLogger.Errorf("save audit data error, sql:%s, error:%s", sql, err) + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("save audit data error: %s", err)}) + return + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func handleDetails(details string) string { + if strings.Contains(details, "'") { + details = strings.ReplaceAll(details, "'", "\\'") + } + if strings.Contains(details, "\"") { + details = strings.ReplaceAll(details, "\"", "\\\"") + } + if len(details) > MAX_DETAIL_LEN { + details = details[:MAX_DETAIL_LEN] + } + return details +} + +func parseSql(audit AuditInfo) string { + details := handleDetails(audit.Details) + + return fmt.Sprintf( + "insert into %s using operations tags ('%s') values (%s, '%s', '%s', '%s', '%s', '%s', '%s') ", + getTableName(audit), audit.ClusterID, audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details) +} + +func parseSqlOld(audit AuditInfoOld) string { + details := handleDetails(audit.Details) + + return fmt.Sprintf( + "insert 
into %s using operations tags ('%s') values (%s, '%s', '%s', '%s', '%s', '%s', '%s') ", + getTableNameOld(audit), audit.ClusterID, strconv.FormatInt(audit.Timestamp, 10)+"000000", audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details) +} + +func handleBatchRecord(auditArray []AuditInfo, conn *db.Connector, qid uint64) error { + var builder strings.Builder + var head = fmt.Sprintf( + "insert into %s using operations tags ('%s') values", + getTableName(auditArray[0]), auditArray[0].ClusterID) + + builder.WriteString(head) + var qid_counter uint8 = 0 + for _, audit := range auditArray { + + details := handleDetails(audit.Details) + valuesStr := fmt.Sprintf( + "(%s, '%s', '%s', '%s', '%s', '%s', '%s') ", + audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details) + + if (builder.Len() + len(valuesStr)) > MAX_SQL_LEN { + sql := builder.String() + if _, err := conn.Exec(context.Background(), sql, qid|uint64((qid_counter%255))); err != nil { + return err + } + builder.Reset() + builder.WriteString(head) + } + builder.WriteString(valuesStr) + qid_counter++ + } + + if builder.Len() > len(head) { + sql := builder.String() + if _, err := conn.Exec(context.Background(), sql, qid|uint64((qid_counter%255))); err != nil { + return err + } + } + + return nil +} + +func getTableName(audit AuditInfo) string { + return fmt.Sprintf("t_operations_%s", audit.ClusterID) +} + +func getTableNameOld(audit AuditInfoOld) string { + return fmt.Sprintf("t_operations_%s", audit.ClusterID) +} + +func (a *Audit) initConnect() error { + conn, err := db.NewConnectorWithDb(a.username, a.password, a.host, a.port, a.db, a.usessl) + if err != nil { + auditLogger.Errorf("init db connect error, msg:%s", err) + return err + } + a.conn = conn + return nil +} + +func (a *Audit) createDatabase() error { + conn, err := db.NewConnector(a.username, a.password, a.host, a.port, a.usessl) + if err != nil { + return fmt.Errorf("connect to database 
error, msg:%s", err) + } + defer func() { _ = conn.Close() }() + sql := a.createDBSql() + auditLogger.Infof("create database, sql:%s", sql) + _, err = conn.Exec(context.Background(), sql, util.GetQidOwn()) + if err != nil { + auditLogger.Errorf("create database error, msg:%s", err) + return err + } + return err +} + +var errNoConnection = errors.New("no connection") + +func (a *Audit) createDBSql() string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("create database if not exists %s precision 'ns' ", a.db)) + + for k, v := range a.dbOptions { + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + + return buf.String() +} + +func (a *Audit) createSTables() error { + var createTableSql = "create stable if not exists operations " + + "(ts timestamp, user_name varchar(25), operation varchar(20), db varchar(65), resource varchar(193), client_address varchar(25), details varchar(50000)) " + + "tags (cluster_id varchar(64))" + + if a.conn == nil { + return errNoConnection + } + _, err := a.conn.Exec(context.Background(), createTableSql, util.GetQidOwn()) + if err != nil { + auditLogger.Errorf("## create stable error, msg:%s", err) + return err + } + return nil +} diff --git a/tools/keeper/api/audit_test.go b/tools/keeper/api/audit_test.go new file mode 100644 index 00000000000..99beae7a544 --- /dev/null +++ b/tools/keeper/api/audit_test.go @@ -0,0 +1,153 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func TestAudit(t *testing.T) { + cfg := util.GetCfg() + cfg.Audit = config.AuditConfig{ + Database: config.Database{ + Name: "keepter_test_audit", + }, + Enable: true, + } + + a, err := 
NewAudit(cfg) + assert.NoError(t, err) + err = a.Init(router) + assert.NoError(t, err) + + longDetails := strings.Repeat("0123456789", 5000) + + cases := []struct { + name string + ts int64 + detail string + data string + expect string + }{ + { + name: "1", + ts: 1699839716440000000, + data: `{"timestamp": "1699839716440000000", "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "detail"}`, + expect: "detail", + }, + { + name: "2", + ts: 1699839716441000000, + data: `{"timestamp": "1699839716441000000", "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "` + longDetails + `"}`, + expect: longDetails[:50000], + }, + { + name: "3", + ts: 1699839716442000000, + data: "{\"timestamp\": \"1699839716442000000\", \"cluster_id\": \"cluster_id\", \"user\": \"user\", \"operation\": \"operation\", \"db\":\"dbnameb\", \"resource\":\"resourcenameb\", \"client_add\": \"localhost:30000\", \"details\": \"create database `meter` buffer 32 cachemodel 'none' duration 50d keep 3650d single_stable 0 wal_retention_period 3600 precision 'ms'\"}", + expect: "create database `meter` buffer 32 cachemodel 'none' duration 50d keep 3650d single_stable 0 wal_retention_period 3600 precision 'ms'", + }, + } + + cases2 := []struct { + name string + ts int64 + detail string + data string + expect string + }{ + { + name: "1", + ts: 1699839716445000000, + data: `{"timestamp":1699839716445, "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "details"}`, + expect: "details", + }, + } + conn, err := db.NewConnectorWithDb(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.Audit.Database.Name, cfg.TDengine.Usessl) + assert.NoError(t, err) + defer func() { 
+ _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Audit.Database.Name), util.GetQidOwn()) + }() + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(c.data) + req, _ := http.NewRequest(http.MethodPost, "/audit_v2", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, c.expect, data.Data[0][1]) + }) + } + + for _, c := range cases2 { + t.Run(c.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(c.data) + req, _ := http.NewRequest(http.MethodPost, "/audit", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, c.expect, data.Data[0][1]) + }) + } + + for _, c := range cases2 { + t.Run(c.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(c.data) + req, _ := http.NewRequest(http.MethodPost, "/audit", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, c.expect, data.Data[0][1]) + }) + } + + MAX_SQL_LEN = 300 + // test audit batch + input := 
`{"records":[{"timestamp":"1702548856940013848","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d630302"},{"timestamp":"1702548856939746458","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45230","db":"test","resource":"","details":"d130277"},{"timestamp":"1702548856939586665","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5268"},{"timestamp":"1702548856939528940","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50222","db":"test","resource":"","details":"d255282"},{"timestamp":"1702548856939336371","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45126","db":"test","resource":"","details":"d755297"},{"timestamp":"1702548856939075131","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d380325"},{"timestamp":"1702548856938640661","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45152","db":"test","resource":"","details":"d255281"},{"timestamp":"1702548856938505795","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d130276"},{"timestamp":"1702548856938363319","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45178","db":"test","resource":"","details":"d755296"},{"timestamp":"1702548856938201478","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d380324"},{"timestamp":"1702548856937740618","cluster_id":"8468922059162439502","user":"root","operation":"cr
eateTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5266"}]}` + + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Audit.Database.Name), util.GetQidOwn()) + }() + + t.Run("testbatch", func(t *testing.T) { + //test empty array + w1 := httptest.NewRecorder() + body1 := strings.NewReader(`{"records": []}`) + + req1, _ := http.NewRequest(http.MethodPost, "/audit-batch", body1) + router.ServeHTTP(w1, req1) + assert.Equal(t, 200, w1.Code) + + //test 2 items array + w := httptest.NewRecorder() + body := strings.NewReader(input) + req, _ := http.NewRequest(http.MethodPost, "/audit-batch", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), "select ts, details from "+cfg.Audit.Database.Name+".operations where cluster_id='8468922059162439502'", util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 11, len(data.Data)) + }) +} diff --git a/tools/keeper/api/checkhealth.go b/tools/keeper/api/checkhealth.go new file mode 100644 index 00000000000..c5d5a2d24a8 --- /dev/null +++ b/tools/keeper/api/checkhealth.go @@ -0,0 +1,21 @@ +package api + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +func NewCheckHealth(version string) *CheckHealth { + return &CheckHealth{version: version} +} + +type CheckHealth struct { + version string +} + +func (h *CheckHealth) Init(c gin.IRouter) { + c.GET("check_health", func(context *gin.Context) { + context.JSON(http.StatusOK, map[string]string{"version": h.version}) + }) +} diff --git a/tools/keeper/api/common.go b/tools/keeper/api/common.go new file mode 100644 index 00000000000..d02a30eb8b3 --- /dev/null +++ b/tools/keeper/api/common.go @@ -0,0 +1,89 @@ +package api + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + 
"github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var commonLogger = log.GetLogger("CMN") + +func CreateDatabase(username string, password string, host string, port int, usessl bool, dbname string, databaseOptions map[string]interface{}) { + qid := util.GetQidOwn() + + commonLogger := commonLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + ctx := context.Background() + + conn, err := db.NewConnector(username, password, host, port, usessl) + if err != nil { + commonLogger.Errorf("connect to adapter error, msg:%s", err) + return + } + + defer closeConn(conn) + + createDBSql := generateCreateDBSql(dbname, databaseOptions) + commonLogger.Warningf("create database sql: %s", createDBSql) + + for i := 0; i < 3; i++ { + if _, err := conn.Exec(ctx, createDBSql, util.GetQidOwn()); err != nil { + commonLogger.Errorf("try %v times: create database %s error, msg:%v", i+1, dbname, err) + time.Sleep(5 * time.Second) + continue + } + return + } + panic(err) +} + +func generateCreateDBSql(dbname string, databaseOptions map[string]interface{}) string { + var buf bytes.Buffer + buf.WriteString("create database if not exists ") + buf.WriteString(dbname) + + for k, v := range databaseOptions { + buf.WriteString(" ") + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + return buf.String() +} + +func CreatTables(username string, password string, host string, port int, usessl bool, dbname string, createList []string) { + ctx := context.Background() + conn, err := db.NewConnectorWithDb(username, password, host, port, dbname, usessl) + if err != nil { + commonLogger.Errorf("connect to database error, msg:%s", err) + return + } + defer closeConn(conn) + + for _, createSql := range createList { + commonLogger.Infof("execute sql:%s", createSql) + if _, err = conn.Exec(ctx, createSql, 
util.GetQidOwn()); err != nil { + commonLogger.Errorf("execute sql: %s, error: %s", createSql, err) + } + } +} + +func closeConn(conn *db.Connector) { + if err := conn.Close(); err != nil { + commonLogger.Errorf("close connection error, msg:%s", err) + } +} diff --git a/tools/keeper/api/exporter_test.go b/tools/keeper/api/exporter_test.go new file mode 100644 index 00000000000..f9ef6b169a7 --- /dev/null +++ b/tools/keeper/api/exporter_test.go @@ -0,0 +1,297 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/cmd" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/process" + "github.com/taosdata/taoskeeper/util" +) + +var router *gin.Engine +var conf *config.Config +var dbName = "exporter_test" + +func TestMain(m *testing.M) { + conf = config.InitConfig() + log.ConfigLog() + + conf.Metrics.Database.Name = dbName + conn, err := db.NewConnector(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl) + if err != nil { + panic(err) + } + defer conn.Close() + ctx := context.Background() + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn()) + + if _, err = conn.Exec(ctx, fmt.Sprintf("create database if not exists %s", dbName), util.GetQidOwn()); err != nil { + logger.Errorf("execute sql: %s, error: %s", fmt.Sprintf("create database %s", dbName), err) + } + gin.SetMode(gin.ReleaseMode) + router = gin.New() + reporter := NewReporter(conf) + reporter.Init(router) + + var createList = []string{ + CreateClusterInfoSql, + CreateDnodeSql, + CreateMnodeSql, + CreateDnodeInfoSql, + CreateDataDirSql, + 
CreateLogDirSql, + CreateTempDirSql, + CreateVgroupsInfoSql, + CreateVnodeRoleSql, + CreateSummarySql, + CreateGrantInfoSql, + CreateKeeperSql, + } + CreatTables(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl, conf.Metrics.Database.Name, createList) + + processor := process.NewProcessor(conf) + node := NewNodeExporter(processor) + node.Init(router) + m.Run() + if _, err = conn.Exec(ctx, fmt.Sprintf("drop database if exists %s", dbName), util.GetQidOwn()); err != nil { + logger.Errorf("execute sql: %s, error: %s", fmt.Sprintf("drop database %s", dbName), err) + } +} + +func TestGetMetrics(t *testing.T) { + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodGet, "/metrics", nil) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) +} + +var now = time.Now() +var nowStr = now.Format(time.RFC3339Nano) + +var report = Report{ + Ts: nowStr, + DnodeID: 1, + DnodeEp: "localhost:7100", + ClusterID: "6980428120398645172", + Protocol: 1, + ClusterInfo: &ClusterInfo{ + FirstEp: "localhost:7100", + FirstEpDnodeID: 1, + Version: "3.0.0.0", + MasterUptime: 2.3090276954462752e-05, + MonitorInterval: 1, + VgroupsTotal: 2, + VgroupsAlive: 2, + VnodesTotal: 2, + VnodesAlive: 2, + ConnectionsTotal: 1, + Dnodes: []Dnode{ + { + DnodeID: 1, + DnodeEp: "localhost:7100", + Status: "ready", + }, + }, + Mnodes: []Mnode{ + { + MnodeID: 1, + MnodeEp: "localhost:7100", + Role: "master", + }, + }, + }, + VgroupInfos: []VgroupInfo{ + { + VgroupID: 1, + DatabaseName: "test", + TablesNum: 1, + Status: "ready", + Vnodes: []Vnode{ + { + DnodeID: 1, + VnodeRole: "LEADER", + }, + { + DnodeID: 2, + VnodeRole: "FOLLOWER", + }, + }, + }, + }, + GrantInfo: &GrantInfo{ + ExpireTime: 2147483647, + TimeseriesUsed: 800, + TimeseriesTotal: 2147483647, + }, + DnodeInfo: DnodeInfo{ + Uptime: 0.000291412026854232, + CPUEngine: 0.0828500414250207, + CPUSystem: 0.4971002485501243, + CPUCores: 12, + MemEngine: 9268, + MemSystem: 
54279816, + MemTotal: 65654816, + DiskEngine: 0, + DiskUsed: 39889702912, + DiskTotal: 210304475136, + NetIn: 4727.45292368682, + NetOut: 2194.251734390486, + IoRead: 3789.8909811694753, + IoWrite: 12311.19920713578, + IoReadDisk: 0, + IoWriteDisk: 12178.394449950447, + ReqSelect: 2, + ReqSelectRate: 0, + ReqInsert: 6, + ReqInsertSuccess: 4, + ReqInsertRate: 0, + ReqInsertBatch: 10, + ReqInsertBatchSuccess: 8, + ReqInsertBatchRate: 0, + Errors: 2, + VnodesNum: 2, + Masters: 2, + HasMnode: 1, + HasQnode: 1, + HasSnode: 1, + HasBnode: 1, + }, + DiskInfos: DiskInfo{ + Datadir: []DataDir{ + { + Name: "/root/TDengine/sim/dnode1/data", + Level: 0, + Avail: decimal.NewFromInt(171049893888), + Used: decimal.NewFromInt(39254581248), + Total: decimal.NewFromInt(210304475136), + }, + { + Name: "/root/TDengine/sim/dnode2/data", + Level: 1, + Avail: decimal.NewFromInt(171049893888), + Used: decimal.NewFromInt(39254581248), + Total: decimal.NewFromInt(210304475136), + }, + }, + Logdir: LogDir{ + Name: "/root/TDengine/sim/dnode1/log", + Avail: decimal.NewFromInt(171049771008), + Used: decimal.NewFromInt(39254704128), + Total: decimal.NewFromInt(210304475136), + }, + Tempdir: TempDir{ + Name: "/tmp", + Avail: decimal.NewFromInt(171049771008), + Used: decimal.NewFromInt(39254704128), + Total: decimal.NewFromInt(210304475136), + }, + }, + LogInfos: LogInfo{ + Summary: []Summary{ + { + Level: "error", + Total: 0, + }, { + Level: "info", + Total: 114, + }, { + Level: "debug", + Total: 117, + }, { + Level: "trace", + Total: 126, + }, + }, + }, +} + +func TestPutMetrics(t *testing.T) { + w := httptest.NewRecorder() + b, _ := json.Marshal(report) + body := strings.NewReader(string(b)) + req, _ := http.NewRequest(http.MethodPost, "/report", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, + conf.TDengine.Port, dbName, conf.TDengine.Usessl) + if err != nil { + 
logger.Errorf("connect to database error, msg:%s", err) + return + } + + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn()) + }() + + ctx := context.Background() + data, err := conn.Query(ctx, "select info from log_summary", util.GetQidOwn()) + if err != nil { + logger.Errorf("execute sql:%s, error:%s", "select * from log_summary", err) + t.Fatal(err) + } + for _, info := range data.Data { + assert.Equal(t, int32(114), info[0]) + } + + var tenMinutesBefore = now.Add(-10 * time.Minute) + var tenMinutesBeforeStr = tenMinutesBefore.Format(time.RFC3339Nano) + + conf.FromTime = tenMinutesBeforeStr + conf.Transfer = "old_taosd_metric" + + var cmd = cmd.NewCommand(conf) + cmd.Process(conf) + + type TableInfo struct { + TsName string + RowNum int + } + + tables := map[string]*TableInfo{ + "taosd_cluster_basic": {"ts", 1}, + "taosd_cluster_info": {"_ts", 1}, + "taosd_vgroups_info": {"_ts", 1}, + "taosd_dnodes_info": {"_ts", 1}, + "taosd_dnodes_status": {"_ts", 1}, + "taosd_dnodes_data_dirs": {"_ts", 1}, + "taosd_dnodes_log_dirs": {"_ts", 2}, + "taosd_mnodes_info": {"_ts", 1}, + "taosd_vnodes_info": {"_ts", 1}, + } + + for table, tableInfo := range tables { + data, err = conn.Query(ctx, fmt.Sprintf("select %s from %s", tableInfo.TsName, table), util.GetQidOwn()) + if err != nil { + logger.Errorf("execute sql:%s, error:%s", "select * from "+table, err) + t.Fatal(err) + } + + assert.Equal(t, tableInfo.RowNum, len(data.Data)) + assert.Equal(t, now.UnixMilli(), data.Data[0][0].(time.Time).UnixMilli()) + } + + conf.Transfer = "" + conf.Drop = "old_taosd_metric_stables" + cmd.Process(conf) + + data, err = conn.Query(ctx, "select * from information_schema.ins_stables where stable_name = 'm_info'", util.GetQidOwn()) + if err != nil { + logger.Errorf("execute sql:%s, error:%s", "m_info is not droped", err) + t.Fatal(err) + } + assert.Equal(t, 0, len(data.Data)) + logger.Infof("ALL OK !!!") 
+} diff --git a/tools/keeper/api/gen_metric.go b/tools/keeper/api/gen_metric.go new file mode 100644 index 00000000000..5534fe453df --- /dev/null +++ b/tools/keeper/api/gen_metric.go @@ -0,0 +1,770 @@ +package api + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "regexp" + + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var re = regexp.MustCompile("'+") +var gmLogger = log.GetLogger("GEN") + +var MAX_SQL_LEN = 1000000 + +var STABLE_NAME_KEY = "priv_stn" + +type ColumnSeq struct { + tagNames []string + metricNames []string +} + +var ( + mu sync.RWMutex + gColumnSeqMap = make(map[string]ColumnSeq) +) + +type GeneralMetric struct { + client *http.Client + conn *db.Connector + username string + password string + host string + port int + usessl bool + database string + url *url.URL +} + +type Tag struct { + Name string `json:"name"` + Value string `json:"value"` +} + +type Metric struct { + Name string `json:"name"` + Value float64 `json:"value"` +} + +type MetricGroup struct { + Tags []Tag `json:"tags"` + Metrics []Metric `json:"metrics"` +} + +type StableInfo struct { + Name string `json:"name"` + MetricGroups []MetricGroup `json:"metric_groups"` +} + +type StableArrayInfo struct { + Ts string `json:"ts"` + Protocol int `json:"protocol"` + Tables []StableInfo `json:"tables"` +} + +type ClusterBasic struct { + ClusterId string `json:"cluster_id"` + Ts string `json:"ts"` + FirstEp string `json:"first_ep"` + FirstEpDnodeId int32 `json:"first_ep_dnode_id"` + ClusterVersion string `json:"cluster_version"` +} + +type SlowSqlDetailInfo struct { + StartTs string `json:"start_ts"` + RequestId string `json:"request_id"` + QueryTime int32 `json:"query_time"` + Code 
int32 `json:"code"` + ErrorInfo string `json:"error_info"` + Type int8 `json:"type"` + RowsNum int64 `json:"rows_num"` + Sql string `json:"sql"` + ProcessName string `json:"process_name"` + ProcessId string `json:"process_id"` + Db string `json:"db"` + User string `json:"user"` + Ip string `json:"ip"` + ClusterId string `json:"cluster_id"` +} + +func (gm *GeneralMetric) Init(c gin.IRouter) error { + c.POST("/general-metric", gm.handleFunc()) + c.POST("/taosd-cluster-basic", gm.handleTaosdClusterBasic()) + c.POST("/slow-sql-detail-batch", gm.handleSlowSqlDetailBatch()) + + conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl) + if err != nil { + gmLogger.Errorf("init db connect error, msg:%s", err) + return err + } + gm.conn = conn + + err = gm.createSTables() + if err != nil { + gmLogger.Errorf("create stable error, msg:%s", err) + return err + } + + err = gm.initColumnSeqMap() + if err != nil { + gmLogger.Errorf("init gColumnSeqMap error, msg:%s", err) + return err + } + + return err +} + +func NewGeneralMetric(conf *config.Config) *GeneralMetric { + + client := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableCompression: true, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + + var protocol string + if conf.TDengine.Usessl { + protocol = "https" + } else { + protocol = "http" + } + + imp := &GeneralMetric{ + client: client, + username: conf.TDengine.Username, + password: conf.TDengine.Password, + host: conf.TDengine.Host, + port: conf.TDengine.Port, + usessl: conf.TDengine.Usessl, + database: conf.Metrics.Database.Name, + url: &url.URL{ + Scheme: protocol, + Host: fmt.Sprintf("%s:%d", conf.TDengine.Host, conf.TDengine.Port), + 
Path: "/influxdb/v1/write", + RawQuery: fmt.Sprintf("db=%s&precision=ms&table_name_key=%s", conf.Metrics.Database.Name, STABLE_NAME_KEY), + }, + } + return imp +} + +func (gm *GeneralMetric) handleFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if gm.client == nil { + gmLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + gmLogger.Errorf("get general metric data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get general metric data error. %s", err)}) + return + } + + var request []StableArrayInfo + + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("data:%s", string(data)) + } + + if err := json.Unmarshal(data, &request); err != nil { + gmLogger.Errorf("parse general metric data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse general metric data error: %s", err)}) + return + } + + if len(request) == 0 { + c.JSON(http.StatusOK, gin.H{}) + return + } + + err = gm.handleBatchMetrics(request, qid) + + if err != nil { + gmLogger.Errorf("process records error. msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. 
%s", err)}) + return + } + + c.JSON(http.StatusOK, gin.H{}) + } +} + +func (gm *GeneralMetric) handleBatchMetrics(request []StableArrayInfo, qid uint64) error { + var buf bytes.Buffer + + for _, stableArrayInfo := range request { + if stableArrayInfo.Ts == "" { + gmLogger.Error("ts data is empty") + continue + } + + for _, table := range stableArrayInfo.Tables { + if table.Name == "" { + gmLogger.Error("stable name is empty") + continue + } + + table.Name = strings.ToLower(table.Name) + if _, ok := Load(table.Name); !ok { + Init(table.Name) + } + + for _, metricGroup := range table.MetricGroups { + buf.WriteString(table.Name) + writeTags(metricGroup.Tags, table.Name, &buf) + buf.WriteString(" ") + writeMetrics(metricGroup.Metrics, table.Name, &buf) + buf.WriteString(" ") + buf.WriteString(stableArrayInfo.Ts) + buf.WriteString("\n") + } + } + } + + if buf.Len() > 0 { + return gm.lineWriteBody(&buf, qid) + } + return nil +} + +func (gm *GeneralMetric) lineWriteBody(buf *bytes.Buffer, qid uint64) error { + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + header := map[string][]string{ + "Connection": {"keep-alive"}, + } + req_data := buf.String() + + //build new URL,add qid to URL + urlWithQid := *gm.url + query := urlWithQid.Query() + query.Set("qid", fmt.Sprintf("%d", qid)) + urlWithQid.RawQuery = query.Encode() + + req := &http.Request{ + Method: http.MethodPost, + URL: &urlWithQid, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: header, + Host: gm.url.Host, + } + req.SetBasicAuth(gm.username, gm.password) + + req.Body = io.NopCloser(buf) + + startTime := time.Now() + resp, err := gm.client.Do(req) + + endTime := time.Now() + latency := endTime.Sub(startTime) + + if err != nil { + gmLogger.Errorf("latency:%v, req_data:%v, url:%s, resp:%d, err:%s", latency, req_data, urlWithQid.String(), resp.StatusCode, err) + return err + } + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("latency:%v, 
req_data:%v, url:%s, resp:%d", latency, req_data, urlWithQid.String(), resp.StatusCode) + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status code %d:body:%s", resp.StatusCode, string(body)) + } + return nil +} + +func (gm *GeneralMetric) handleTaosdClusterBasic() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if gm.conn == nil { + gmLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + gmLogger.Errorf("get taosd cluster basic data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get general metric data error. %s", err)}) + return + } + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("receive taosd cluster basic data:%s", string(data)) + } + + var request ClusterBasic + + if err := json.Unmarshal(data, &request); err != nil { + gmLogger.Errorf("parse general metric data error, data:%s, msg:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse general metric data error: %s", err)}) + return + } + + sql := fmt.Sprintf( + "insert into %s.taosd_cluster_basic_%s using taosd_cluster_basic tags ('%s') values (%s, '%s', %d, '%s') ", + gm.database, request.ClusterId, request.ClusterId, request.Ts, request.FirstEp, request.FirstEpDnodeId, request.ClusterVersion) + + if _, err = gm.conn.Exec(context.Background(), sql, qid); err != nil { + gmLogger.Errorf("insert taosd_cluster_basic error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taosd_cluster_basic error. 
%s", err)}) + return + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func processString(input string) string { + // remove number in the beginning + re := regexp.MustCompile(`^\d+`) + input = re.ReplaceAllString(input, "") + + // replage "." to "_" + input = strings.ReplaceAll(input, ".", "_") + + // remove special characters + re = regexp.MustCompile(`[^a-zA-Z0-9_]`) + input = re.ReplaceAllString(input, "") + + return input +} + +func (gm *GeneralMetric) handleSlowSqlDetailBatch() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if gm.conn == nil { + gmLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + gmLogger.Errorf("get taos slow sql detail data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get taos slow sql detail data error. 
%s", err)}) + return + } + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("receive taos slow sql detail data:%s", string(data)) + } + + var request []SlowSqlDetailInfo + + if err := json.Unmarshal(data, &request); err != nil { + gmLogger.Errorf("parse taos slow sql detail error, msg:%s", string(data)) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse taos slow sql detail error: %s", err)}) + return + } + + var sql_head = "INSERT INTO `taos_slow_sql_detail` (tbname, `db`, `user`, `ip`, `cluster_id`, `start_ts`, `request_id`, `query_time`, `code`, `error_info`, `type`, `rows_num`, `sql`, `process_name`, `process_id`) values " + var buf bytes.Buffer + buf.WriteString(sql_head) + var qid_counter uint8 = 0 + for _, slowSqlDetailInfo := range request { + if slowSqlDetailInfo.StartTs == "" { + gmLogger.Error("start_ts data is empty") + continue + } + + // cut string to max len + slowSqlDetailInfo.Sql = re.ReplaceAllString(slowSqlDetailInfo.Sql, "'") // 将匹配到的部分替换为一个单引号 + slowSqlDetailInfo.Sql = strings.ReplaceAll(slowSqlDetailInfo.Sql, "'", "''") + slowSqlDetailInfo.Sql = util.SafeSubstring(slowSqlDetailInfo.Sql, 16384) + slowSqlDetailInfo.ClusterId = util.SafeSubstring(slowSqlDetailInfo.ClusterId, 32) + slowSqlDetailInfo.Db = util.SafeSubstring(slowSqlDetailInfo.Db, 1024) + if slowSqlDetailInfo.Db == "" { + slowSqlDetailInfo.Db = "unknown" + } + slowSqlDetailInfo.User = util.SafeSubstring(slowSqlDetailInfo.User, 32) + slowSqlDetailInfo.Ip = util.SafeSubstring(slowSqlDetailInfo.Ip, 32) + slowSqlDetailInfo.ProcessName = util.SafeSubstring(slowSqlDetailInfo.ProcessName, 32) + slowSqlDetailInfo.ProcessId = util.SafeSubstring(slowSqlDetailInfo.ProcessId, 32) + slowSqlDetailInfo.ErrorInfo = util.SafeSubstring(slowSqlDetailInfo.ErrorInfo, 128) + + // max len 192 + var sub_table_name = slowSqlDetailInfo.User + "_" + util.SafeSubstring(slowSqlDetailInfo.Db, 80) + "_" + slowSqlDetailInfo.Ip + "_clusterId_" + slowSqlDetailInfo.ClusterId + 
sub_table_name = strings.ToLower(processString(sub_table_name)) + + var sql = fmt.Sprintf( + "('%s', '%s', '%s', '%s', '%s', %s, %s, %d, %d, '%s', %d, %d, '%s', '%s', '%s') ", + sub_table_name, + slowSqlDetailInfo.Db, slowSqlDetailInfo.User, slowSqlDetailInfo.Ip, slowSqlDetailInfo.ClusterId, slowSqlDetailInfo.StartTs, slowSqlDetailInfo.RequestId, + slowSqlDetailInfo.QueryTime, slowSqlDetailInfo.Code, slowSqlDetailInfo.ErrorInfo, slowSqlDetailInfo.Type, slowSqlDetailInfo.RowsNum, slowSqlDetailInfo.Sql, + slowSqlDetailInfo.ProcessName, slowSqlDetailInfo.ProcessId) + if (buf.Len() + len(sql)) < MAX_SQL_LEN { + buf.WriteString(sql) + } else { + if _, err = gm.conn.Exec(context.Background(), buf.String(), qid|uint64((qid_counter%255))); err != nil { + gmLogger.Errorf("insert taos_slow_sql_detail error, sql:%s, error:%s", buf.String(), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taos_slow_sql_detail error. %s", err)}) + return + } + buf.Reset() + buf.WriteString(sql_head) + buf.WriteString(sql) + qid_counter++ + } + } + + if buf.Len() > len(sql_head) { + if _, err = gm.conn.Exec(context.Background(), buf.String(), qid|uint64((qid_counter%255))); err != nil { + gmLogger.Errorf("insert taos_slow_sql_detail error, data:%s, msg:%s", buf.String(), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taos_slow_sql_detail error. 
%s", err)}) + return + } + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func writeTags(tags []Tag, stbName string, buf *bytes.Buffer) { + var nameArray []string + if columnSeq, ok := Load(stbName); ok { + if len(columnSeq.tagNames) < len(tags) { + // add column, only schema change will hit here + for _, tag := range tags { + if !contains(columnSeq.tagNames, tag.Name) { + columnSeq.tagNames = append(columnSeq.tagNames, tag.Name) + } + } + Store(stbName, columnSeq) + } + nameArray = columnSeq.tagNames + } + + // 将 Tag 切片转换为 map + tagMap := make(map[string]string) + for _, tag := range tags { + tagMap[tag.Name] = tag.Value + } + + for _, name := range nameArray { + if value, ok := tagMap[name]; ok { + if value != "" { + buf.WriteString(fmt.Sprintf(",%s=%s", name, util.EscapeInfluxProtocol(value))) + } else { + buf.WriteString(fmt.Sprintf(",%s=%s", name, "unknown")) + gmLogger.Errorf("tag value is empty, tag name:%s", name) + } + } else { + buf.WriteString(fmt.Sprintf(",%s=%s", name, "unknown")) + } + } + + // have sub table name + if _, ok := tagMap[STABLE_NAME_KEY]; ok { + return + } + + subTableName := get_sub_table_name_valid(stbName, tagMap) + if subTableName != "" { + buf.WriteString(fmt.Sprintf(",%s=%s", STABLE_NAME_KEY, subTableName)) + } else { + gmLogger.Errorf("get sub stable name error, stable name:%s, tag map:%v", stbName, tagMap) + } +} + +func checkKeysExist(data map[string]string, keys ...string) bool { + for _, key := range keys { + _, ok := data[key] + if !ok { + return false + } + } + return true +} + +func get_sub_table_name_valid(stbName string, tagMap map[string]string) string { + stbName = get_sub_table_name(stbName, tagMap) + return util.ToValidTableName(stbName) +} + +func get_sub_table_name(stbName string, tagMap map[string]string) string { + if strings.HasPrefix(stbName, "taosx") { + switch stbName { + case "taosx_sys": + if checkKeysExist(tagMap, "taosx_id") { + return fmt.Sprintf("sys_%s", tagMap["taosx_id"]) + } + case "taosx_agent": + if 
checkKeysExist(tagMap, "taosx_id", "agent_id") { + return fmt.Sprintf("agent_%s_%s", tagMap["taosx_id"], tagMap["agent_id"]) + } + case "taosx_connector": + if checkKeysExist(tagMap, "taosx_id", "ds_name", "task_id") { + return fmt.Sprintf("connector_%s_%s_%s", tagMap["taosx_id"], tagMap["ds_name"], tagMap["task_id"]) + } + default: + if strings.HasPrefix(stbName, "taosx_task_") { + ds_name := stbName[len("taosx_task_"):] + if checkKeysExist(tagMap, "taosx_id", "task_id") { + return fmt.Sprintf("task_%s_%s_%s", tagMap["taosx_id"], ds_name, tagMap["task_id"]) + } + } + return "" + } + } + + switch stbName { + case "taosd_cluster_info": + if checkKeysExist(tagMap, "cluster_id") { + return fmt.Sprintf("cluster_%s", tagMap["cluster_id"]) + } + case "taosd_vgroups_info": + if checkKeysExist(tagMap, "cluster_id", "vgroup_id", "database_name") { + return fmt.Sprintf("vginfo_%s_vgroup_%s_cluster_%s", tagMap["database_name"], tagMap["vgroup_id"], tagMap["cluster_id"]) + } + case "taosd_dnodes_info": + if checkKeysExist(tagMap, "cluster_id", "dnode_id") { + return fmt.Sprintf("dinfo_%s_cluster_%s", tagMap["dnode_id"], tagMap["cluster_id"]) + } + case "taosd_dnodes_status": + if checkKeysExist(tagMap, "cluster_id", "dnode_id") { + return fmt.Sprintf("dstatus_%s_cluster_%s", tagMap["dnode_id"], tagMap["cluster_id"]) + } + case "taosd_dnodes_log_dirs": + if checkKeysExist(tagMap, "cluster_id", "dnode_id", "data_dir_name") { + subTableName := fmt.Sprintf("dlog_%s_%s_cluster_%s", tagMap["dnode_id"], tagMap["data_dir_name"], tagMap["cluster_id"]) + if len(subTableName) <= util.MAX_TABLE_NAME_LEN { + return subTableName + } + return fmt.Sprintf("dlog_%s_%s_cluster_%s", tagMap["dnode_id"], + util.GetMd5HexStr(tagMap["data_dir_name"]), + tagMap["cluster_id"]) + } + case "taosd_dnodes_data_dirs": + if checkKeysExist(tagMap, "cluster_id", "dnode_id", "data_dir_name", "data_dir_level") { + subTableName := fmt.Sprintf("ddata_%s_%s_level_%s_cluster_%s", tagMap["dnode_id"], 
tagMap["data_dir_name"], tagMap["data_dir_level"], tagMap["cluster_id"]) + if len(subTableName) <= util.MAX_TABLE_NAME_LEN { + return subTableName + } + return fmt.Sprintf("ddata_%s_%s_level_%s_cluster_%s", tagMap["dnode_id"], + util.GetMd5HexStr(tagMap["data_dir_name"]), + tagMap["data_dir_level"], + tagMap["cluster_id"]) + } + case "taosd_mnodes_info": + if checkKeysExist(tagMap, "cluster_id", "mnode_id") { + return fmt.Sprintf("minfo_%s_cluster_%s", tagMap["mnode_id"], tagMap["cluster_id"]) + } + case "taosd_vnodes_info": + if checkKeysExist(tagMap, "cluster_id", "database_name", "vgroup_id", "dnode_id") { + return fmt.Sprintf("vninfo_%s_dnode_%s_vgroup_%s_cluster_%s", tagMap["database_name"], tagMap["dnode_id"], tagMap["vgroup_id"], tagMap["cluster_id"]) + } + case "taosd_sql_req": + if checkKeysExist(tagMap, "username", "sql_type", "result", "dnode_id", "vgroup_id", "cluster_id") { + return fmt.Sprintf("taosdsql_%s_%s_%s_%s_vgroup_%s_cluster_%s", tagMap["username"], + tagMap["sql_type"], tagMap["result"], tagMap["dnode_id"], tagMap["vgroup_id"], tagMap["cluster_id"]) + } + case "taos_sql_req": + if checkKeysExist(tagMap, "username", "sql_type", "result", "cluster_id") { + return fmt.Sprintf("taossql_%s_%s_%s_cluster_%s", tagMap["username"], + tagMap["sql_type"], tagMap["result"], tagMap["cluster_id"]) + } + case "taos_slow_sql": + if checkKeysExist(tagMap, "username", "duration", "result", "cluster_id") { + return fmt.Sprintf("slowsql_%s_%s_%s_cluster_%s", tagMap["username"], + tagMap["duration"], tagMap["result"], tagMap["cluster_id"]) + } + + default: + return "" + } + return "" +} + +func contains(array []string, item string) bool { + for _, value := range array { + if value == item { + return true + } + } + return false +} + +func writeMetrics(metrics []Metric, stbName string, buf *bytes.Buffer) { + var nameArray []string + if columnSeq, ok := Load(stbName); ok { + if len(columnSeq.metricNames) < len(metrics) { + // add column, only schema change will hit 
here + for _, metric := range metrics { + if !contains(columnSeq.metricNames, metric.Name) { + columnSeq.metricNames = append(columnSeq.metricNames, metric.Name) + } + } + Store(stbName, columnSeq) + } + nameArray = columnSeq.metricNames + } + + // 将 Metric 切片转换为 map + metricMap := make(map[string]float64) + for _, metric := range metrics { + metricMap[metric.Name] = metric.Value + } + + for i, name := range nameArray { + if value, ok := metricMap[name]; ok { + buf.WriteString(fmt.Sprintf("%s=%sf64", name, strconv.FormatFloat(value, 'f', -1, 64))) + if i != len(nameArray)-1 { + buf.WriteString(",") + } + } + } +} + +// 存储数据 +func Store(key string, value ColumnSeq) { + mu.Lock() + defer mu.Unlock() + gColumnSeqMap[key] = value +} + +// 加载数据 +func Load(key string) (ColumnSeq, bool) { + mu.RLock() + defer mu.RUnlock() + value, ok := gColumnSeqMap[key] + return value, ok +} + +// 初始化单表的列序列 +func Init(key string) { + mu.Lock() + defer mu.Unlock() + if _, ok := gColumnSeqMap[key]; !ok { + columnSeq := ColumnSeq{ + tagNames: []string{}, + metricNames: []string{}, + } + gColumnSeqMap[key] = columnSeq + } +} + +// 初始化所有列序列 +func (gm *GeneralMetric) initColumnSeqMap() error { + query := fmt.Sprintf(` + select stable_name + from information_schema.ins_stables + where db_name = '%s' + and ( + stable_name like 'taos_%%' + or stable_name like 'taosd_%%' + or stable_name like 'taosx_%%' + ) + order by stable_name asc; + `, gm.database) + + data, err := gm.conn.Query(context.Background(), query, util.GetQidOwn()) + + if err != nil { + return err + } + + //get all stables, then init gColumnSeqMap + for _, row := range data.Data { + stableName := row[0].(string) + Init(stableName) + } + //set gColumnSeqMap with desc stables + for tableName, columnSeq := range gColumnSeqMap { + data, err := gm.conn.Query(context.Background(), fmt.Sprintf(`desc %s.%s;`, gm.database, tableName), util.GetQidOwn()) + + if err != nil { + return err + } + + if len(data.Data) < 1 || len(data.Data[0]) < 4 { 
+ return fmt.Errorf("desc %s.%s error", gm.database, tableName) + } + + for i, row := range data.Data { + if i == 0 { + continue + } + + if row[3].(string) == "TAG" { + columnSeq.tagNames = append(columnSeq.tagNames, row[0].(string)) + } else { + columnSeq.metricNames = append(columnSeq.metricNames, row[0].(string)) + } + } + Store(tableName, columnSeq) + } + + gmLogger.Infof("gColumnSeqMap:%v", gColumnSeqMap) + return nil +} + +func (gm *GeneralMetric) createSTables() error { + var createTableSql = "create stable if not exists taosd_cluster_basic " + + "(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " + + "tags (cluster_id varchar(50))" + + if gm.conn == nil { + return errNoConnection + } + _, err := gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn()) + if err != nil { + return err + } + + createTableSql = "create stable if not exists taos_slow_sql_detail" + + " (start_ts TIMESTAMP, request_id BIGINT UNSIGNED PRIMARY KEY, query_time INT, code INT, error_info varchar(128), " + + "type TINYINT, rows_num BIGINT, sql varchar(16384), process_name varchar(32), process_id varchar(32)) " + + "tags (db varchar(1024), `user` varchar(32), ip varchar(32), cluster_id varchar(32))" + + _, err = gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn()) + return err +} diff --git a/tools/keeper/api/gen_metric_test.go b/tools/keeper/api/gen_metric_test.go new file mode 100644 index 00000000000..88987d65440 --- /dev/null +++ b/tools/keeper/api/gen_metric_test.go @@ -0,0 +1,358 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/util" +) + +var router_inited bool = false + +func TestClusterBasic(t *testing.T) { + cfg := util.GetCfg() + + CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, 
cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options) + + gm := NewGeneralMetric(cfg) + if !router_inited { + err := gm.Init(router) + assert.NoError(t, err) + router_inited = true + } + + testcfg := struct { + name string + ts int64 + tbname string + data string + expect string + }{ + name: "1", + tbname: "taosd_cluster_basic", + ts: 1705655770381, + data: `{"ts":"1705655770381","cluster_id":"7648966395564416484","protocol":2,"first_ep":"ssfood06:6130","first_ep_dnode_id":1,"cluster_version":"3.2.1.0.alp"}`, + expect: "7648966395564416484", + } + + conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn()) + }() + + t.Run(testcfg.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(testcfg.data) + req, _ := http.NewRequest(http.MethodPost, "/taosd-cluster-basic", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, cluster_id from %s.%s where ts=%d", gm.database, testcfg.tbname, testcfg.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, testcfg.expect, data.Data[0][1]) + }) + + testcfg = struct { + name string + ts int64 + tbname string + data string + expect string + }{ + name: "1", + tbname: "taos_slow_sql_detail", + ts: 1703226836762, + data: `[{ + "start_ts": "1703226836762", + "request_id": "1", + "query_time": 100, + "code": 0, + "error_info": "", + "type": 1, + "rows_num": 5, + "sql": "select * from abc;", + "process_name": "abc", + "process_id": "123", + "db": "dbname", + "user": "root", + "ip": "127.0.0.1", + "cluster_id": "1234567" + }, + { + "start_ts": "1703226836763", + "request_id": "2", + "query_time": 100, + "code": 0, + "error_info": "", + "type": 1, + 
"rows_num": 5, + "sql": "insert into abc ('a', 'b') values ('aaa', 'bbb');", + "process_name": "abc", + "process_id": "123", + "db": "dbname", + "user": "root", + "ip": "127.0.0.1", + "cluster_id": "1234567" + }]`, + expect: "1234567", + } + + conn, err = db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn()) + }() + + t.Run(testcfg.name, func(t *testing.T) { + MAX_SQL_LEN = 1000000 + w := httptest.NewRecorder() + body := strings.NewReader(testcfg.data) + req, _ := http.NewRequest(http.MethodPost, "/slow-sql-detail-batch", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select start_ts, cluster_id from %s.%s where start_ts=%d", gm.database, testcfg.tbname, testcfg.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, testcfg.expect, data.Data[0][1]) + }) +} + +func TestGenMetric(t *testing.T) { + cfg := util.GetCfg() + + CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options) + + gm := NewGeneralMetric(cfg) + if !router_inited { + err := gm.Init(router) + assert.NoError(t, err) + router_inited = true + } + + testcfg := struct { + name string + ts []int64 + tbname []string + data string + expect string + }{ + name: "1", + tbname: []string{"taosd_cluster_info", "taosd_dnodes_info"}, + ts: []int64{1703226836761, 1703226836762}, + data: `[{ + "ts": "1703226836761", + "protocol": 2, + "tables": [{ + "name": "taosd_cluster_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }], + "metrics": [{ + "name": "dbs_total", + "value": 1 + }, { + "name": "master_uptime", + "value": 0 + }] + }] + }, 
{ + "name": "taosd_dnodes_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }, { + "name": "dnode_id", + "value": "1" + }, { + "name": "dnode_ep", + "value": "ssfood06:6130" + }], + "metrics": [{ + "name": "uptime", + "value": 0 + }, { + "name": "cpu_engine", + "value": 0 + }] + }] + }] + }, { + "ts": "1703226836762", + "protocol": 2, + "tables": [{ + "name": "taosd_cluster_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }], + "metrics": [{ + "name": "dbs_total", + "value": 1 + }, { + "name": "master_uptime", + "value": 0 + }] + }] + }, { + "name": "taosd_dnodes_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }, { + "name": "dnode_id", + "value": "1" + }, { + "name": "dnode_ep", + "value": ", =\"ssfood06:6130" + }], + "metrics": [{ + "name": "uptime", + "value": 0 + }, { + "name": "cpu_engine", + "value": 0 + }] + }] + }] + }]`, + expect: "1397715317673023180", + } + + conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn()) + }() + + t.Run(testcfg.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(testcfg.data) + req, _ := http.NewRequest(http.MethodPost, "/general-metric", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + for _, tbname := range testcfg.tbname { + for _, ts := range testcfg.ts { + data, err := conn.Query(context.Background(), fmt.Sprintf("select _ts, cluster_id from %s.%s where _ts=%d", gm.database, tbname, ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, testcfg.expect, data.Data[0][1]) + } + } + }) +} +func TestGetSubTableName(t *testing.T) { + tests := []struct { + stbName string + 
tagMap map[string]string + want string + }{ + { + stbName: "taosx_sys", + tagMap: map[string]string{"taosx_id": "123"}, + want: "sys_123", + }, + { + stbName: "taosx_agent", + tagMap: map[string]string{"taosx_id": "123", "agent_id": "456"}, + want: "agent_123_456", + }, + { + stbName: "taosx_connector", + tagMap: map[string]string{"taosx_id": "123", "ds_name": "ds", "task_id": "789"}, + want: "connector_123_ds_789", + }, + { + stbName: "taosx_task_example", + tagMap: map[string]string{"taosx_id": "123", "task_id": "789"}, + want: "task_123_example_789", + }, + { + stbName: "taosd_cluster_info", + tagMap: map[string]string{"cluster_id": "123"}, + want: "cluster_123", + }, + { + stbName: "taosd_vgroups_info", + tagMap: map[string]string{"cluster_id": "123", "vgroup_id": "456", "database_name": "db"}, + want: "vginfo_db_vgroup_456_cluster_123", + }, + { + stbName: "taosd_dnodes_info", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123"}, + want: "dinfo_123_cluster_123", + }, + { + stbName: "taosd_dnodes_status", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123"}, + want: "dstatus_123_cluster_123", + }, + { + stbName: "taosd_dnodes_log_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "log"}, + want: "dlog_123_log_cluster_123", + }, + { + stbName: "taosd_dnodes_log_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "loglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglog"}, + want: "dlog_123_9cdc719961a632a27603cd5ed9f1aee2_cluster_123", + }, + { + stbName: "taosd_dnodes_data_dirs", + tagMap: 
map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "data", "data_dir_level": "5"}, + want: "ddata_123_data_level_5_cluster_123", + }, + { + stbName: "taosd_dnodes_data_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "datadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadata", "data_dir_level": "5"}, + want: "ddata_123_03bf8dffdf6b97e08f347c6ae795998b_level_5_cluster_123", + }, + { + stbName: "taosd_mnodes_info", + tagMap: map[string]string{"cluster_id": "123", "mnode_id": "12"}, + want: "minfo_12_cluster_123", + }, + { + stbName: "taosd_vnodes_info", + tagMap: map[string]string{"cluster_id": "123", "database_name": "db", "vgroup_id": "456", "dnode_id": "789"}, + want: "vninfo_db_dnode_789_vgroup_456_cluster_123", + }, + { + stbName: "taosd_sql_req", + tagMap: map[string]string{"username": "user", "sql_type": "select", "result": "success", "dnode_id": "123", "vgroup_id": "456", "cluster_id": "123"}, + want: "taosdsql_user_select_success_123_vgroup_456_cluster_123", + }, + { + stbName: "taos_sql_req", + tagMap: map[string]string{"username": "user", "sql_type": "select", "result": "success", "cluster_id": "123"}, + want: "taossql_user_select_success_cluster_123", + }, + { + stbName: "taos_slow_sql", + tagMap: map[string]string{"username": "user", "duration": "100ms", "result": "success", "cluster_id": "123"}, + want: "slowsql_user_100ms_success_cluster_123", + }, + } + + for _, tt := range tests { + t.Run(tt.stbName, func(t *testing.T) { + if got := get_sub_table_name_valid(tt.stbName, tt.tagMap); 
got != tt.want { + panic(fmt.Sprintf("get_sub_table_name() = %v, want %v", got, tt.want)) + } + }) + } +} diff --git a/tools/keeper/api/https_test.go b/tools/keeper/api/https_test.go new file mode 100644 index 00000000000..c73cbfc2e41 --- /dev/null +++ b/tools/keeper/api/https_test.go @@ -0,0 +1,127 @@ +package api + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + crand "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "log" + "math/big" + "net/http" + "net/http/httputil" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/util" +) + +func TestHttps(t *testing.T) { + server := startProxy() + defer server.Shutdown(context.Background()) + + cfg := util.GetCfg() + cfg.TDengine.Usessl = true + cfg.TDengine.Port = 34443 + + CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options) + + conn, err := db.NewConnectorWithDb(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.Metrics.Database.Name, cfg.TDengine.Usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Metrics.Database.Name), util.GetQidOwn()) + }() + + data, err := conn.Query(context.Background(), "select server_version()", util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) +} + +func generateSelfSignedCert() (tls.Certificate, error) { + priv, err := ecdsa.GenerateKey(elliptic.P384(), crand.Reader) + if err != nil { + return tls.Certificate{}, err + } + + notBefore := time.Now() + notAfter := notBefore.Add(365 * 24 * time.Hour) + + serialNumber, err := crand.Int(crand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) + if err != nil { + return tls.Certificate{}, err + } + + template := x509.Certificate{ + 
SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Your Company"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(crand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return tls.Certificate{}, err + } + + certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) + keyPEM, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return tls.Certificate{}, err + } + + keyPEMBlock := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyPEM}) + + return tls.X509KeyPair(certPEM, keyPEMBlock) +} + +func startProxy() *http.Server { + // Generate self-signed certificate + cert, err := generateSelfSignedCert() + if err != nil { + log.Fatalf("Failed to generate self-signed certificate: %v", err) + } + + target := "http://127.0.0.1:6041" + proxyURL, err := url.Parse(target) + if err != nil { + log.Fatalf("Failed to parse target URL: %v", err) + } + + proxy := httputil.NewSingleHostReverseProxy(proxyURL) + proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, e error) { + http.Error(w, "Proxy error", http.StatusBadGateway) + } + mux := http.NewServeMux() + mux.Handle("/", proxy) + + server := &http.Server{ + Addr: ":34443", + Handler: mux, + TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}}, + // Setup server timeouts for better handling of idle connections and slowloris attacks + WriteTimeout: 10 * time.Second, + ReadTimeout: 10 * time.Second, + IdleTimeout: 30 * time.Second, + } + + log.Println("Starting server on :34443") + go func() { + err = server.ListenAndServeTLS("", "") + if err != nil && err != http.ErrServerClosed { + log.Fatalf("Failed to start HTTPS server: %v", err) + } + }() + return server +} diff --git a/tools/keeper/api/nodeexporter.go 
b/tools/keeper/api/nodeexporter.go new file mode 100644 index 00000000000..7b87a14336b --- /dev/null +++ b/tools/keeper/api/nodeexporter.go @@ -0,0 +1,32 @@ +package api + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/taosdata/taoskeeper/process" +) + +type NodeExporter struct { + processor *process.Processor +} + +func NewNodeExporter(processor *process.Processor) *NodeExporter { + return &NodeExporter{processor: processor} +} + +func (z *NodeExporter) Init(c gin.IRouter) { + reg := prometheus.NewPedanticRegistry() + reg.MustRegister(z.processor) + c.GET("metrics", z.myMiddleware(promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))) +} + +func (z *NodeExporter) myMiddleware(next http.Handler) gin.HandlerFunc { + return func(c *gin.Context) { + z.processor.Process() + // call Prometheus handler + next.ServeHTTP(c.Writer, c.Request) + } +} diff --git a/tools/keeper/api/report.go b/tools/keeper/api/report.go new file mode 100644 index 00000000000..eb9c3856f87 --- /dev/null +++ b/tools/keeper/api/report.go @@ -0,0 +1,478 @@ +package api + +import ( + "bytes" + "context" + "fmt" + "strconv" + "strings" + "sync/atomic" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/go-utils/json" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var logger = log.GetLogger("REP") + +var createList = []string{ + // CreateClusterInfoSql, + // CreateDnodeSql, + // CreateMnodeSql, + // CreateDnodeInfoSql, + // CreateDataDirSql, + // CreateLogDirSql, + // CreateTempDirSql, + // CreateVgroupsInfoSql, + // CreateVnodeRoleSql, + // CreateSummarySql, + // CreateGrantInfoSql, + CreateKeeperSql, +} + +type Reporter struct { + username string + password string + host string + port int + 
usessl bool + dbname string + databaseOptions map[string]interface{} + totalRep atomic.Value +} + +func NewReporter(conf *config.Config) *Reporter { + r := &Reporter{ + username: conf.TDengine.Username, + password: conf.TDengine.Password, + host: conf.TDengine.Host, + port: conf.TDengine.Port, + usessl: conf.TDengine.Usessl, + dbname: conf.Metrics.Database.Name, + databaseOptions: conf.Metrics.Database.Options, + } + r.totalRep.Store(0) + return r +} + +func (r *Reporter) Init(c gin.IRouter) { + c.POST("report", r.handlerFunc()) + r.createDatabase() + r.creatTables() + // todo: it can delete in the future. + if r.shouldDetectFields() { + r.detectGrantInfoFieldType() + r.detectClusterInfoFieldType() + r.detectVgroupsInfoType() + } +} + +func (r *Reporter) getConn() *db.Connector { + conn, err := db.NewConnector(r.username, r.password, r.host, r.port, r.usessl) + if err != nil { + qid := util.GetQidOwn() + + logger := logger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + logger.Errorf("connect to database error, msg:%s", err) + panic(err) + } + return conn +} + +func (r *Reporter) detectGrantInfoFieldType() { + // `expire_time` `timeseries_used` `timeseries_total` in table `grant_info` changed to bigint from TS-3003. + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + r.detectFieldType(ctx, conn, "grants_info", "expire_time", "bigint") + r.detectFieldType(ctx, conn, "grants_info", "timeseries_used", "bigint") + r.detectFieldType(ctx, conn, "grants_info", "timeseries_total", "bigint") + if r.tagExist(ctx, conn, "grants_info", "dnode_id") { + r.dropTag(ctx, conn, "grants_info", "dnode_id") + } + if r.tagExist(ctx, conn, "grants_info", "dnode_ep") { + r.dropTag(ctx, conn, "grants_info", "dnode_ep") + } +} + +func (r *Reporter) detectClusterInfoFieldType() { + // `tbs_total` in table `cluster_info` changed to bigint from TS-3003. 
+ ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + r.detectFieldType(ctx, conn, "cluster_info", "tbs_total", "bigint") + + // add column `topics_total` and `streams_total` from TD-22032 + // if exists, _ := r.columnInfo(ctx, conn, "cluster_info", "topics_total"); !exists { + // logger.Warningf("## %s.cluster_info.topics_total not exists, will add it", r.dbname) + // r.addColumn(ctx, conn, "cluster_info", "topics_total", "int") + // } + // if exists, _ := r.columnInfo(ctx, conn, "cluster_info", "streams_total"); !exists { + // logger.Warningf("## %s.cluster_info.streams_total not exists, will add it", r.dbname) + // r.addColumn(ctx, conn, "cluster_info", "streams_total", "int") + // } +} + +func (r *Reporter) detectVgroupsInfoType() { + // `tables_num` in table `vgroups_info` changed to bigint from TS-3003. + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + r.detectFieldType(ctx, conn, "vgroups_info", "tables_num", "bigint") +} + +func (r *Reporter) detectFieldType(ctx context.Context, conn *db.Connector, table, field, fieldType string) { + _, colType := r.columnInfo(ctx, conn, table, field) + if colType == "INT" { + logger.Warningf("%s.%s.%s type is %s, will change to %s", r.dbname, table, field, colType, fieldType) + // drop column `tables_num` + r.dropColumn(ctx, conn, table, field) + + // add column `tables_num` + r.addColumn(ctx, conn, table, field, fieldType) + } +} + +func (r *Reporter) shouldDetectFields() bool { + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + version, err := r.serverVersion(ctx, conn) + if err != nil { + logger.Errorf("get server version error:%s", err) + return false + } + + // if server version is less than v3.0.3.0, should not detect fields. + versions := strings.Split(version, ".") + if len(versions) < 4 { + logger.Errorf("get server version error. 
version:%s", version) + return false + } + + v1, _ := strconv.Atoi(versions[0]) + v2, _ := strconv.Atoi(versions[1]) + v3, _ := strconv.Atoi(versions[2]) + + if v1 > 3 || v2 > 0 || v3 >= 3 { + return true + } + + return false +} + +func (r *Reporter) serverVersion(ctx context.Context, conn *db.Connector) (version string, err error) { + res, err := conn.Query(ctx, "select server_version()", util.GetQidOwn()) + if err != nil { + logger.Errorf("get server version error, msg:%s", err) + return + } + + if len(res.Data) == 0 { + logger.Errorf("get server version error. response:%+v", res) + return + } + + if len(res.Data) != 1 && len(res.Data[0]) != 1 { + logger.Errorf("get server version error. response:%+v", res) + return + } + + version = res.Data[0][0].(string) + + return +} + +func (r *Reporter) columnInfo(ctx context.Context, conn *db.Connector, table string, field string) (exists bool, colType string) { + res, err := conn.Query(ctx, fmt.Sprintf("select col_type from information_schema.ins_columns where table_name='%s' and db_name='%s' and col_name='%s'", table, r.dbname, field), util.GetQidOwn()) + if err != nil { + logger.Errorf("get %s field type error, msg:%s", r.dbname, err) + panic(err) + } + + if len(res.Data) == 0 { + return + } + + if len(res.Data) != 1 && len(res.Data[0]) != 1 { + logger.Errorf("get field type for %s error. response:%+v", table, res) + panic(fmt.Sprintf("get field type for %s error. 
response:%+v", table, res)) + } + + exists = true + colType = res.Data[0][0].(string) + colType = strings.ToUpper(colType) + return +} + +func (r *Reporter) tagExist(ctx context.Context, conn *db.Connector, stable string, tag string) (exists bool) { + res, err := conn.Query(ctx, fmt.Sprintf("select tag_name from information_schema.ins_tags where stable_name='%s' and db_name='%s' and tag_name='%s'", stable, r.dbname, tag), util.GetQidOwn()) + if err != nil { + logger.Errorf("get %s tag_name error, msg:%s", r.dbname, err) + panic(err) + } + + if len(res.Data) == 0 { + exists = false + return + } + + if len(res.Data) != 1 && len(res.Data[0]) != 1 { + logger.Errorf("get tag_name for %s error. response:%+v", stable, res) + panic(fmt.Sprintf("get tag_name for %s error. response:%+v", stable, res)) + } + + exists = true + return +} + +func (r *Reporter) dropColumn(ctx context.Context, conn *db.Connector, table string, field string) { + if _, err := conn.Exec(ctx, fmt.Sprintf("alter table %s.%s drop column %s", r.dbname, table, field), util.GetQidOwn()); err != nil { + logger.Errorf("drop column %s from table %s error, msg:%s", field, table, err) + panic(err) + } +} + +func (r *Reporter) dropTag(ctx context.Context, conn *db.Connector, stable string, tag string) { + if _, err := conn.Exec(ctx, fmt.Sprintf("alter stable %s.%s drop tag %s", r.dbname, stable, tag), util.GetQidOwn()); err != nil { + logger.Errorf("drop tag %s from stable %s error, msg:%s", tag, stable, err) + panic(err) + } +} + +func (r *Reporter) addColumn(ctx context.Context, conn *db.Connector, table string, field string, fieldType string) { + if _, err := conn.Exec(ctx, fmt.Sprintf("alter table %s.%s add column %s %s", r.dbname, table, field, fieldType), util.GetQidOwn()); err != nil { + logger.Errorf("add column %s to table %s error, msg:%s", field, table, err) + panic(err) + } +} + +func (r *Reporter) createDatabase() { + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + 
createDBSql := r.generateCreateDBSql() + logger.Warningf("create database sql: %s", createDBSql) + + if _, err := conn.Exec(ctx, createDBSql, util.GetQidOwn()); err != nil { + logger.Errorf("create database %s error, msg:%v", r.dbname, err) + panic(err) + } +} + +func (r *Reporter) generateCreateDBSql() string { + var buf bytes.Buffer + buf.WriteString("create database if not exists ") + buf.WriteString(r.dbname) + + for k, v := range r.databaseOptions { + buf.WriteString(" ") + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + return buf.String() +} + +func (r *Reporter) creatTables() { + ctx := context.Background() + conn, err := db.NewConnectorWithDb(r.username, r.password, r.host, r.port, r.dbname, r.usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + defer r.closeConn(conn) + + for _, createSql := range createList { + logger.Infof("execute sql:%s", createSql) + if _, err = conn.Exec(ctx, createSql, util.GetQidOwn()); err != nil { + logger.Errorf("execute sql:%s, error:%s", createSql, err) + } + } +} + +func (r *Reporter) closeConn(conn *db.Connector) { + if err := conn.Close(); err != nil { + logger.Errorf("close connection error, msg:%s", err) + } +} + +func (r *Reporter) handlerFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + logger := logger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + r.recordTotalRep() + // data parse + data, err := c.GetRawData() + if err != nil { + logger.Errorf("receiving taosd data error, msg:%s", err) + return + } + var report Report + + logger.Tracef("report data:%s", string(data)) + if e := json.Unmarshal(data, &report); e != nil { + logger.Errorf("error occurred while unmarshal request, data:%s, error:%s", data, err) + return + } + var sqls []string + if report.ClusterInfo != nil { 
+ sqls = append(sqls, insertClusterInfoSql(*report.ClusterInfo, report.ClusterID, report.Protocol, report.Ts)...) + } + sqls = append(sqls, insertDnodeSql(report.DnodeInfo, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)) + if report.GrantInfo != nil { + sqls = append(sqls, insertGrantSql(*report.GrantInfo, report.DnodeID, report.ClusterID, report.Ts)) + } + sqls = append(sqls, insertDataDirSql(report.DiskInfos, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)...) + for _, group := range report.VgroupInfos { + sqls = append(sqls, insertVgroupSql(group, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)...) + } + sqls = append(sqls, insertLogSummary(report.LogInfos, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)) + + conn, err := db.NewConnectorWithDb(r.username, r.password, r.host, r.port, r.dbname, r.usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + defer r.closeConn(conn) + ctx := context.Background() + + for _, sql := range sqls { + logger.Tracef("execute sql:%s", sql) + if _, err := conn.Exec(ctx, sql, util.GetQidOwn()); err != nil { + logger.Errorf("execute sql error, sql:%s, error:%s", sql, err) + } + } + } +} + +func (r *Reporter) recordTotalRep() { + old := r.totalRep.Load().(int) + for i := 0; i < 3; i++ { + r.totalRep.CompareAndSwap(old, old+1) + } +} + +func (r *Reporter) GetTotalRep() *atomic.Value { + return &r.totalRep +} + +func insertClusterInfoSql(info ClusterInfo, ClusterID string, protocol int, ts string) []string { + var sqls []string + var dtotal, dalive, mtotal, malive int + for _, dnode := range info.Dnodes { + sqls = append(sqls, fmt.Sprintf("insert into d_info_%s using d_info tags (%d, '%s', '%s') values ('%s', '%s')", + ClusterID+strconv.Itoa(dnode.DnodeID), dnode.DnodeID, dnode.DnodeEp, ClusterID, ts, dnode.Status)) + dtotal++ + if "ready" == dnode.Status { + dalive++ + } + } + + for _, mnode := range info.Mnodes { + sqls = append(sqls, 
fmt.Sprintf("insert into m_info_%s using m_info tags (%d, '%s', '%s') values ('%s', '%s')", + ClusterID+strconv.Itoa(mnode.MnodeID), mnode.MnodeID, mnode.MnodeEp, ClusterID, ts, mnode.Role)) + mtotal++ + //LEADER FOLLOWER CANDIDATE ERROR + if "ERROR" != mnode.Role { + malive++ + } + } + + sqls = append(sqls, fmt.Sprintf( + "insert into cluster_info_%s using cluster_info tags('%s') (ts, first_ep, first_ep_dnode_id, version, "+ + "master_uptime, monitor_interval, dbs_total, tbs_total, stbs_total, dnodes_total, dnodes_alive, "+ + "mnodes_total, mnodes_alive, vgroups_total, vgroups_alive, vnodes_total, vnodes_alive, connections_total, "+ + "topics_total, streams_total, protocol) values ('%s', '%s', %d, '%s', %f, %d, %d, %d, %d, %d, %d, %d, %d, "+ + "%d, %d, %d, %d, %d, %d, %d, %d)", + ClusterID, ClusterID, ts, info.FirstEp, info.FirstEpDnodeID, info.Version, info.MasterUptime, info.MonitorInterval, + info.DbsTotal, info.TbsTotal, info.StbsTotal, dtotal, dalive, mtotal, malive, info.VgroupsTotal, info.VgroupsAlive, + info.VnodesTotal, info.VnodesAlive, info.ConnectionsTotal, info.TopicsTotal, info.StreamsTotal, protocol)) + return sqls +} + +func insertDnodeSql(info DnodeInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) string { + return fmt.Sprintf("insert into dnode_info_%s using dnodes_info tags (%d, '%s', '%s') values ('%s', %f, %f, %f, %f, %d, %d, %d, %d, %d, %d, %f, %f, %f, %f, %f, %f, %d, %f, %d, %d, %f, %d, %d, %f, %d, %d, %d, %d, %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, info.Uptime, info.CPUEngine, info.CPUSystem, info.CPUCores, info.MemEngine, info.MemSystem, info.MemTotal, + info.DiskEngine, info.DiskUsed, info.DiskTotal, info.NetIn, info.NetOut, info.IoRead, info.IoWrite, + info.IoReadDisk, info.IoWriteDisk, info.ReqSelect, info.ReqSelectRate, info.ReqInsert, info.ReqInsertSuccess, + info.ReqInsertRate, info.ReqInsertBatch, info.ReqInsertBatchSuccess, info.ReqInsertBatchRate, info.Errors, + 
info.VnodesNum, info.Masters, info.HasMnode, info.HasQnode, info.HasSnode, info.HasBnode) +} + +func insertDataDirSql(disk DiskInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) []string { + var sqls []string + for _, data := range disk.Datadir { + sqls = append(sqls, + fmt.Sprintf("insert into data_dir_%s using data_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, data.Name, data.Level, data.Avail.IntPart(), data.Used.IntPart(), data.Total.IntPart()), + ) + } + sqls = append(sqls, + fmt.Sprintf("insert into log_dir_%s using log_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, disk.Logdir.Name, disk.Logdir.Avail.IntPart(), disk.Logdir.Used.IntPart(), disk.Logdir.Total.IntPart()), + fmt.Sprintf("insert into temp_dir_%s using temp_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, disk.Tempdir.Name, disk.Tempdir.Avail.IntPart(), disk.Tempdir.Used.IntPart(), disk.Tempdir.Total.IntPart()), + ) + return sqls +} + +func insertVgroupSql(g VgroupInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) []string { + var sqls []string + sqls = append(sqls, fmt.Sprintf("insert into vgroups_info_%s using vgroups_info tags (%d, '%s', '%s') "+ + "(ts, vgroup_id, database_name, tables_num, status, ) values ( '%s','%d', '%s', %d, '%s')", + ClusterID+strconv.Itoa(DnodeID)+strconv.Itoa(g.VgroupID), DnodeID, DnodeEp, ClusterID, + ts, g.VgroupID, g.DatabaseName, g.TablesNum, g.Status)) + for _, v := range g.Vnodes { + sqls = append(sqls, fmt.Sprintf("insert into vnodes_role_%s using vnodes_role tags (%d, '%s', '%s') values ('%s', '%s')", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, ts, v.VnodeRole)) + } + return sqls +} + +func insertLogSummary(log LogInfo, DnodeID int, DnodeEp string, ClusterID string, ts 
string) string { + var e, info, debug, trace int + for _, s := range log.Summary { + switch s.Level { + case "error": + e = s.Total + case "info": + info = s.Total + case "debug": + debug = s.Total + case "trace": + trace = s.Total + } + } + return fmt.Sprintf("insert into log_summary_%s using log_summary tags (%d, '%s', '%s') values ('%s', %d, %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, ts, e, info, debug, trace) +} + +func insertGrantSql(g GrantInfo, DnodeID int, ClusterID string, ts string) string { + return fmt.Sprintf("insert into grants_info_%s using grants_info tags ('%s') (ts, expire_time, "+ + "timeseries_used, timeseries_total) values ('%s', %d, %d, %d)", ClusterID+strconv.Itoa(DnodeID), ClusterID, ts, g.ExpireTime, g.TimeseriesUsed, g.TimeseriesTotal) +} diff --git a/tools/keeper/api/tables.go b/tools/keeper/api/tables.go new file mode 100644 index 00000000000..90f0e097212 --- /dev/null +++ b/tools/keeper/api/tables.go @@ -0,0 +1,286 @@ +package api + +import ( + "strconv" + + "github.com/shopspring/decimal" +) + +type Report struct { + Ts string `json:"ts"` + DnodeID int `json:"dnode_id"` + DnodeEp string `json:"dnode_ep"` + ClusterID string `json:"cluster_id"` + Protocol int `json:"protocol"` + ClusterInfo *ClusterInfo `json:"cluster_info"` // only reported by master + StbInfos []StbInfo `json:"stb_infos"` + VgroupInfos []VgroupInfo `json:"vgroup_infos"` // only reported by master + GrantInfo *GrantInfo `json:"grant_info"` // only reported by master + DnodeInfo DnodeInfo `json:"dnode_info"` + DiskInfos DiskInfo `json:"disk_infos"` + LogInfos LogInfo `json:"log_infos"` +} + +type ClusterInfo struct { + FirstEp string `json:"first_ep"` + FirstEpDnodeID int `json:"first_ep_dnode_id"` + Version string `json:"version"` + MasterUptime float32 `json:"master_uptime"` + MonitorInterval int `json:"monitor_interval"` + DbsTotal int `json:"dbs_total"` + TbsTotal int64 `json:"tbs_total"` // change to bigint since TS-3003 + 
StbsTotal int `json:"stbs_total"` + VgroupsTotal int `json:"vgroups_total"` + VgroupsAlive int `json:"vgroups_alive"` + VnodesTotal int `json:"vnodes_total"` + VnodesAlive int `json:"vnodes_alive"` + ConnectionsTotal int `json:"connections_total"` + TopicsTotal int `json:"topics_total"` + StreamsTotal int `json:"streams_total"` + Dnodes []Dnode `json:"dnodes"` + Mnodes []Mnode `json:"mnodes"` +} + +var dnodeEpLen = strconv.Itoa(255) + +var CreateClusterInfoSql = "create table if not exists cluster_info (" + + "ts timestamp, " + + "first_ep binary(134), " + + "first_ep_dnode_id int, " + + "version binary(12), " + + "master_uptime float, " + + "monitor_interval int, " + + "dbs_total int, " + + "tbs_total bigint, " + // change to bigint since TS-3003 + "stbs_total int, " + + "dnodes_total int, " + + "dnodes_alive int, " + + "mnodes_total int, " + + "mnodes_alive int, " + + "vgroups_total int, " + + "vgroups_alive int, " + + "vnodes_total int, " + + "vnodes_alive int, " + + "connections_total int, " + + "topics_total int, " + + "streams_total int, " + + "protocol int " + + ") tags (cluster_id nchar(32))" + +type Dnode struct { + DnodeID int `json:"dnode_id"` + DnodeEp string `json:"dnode_ep"` + Status string `json:"status"` +} + +var CreateDnodeSql = "create table if not exists d_info (" + + "ts timestamp, " + + "status binary(10)" + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type Mnode struct { + MnodeID int `json:"mnode_id"` + MnodeEp string `json:"mnode_ep"` + Role string `json:"role"` +} + +var CreateMnodeSql = "create table if not exists m_info (" + + "ts timestamp, " + + "role binary(10)" + + ") tags (mnode_id int, mnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type DnodeInfo struct { + Uptime float32 `json:"uptime"` + CPUEngine float32 `json:"cpu_engine"` + CPUSystem float32 `json:"cpu_system"` + CPUCores float32 `json:"cpu_cores"` + MemEngine int `json:"mem_engine"` + MemSystem int `json:"mem_system"` + 
MemTotal int `json:"mem_total"` + DiskEngine int64 `json:"disk_engine"` + DiskUsed int64 `json:"disk_used"` + DiskTotal int64 `json:"disk_total"` + NetIn float32 `json:"net_in"` + NetOut float32 `json:"net_out"` + IoRead float32 `json:"io_read"` + IoWrite float32 `json:"io_write"` + IoReadDisk float32 `json:"io_read_disk"` + IoWriteDisk float32 `json:"io_write_disk"` + ReqSelect int `json:"req_select"` + ReqSelectRate float32 `json:"req_select_rate"` + ReqInsert int `json:"req_insert"` + ReqInsertSuccess int `json:"req_insert_success"` + ReqInsertRate float32 `json:"req_insert_rate"` + ReqInsertBatch int `json:"req_insert_batch"` + ReqInsertBatchSuccess int `json:"req_insert_batch_success"` + ReqInsertBatchRate float32 `json:"req_insert_batch_rate"` + Errors int `json:"errors"` + VnodesNum int `json:"vnodes_num"` + Masters int `json:"masters"` + HasMnode int8 `json:"has_mnode"` + HasQnode int8 `json:"has_qnode"` + HasSnode int8 `json:"has_snode"` + HasBnode int8 `json:"has_bnode"` +} + +var CreateDnodeInfoSql = "create table if not exists dnodes_info (" + + "ts timestamp, " + + "uptime float, " + + "cpu_engine float, " + + "cpu_system float, " + + "cpu_cores float, " + + "mem_engine int, " + + "mem_system int, " + + "mem_total int, " + + "disk_engine bigint, " + + "disk_used bigint, " + + "disk_total bigint, " + + "net_in float, " + + "net_out float, " + + "io_read float, " + + "io_write float, " + + "io_read_disk float, " + + "io_write_disk float, " + + "req_select int, " + + "req_select_rate float, " + + "req_insert int, " + + "req_insert_success int, " + + "req_insert_rate float, " + + "req_insert_batch int, " + + "req_insert_batch_success int, " + + "req_insert_batch_rate float, " + + "errors int, " + + "vnodes_num int, " + + "masters int, " + + "has_mnode int, " + + "has_qnode int, " + + "has_snode int, " + + "has_bnode int " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type DiskInfo struct { + Datadir []DataDir 
`json:"datadir"` + Logdir LogDir `json:"logdir"` + Tempdir TempDir `json:"tempdir"` +} + +type DataDir struct { + Name string `json:"name"` + Level int `json:"level"` + Avail decimal.Decimal `json:"avail"` + Used decimal.Decimal `json:"used"` + Total decimal.Decimal `json:"total"` +} + +var CreateDataDirSql = "create table if not exists data_dir (" + + "ts timestamp, " + + "name nchar(200), " + + "`level` int, " + + "avail bigint, " + + "used bigint, " + + "total bigint" + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type LogDir struct { + Name string `json:"name"` + Avail decimal.Decimal `json:"avail"` + Used decimal.Decimal `json:"used"` + Total decimal.Decimal `json:"total"` +} + +var CreateLogDirSql = "create table if not exists log_dir (" + + "ts timestamp, " + + "name nchar(200), " + + "avail bigint, " + + "used bigint, " + + "total bigint" + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type TempDir struct { + Name string `json:"name"` + Avail decimal.Decimal `json:"avail"` + Used decimal.Decimal `json:"used"` + Total decimal.Decimal `json:"total"` +} + +var CreateTempDirSql = "create table if not exists temp_dir(" + + "ts timestamp, " + + "name nchar(200), " + + "avail bigint, " + + "used bigint, " + + "total bigint " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type StbInfo struct { + StbName string `json:"stb_name"` + DataBaseName string `json:"database_name"` +} + +type VgroupInfo struct { + VgroupID int `json:"vgroup_id"` + DatabaseName string `json:"database_name"` + TablesNum int64 `json:"tables_num"` + Status string `json:"status"` + Vnodes []Vnode `json:"vnodes"` +} + +var CreateVgroupsInfoSql = "create table if not exists vgroups_info (" + + "ts timestamp, " + + "vgroup_id int, " + + "database_name binary(33), " + + "tables_num bigint, " + // change to bigint since TS-3003 + "status binary(512) " + + ") tags (dnode_id int, 
dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type Vnode struct { + DnodeID int `json:"dnode_id"` + VnodeRole string `json:"vnode_role"` +} + +var CreateVnodeRoleSql = "create table if not exists vnodes_role (" + + "ts timestamp, " + + "vnode_role binary(10) " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type LogInfo struct { + Summary []Summary `json:"summary"` +} + +type Log struct { + Ts string `json:"ts"` + Level string `json:"level"` + Content string `json:"content"` +} + +type Summary struct { + Level string `json:"level"` + Total int `json:"total"` +} + +var CreateSummarySql = "create table if not exists log_summary(" + + "ts timestamp, " + + "error int, " + + "info int, " + + "debug int, " + + "trace int " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type GrantInfo struct { + ExpireTime int64 `json:"expire_time"` + TimeseriesUsed int64 `json:"timeseries_used"` + TimeseriesTotal int64 `json:"timeseries_total"` +} + +var CreateGrantInfoSql = "create table if not exists grants_info(" + + "ts timestamp, " + + "expire_time bigint, " + + "timeseries_used bigint, " + + "timeseries_total bigint " + + ") tags (cluster_id nchar(32))" + +var CreateKeeperSql = "create table if not exists keeper_monitor (" + + "ts timestamp, " + + "cpu float, " + + "mem float, " + + "total_reports int " + + ") tags (identify nchar(50))" diff --git a/tools/keeper/api/zabbix.go b/tools/keeper/api/zabbix.go new file mode 100644 index 00000000000..8b7cb759926 --- /dev/null +++ b/tools/keeper/api/zabbix.go @@ -0,0 +1,113 @@ +package api + +import ( + "net/http" + "sort" + "strings" + + "github.com/gin-gonic/gin" + "github.com/taosdata/taoskeeper/process" + "github.com/taosdata/taoskeeper/util/pool" +) + +type Zabbix struct { + processor *process.Processor + floatGroup []*process.Metric + strGroup []*process.Metric +} + +func NewZabbix(processor *process.Processor) *Zabbix { + z := 
&Zabbix{processor: processor} + z.processorMetrics() + return z +} + +type zabbixMetric struct { + Data []*ZMetric `json:"data"` +} + +type ZMetric struct { + Metric string `json:"{#METRIC}"` + Key string `json:"key"` + Value interface{} `json:"value"` +} + +const ( + FloatType = iota + 1 + StringType +) + +func (z *Zabbix) Init(c gin.IRouter) { + api := c.Group("zabbix") + api.GET("float", z.getFloat) + api.GET("string", z.getString) +} + +func (z *Zabbix) getFloat(c *gin.Context) { + z.returnData(c, FloatType) +} + +func (z *Zabbix) getString(c *gin.Context) { + z.returnData(c, StringType) +} + +func (z *Zabbix) returnData(c *gin.Context, valueType int) { + var metrics []*process.Metric + switch valueType { + case FloatType: + metrics = z.floatGroup + case StringType: + metrics = z.strGroup + } + var d zabbixMetric + b := pool.BytesPoolGet() + defer pool.BytesPoolPut(b) + for _, metric := range metrics { + values := metric.GetValue() + for _, value := range values { + label := z.sortLabel(value.Label) + b.Reset() + b.WriteString(metric.FQName) + if len(label) > 0 { + b.WriteByte(',') + b.WriteString(label) + } + metricName := b.String() + d.Data = append(d.Data, &ZMetric{ + Metric: metricName, + Key: metricName, + Value: value.Value, + }) + } + } + c.JSON(http.StatusOK, d) +} + +func (z *Zabbix) sortLabel(labels map[string]string) string { + if len(labels) == 0 { + return "" + } + result := make([]string, 0, len(labels)) + b := pool.BytesPoolGet() + defer pool.BytesPoolPut(b) + for k, v := range labels { + b.Reset() + b.WriteString(k) + b.WriteByte('=') + b.WriteString(v) + result = append(result, b.String()) + } + sort.Strings(result) + return strings.Join(result, "_") +} + +func (z *Zabbix) processorMetrics() { + metrics := z.processor.GetMetric() + for _, metric := range metrics { + if metric.Type == process.Gauge || metric.Type == process.Counter { + z.floatGroup = append(z.floatGroup, metric) + } else if metric.Type == process.Info { + z.strGroup = 
append(z.strGroup, metric) + } + } +} diff --git a/tools/keeper/cmd/command.go b/tools/keeper/cmd/command.go new file mode 100644 index 00000000000..82d3efea1fd --- /dev/null +++ b/tools/keeper/cmd/command.go @@ -0,0 +1,461 @@ +package cmd + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strconv" + "sync" + "time" + + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" + "github.com/taosdata/taoskeeper/util/pool" +) + +var logger = log.GetLogger("CMD") + +var MAX_SQL_LEN = 1000000 + +type Command struct { + fromTime time.Time + client *http.Client + conn *db.Connector + username string + password string + url *url.URL +} + +func NewCommand(conf *config.Config) *Command { + client := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableCompression: true, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl) + if err != nil { + logger.Errorf("init db connect error, msg:%s", err) + panic(err) + } + + imp := &Command{ + client: client, + conn: conn, + username: conf.TDengine.Username, + password: conf.TDengine.Password, + url: &url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", conf.TDengine.Host, conf.TDengine.Port), + Path: "/influxdb/v1/write", + RawQuery: fmt.Sprintf("db=%s&precision=ms", conf.Metrics.Database.Name), + }, + } + return imp +} + +func (cmd *Command) Process(conf *config.Config) { + 
if len(conf.Transfer) > 0 && len(conf.Drop) > 0 { + logger.Errorf("transfer and drop can't be set at the same time") + return + } + + if len(conf.Transfer) > 0 && conf.Transfer != "old_taosd_metric" { + logger.Errorf("transfer only support old_taosd_metric") + return + } + + if conf.Transfer == "old_taosd_metric" { + cmd.ProcessTransfer(conf) + return + } + + if len(conf.Drop) > 0 && conf.Drop != "old_taosd_metric_stables" { + logger.Errorf("drop only support old_taosd_metric_stables") + return + } + + if conf.Drop == "old_taosd_metric_stables" { + cmd.ProcessDrop(conf) + return + } +} + +func (cmd *Command) ProcessTransfer(conf *config.Config) { + fromTime, err := time.Parse("2006-01-02T15:04:05Z07:00", conf.FromTime) + if err != nil { + logger.Errorf("parse fromTime error, msg:%s", err) + return + } + cmd.fromTime = fromTime + + funcs := []func() error{ + cmd.TransferTaosdClusterBasicInfo, + cmd.TransferTaosdClusterInfo, + cmd.TransferTaosdVgroupsInfo, + cmd.TransferTaosdDnodesInfo, + cmd.TransferTaosdDnodesStatus, + cmd.TransferTaosdDnodesLogDirs1, + cmd.TransferTaosdDnodesLogDirs2, + cmd.TransferTaosdDnodesDataDirs, + cmd.TransferTaosdMnodesInfo, + cmd.TransferTaosdVnodesInfo, + } + wg := sync.WaitGroup{} + wg.Add(len(funcs)) + + for i := range funcs { + index := i + err := pool.GoroutinePool.Submit(func() { + defer wg.Done() + funcs[index]() + }) + + if err != nil { + panic(err) + } + } + + wg.Wait() + logger.Info("transfer all old taosd metric success!!") +} + +func (cmd *Command) TransferTaosdClusterInfo() error { + sql := "select a.cluster_id, master_uptime * 3600 * 24 as cluster_uptime, dbs_total, tbs_total, stbs_total, dnodes_total, dnodes_alive, mnodes_total, mnodes_alive, vgroups_total, vgroups_alive, vnodes_total, vnodes_alive, connections_total, topics_total, streams_total, b.expire_time as grants_expire_time, b.timeseries_used as grants_timeseries_used, b.timeseries_total as grants_timeseries_total, a.ts from cluster_info a, grants_info b where a.ts 
= b.ts and a.cluster_id = b.cluster_id and" + dstTable := "taosd_cluster_info" + return cmd.TransferTableToDst(sql, dstTable, 1) +} + +func (cmd *Command) TransferTaosdVgroupsInfo() error { + sql := "select cluster_id, vgroup_id, database_name, tables_num, CASE status WHEN 'ready' THEN 1 ELSE 0 END as status, ts from vgroups_info a where " + dstTable := "taosd_vgroups_info" + return cmd.TransferTableToDst(sql, dstTable, 3) +} + +func (cmd *Command) TransferTaosdDnodesInfo() error { + sql := "select a.cluster_id, a.dnode_id, a.dnode_ep, uptime * 3600 * 24 as uptime, cpu_engine, cpu_system, cpu_cores, mem_engine, mem_system as mem_free, mem_total, disk_used, disk_total, disk_engine, net_in as system_net_in, net_out as system_net_out, io_read, io_write, io_read_disk, io_write_disk, vnodes_num, masters, has_mnode, has_qnode, has_snode, has_bnode, errors, b.error as error_log_count, b.info as info_log_count, b.debug as debug_log_count, b.trace as trace_log_count, a.ts as ts from dnodes_info a, log_summary b where a.ts = b.ts and a.dnode_id = b.dnode_id and a. 
dnode_ep = b.dnode_ep and " + dstTable := "taosd_dnodes_info" + return cmd.TransferTableToDst(sql, dstTable, 3) +} +func (cmd *Command) TransferTaosdDnodesStatus() error { + sql := "select cluster_id, dnode_id, dnode_ep, CASE status WHEN 'ready' THEN 1 ELSE 0 END as status, ts from d_info a where " + dstTable := "taosd_dnodes_status" + return cmd.TransferTableToDst(sql, dstTable, 3) +} + +func (cmd *Command) TransferTaosdDnodesLogDirs1() error { + sql := "select cluster_id, dnode_id, dnode_ep, name as log_dir_name, avail, used, total, ts from log_dir a where " + dstTable := "taosd_dnodes_log_dirs" + return cmd.TransferTableToDst(sql, dstTable, 4) +} +func (cmd *Command) TransferTaosdDnodesLogDirs2() error { + sql := "select cluster_id, dnode_id, dnode_ep, name as log_dir_name, avail, used, total, ts from temp_dir a where " + dstTable := "taosd_dnodes_log_dirs" + return cmd.TransferTableToDst(sql, dstTable, 4) +} + +func (cmd *Command) TransferTaosdDnodesDataDirs() error { + sql := "select cluster_id, dnode_id, dnode_ep, name as data_dir_name, `level` as data_dir_level, avail, used, total, ts from data_dir a where " + dstTable := "taosd_dnodes_data_dirs" + return cmd.TransferTableToDst(sql, dstTable, 5) +} + +func (cmd *Command) TransferTaosdMnodesInfo() error { + sql := "select cluster_id, mnode_id, mnode_ep, CASE role WHEN 'offline' THEN 0 WHEN 'follower' THEN 100 WHEN 'candidate' THEN 101 WHEN 'leader' THEN 102 WHEN 'learner' THEN 104 ELSE 103 END as role, ts from m_info a where " + dstTable := "taosd_mnodes_info" + return cmd.TransferTableToDst(sql, dstTable, 3) +} + +func (cmd *Command) TransferTaosdVnodesInfo() error { + sql := "select cluster_id, 0 as vgroup_id, 'UNKNOWN' as database_name, dnode_id, CASE vnode_role WHEN 'offline' THEN 0 WHEN 'follower' THEN 100 WHEN 'candidate' THEN 101 WHEN 'leader' THEN 102 WHEN 'learner' THEN 104 ELSE 103 END as role, ts from vnodes_role a where " + dstTable := "taosd_vnodes_info" + return cmd.TransferTableToDst(sql, 
dstTable, 4) +} + +func (cmd *Command) ProcessDrop(conf *config.Config) { + var dropStableList = []string{ + "log_dir", + "dnodes_info", + "data_dir", + "log_summary", + "m_info", + "vnodes_role", + "cluster_info", + "temp_dir", + "grants_info", + "vgroups_info", + "d_info", + "taosadapter_system_cpu_percent", + "taosadapter_restful_http_request_in_flight", + "taosadapter_restful_http_request_summary_milliseconds", + "taosadapter_restful_http_request_fail", + "taosadapter_system_mem_percent", + "taosadapter_restful_http_request_total", + } + ctx := context.Background() + logger.Infof("use database:%s", conf.Metrics.Database.Name) + + for _, stable := range dropStableList { + if _, err := cmd.conn.Exec(ctx, "DROP STABLE IF EXISTS "+stable, util.GetQidOwn()); err != nil { + logger.Errorf("drop stable %s, error:%s", stable, err) + panic(err) + } + } + logger.Info("drop old taosd metric stables success!!") +} + +func (cmd *Command) TransferDataToDest(data *db.Data, dstTable string, tagNum int) { + + var buf bytes.Buffer + + if len(data.Data) < 1 { + return + } + + for _, row := range data.Data { + // get one row here + buf.WriteString(dstTable) + + // write tags + var tag string + for j := 0; j < tagNum; j++ { + switch v := row[j].(type) { + case int: + tag = fmt.Sprint(v) + case int32: + tag = fmt.Sprint(v) + case int64: + tag = fmt.Sprint(v) + case string: + tag = v + default: + panic(fmt.Sprintf("Unexpected type for row[%d]: %T", j, row[j])) + } + + if tag != "" { + buf.WriteString(fmt.Sprintf(",%s=%s", data.Head[j], util.EscapeInfluxProtocol(tag))) + } else { + buf.WriteString(fmt.Sprintf(",%s=%s", data.Head[j], "unknown")) + logger.Errorf("tag value is empty, tag_name:%s", data.Head[j]) + } + } + buf.WriteString(" ") + + // write metrics + for j := tagNum; j < len(row)-1; j++ { + + switch v := row[j].(type) { + case int: + buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v))) + case int32: + buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], 
float64(v))) + case int64: + buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v))) + case float32: + buf.WriteString(fmt.Sprintf("%s=%sf64", data.Head[j], strconv.FormatFloat(float64(v), 'f', -1, 64))) + case float64: + buf.WriteString(fmt.Sprintf("%s=%sf64", data.Head[j], strconv.FormatFloat(v, 'f', -1, 64))) + default: + panic(fmt.Sprintf("Unexpected type for row[%d]: %T", j, row[j])) + } + + if j != len(row)-2 { + buf.WriteString(",") + } + } + + // write timestamp + buf.WriteString(" ") + buf.WriteString(fmt.Sprintf("%v", row[len(row)-1].(time.Time).UnixMilli())) + buf.WriteString("\n") + + if buf.Len() >= MAX_SQL_LEN { + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + logger.Tracef("buf:%v", buf.String()) + } + err := cmd.lineWriteBody(&buf) + if err != nil { + logger.Errorf("insert data error, msg:%s", err) + panic(err) + } + buf.Reset() + } + } + + if buf.Len() > 0 { + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + logger.Tracef("buf:%v", buf.String()) + } + err := cmd.lineWriteBody(&buf) + if err != nil { + logger.Errorf("insert data error, msg:%s", err) + panic(err) + } + } +} + +// cluster_info +func (cmd *Command) TransferTaosdClusterBasicInfo() error { + + ctx := context.Background() + + endTime := time.Now() + delta := time.Hour * 24 * 10 + + var createTableSql = "create stable if not exists taosd_cluster_basic " + + "(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " + + "tags (cluster_id varchar(50))" + + if _, err := cmd.conn.Exec(ctx, createTableSql, util.GetQidOwn()); err != nil { + logger.Errorf("create taosd_cluster_basic error, msg:%s", err) + return err + } + + logger.Tracef("fromeTime:%d", cmd.fromTime.UnixMilli()) + + for current := cmd.fromTime; current.Before(endTime); current = current.Add(time.Duration(delta)) { + querySql := fmt.Sprintf("select cluster_id, first_ep, first_ep_dnode_id, `version` as cluster_version, ts from cluster_info where ts > %d and ts <= %d", + 
current.UnixMilli(), current.Add(time.Duration(delta)).UnixMilli()) + logger.Tracef("query sql:%s", querySql) + data, err := cmd.conn.Query(ctx, querySql, util.GetQidOwn()) + if err != nil { + logger.Errorf("query cluster_info error, msg:%s", err) + return err + } + + // transfer data to new table, only this table need use insert statement + var buf bytes.Buffer + + // 使用 map 将二维数组切分为多个二维数组 + result := make(map[string][][]interface{}) + for _, row := range data.Data { + key := row[0].(string) // 使用第一列的值作为 key + result[key] = append(result[key], row) + } + + // 按照不同 tag 来迁移数据 + for _, dataByCluster := range result { + buf.Reset() + + for _, row := range dataByCluster { + if len(buf.Bytes()) == 0 { + sql := fmt.Sprintf( + "insert into taosd_cluster_basic_%s using taosd_cluster_basic tags ('%s') values ", + row[0].(string), row[0].(string)) + + buf.WriteString(sql) + } + + sql := fmt.Sprintf( + "(%d, '%s', %d, '%s')", + row[4].(time.Time).UnixMilli(), row[1].(string), row[2].(int32), row[3].(string)) + buf.WriteString(sql) + + if buf.Len() >= MAX_SQL_LEN { + rowsAffected, err := cmd.conn.Exec(context.Background(), buf.String(), util.GetQidOwn()) + if err != nil { + logger.Errorf("insert taosd_cluster_basic error, msg:%s", err) + return err + } + if rowsAffected <= 0 { + logger.Errorf("insert taosd_cluster_basic failed, rowsAffected:%d", rowsAffected) + } + buf.Reset() + } + } + + if buf.Len() > 0 { + rowsAffected, err := cmd.conn.Exec(context.Background(), buf.String(), util.GetQidOwn()) + if err != nil { + logger.Errorf("insert taosd_cluster_basic error, msg:%s", err) + return err + } + if rowsAffected <= 0 { + logger.Errorf("insert taosd_cluster_basic failed, rowsAffected:%d", rowsAffected) + } + } + } + } + + logger.Info("transfer stable taosd_cluster_basic success!!") + return nil +} + +// cluster_info +func (cmd *Command) TransferTableToDst(sql string, dstTable string, tagNum int) error { + + ctx := context.Background() + + endTime := time.Now() + delta := 
time.Hour * 24 * 10 + + logger.Tracef("fromTime:%d", cmd.fromTime.UnixMilli()) + + for current := cmd.fromTime; current.Before(endTime); current = current.Add(time.Duration(delta)) { + querySql := fmt.Sprintf(sql+" a.ts > %d and a.ts <= %d", + current.UnixMilli(), current.Add(time.Duration(delta)).UnixMilli()) + logger.Tracef("query sql:%s", querySql) + data, err := cmd.conn.Query(ctx, querySql, util.GetQidOwn()) + if err != nil { + logger.Errorf("query cluster_info error, msg:%s", err) + return err + } + + // transfer data to new table, only this table need use insert statement + cmd.TransferDataToDest(data, dstTable, tagNum) + } + + logger.Info("transfer stable " + dstTable + " success!!") + return nil +} + +func (cmd *Command) lineWriteBody(buf *bytes.Buffer) error { + header := map[string][]string{ + "Connection": {"keep-alive"}, + } + + req := &http.Request{ + Method: http.MethodPost, + URL: cmd.url, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: header, + Host: cmd.url.Host, + } + req.SetBasicAuth(cmd.username, cmd.password) + + req.Body = io.NopCloser(buf) + resp, err := cmd.client.Do(req) + + if err != nil { + logger.Errorf("writing metrics exception, msg:%s", err) + return err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status code %d:body:%s", resp.StatusCode, string(body)) + } + return nil +} diff --git a/tools/keeper/cmd/empty_test.go b/tools/keeper/cmd/empty_test.go new file mode 100644 index 00000000000..143df6893c2 --- /dev/null +++ b/tools/keeper/cmd/empty_test.go @@ -0,0 +1,8 @@ +package cmd + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/config/metrics.sample b/tools/keeper/config/metrics.sample new file mode 100644 index 00000000000..9dbfea2323d --- /dev/null +++ b/tools/keeper/config/metrics.sample @@ -0,0 +1,38 @@ +prefix = "taos" +cluster = "localhost" +database = "log" +explicit = 
false + +[tables.restful_info] +[tables.slowquery] +[tables.cluster_info] +[tables.grants_info] +[tables.disks_info] + +[tables.logs] +explicit = true +[tables.logs.metrics.content] +type = "info" +help = "login types or messages" +[tables.logs.metrics.level] +type = "gauge" +help = "login level" + +[tables.dnodes_info] +[tables.dnodes_info.metrics.has_mnode] +type = "gauge" +help = "check if the node has mnode" + +[tables.vgroups_info] +column_as_variables = ["database_name", "dnode_roles", "dnode_ids"] +explicit = false + +[tables.vgroups_info.metrics.tables_num] +type = "gauge" +help = "Tables count of the vgroup" +[tables.vgroups_info.metrics.online_vnodes] +type = "gauge" +help = "Online v-nodes of the v-group" +[tables.vgroups_info.metrics.status] +type = "info" +help = "Status of the v-group" diff --git a/tools/keeper/config/taoskeeper.toml b/tools/keeper/config/taoskeeper.toml new file mode 100644 index 00000000000..89847db2d5e --- /dev/null +++ b/tools/keeper/config/taoskeeper.toml @@ -0,0 +1,53 @@ +instanceId = 64 + +# Listen port, default is 6043 +port = 6043 + +# go pool size +gopoolsize = 50000 + +# interval for metrics +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" +usessl = false + +[metrics] +# metrics prefix in metrics names. +prefix = "taos" + +# export some tables that are not super table +tables = [] + +# database for storing metrics data +[metrics.database] +name = "log" +# database options for db storing metrics data +[metrics.database.options] +vgroups = 1 +buffer = 64 +keep = 90 +cachemodel = "both" + +[environment] +# Whether running in cgroup. +incgroup = false + +[log] +# The directory where log files are stored. +# path = "/var/log/taos" +level = "info" +# Number of log file rotations before deletion. +rotationCount = 30 +# The number of days to retain log files. +keepDays = 30 +# The maximum size of a log file before rotation. 
+rotationSize = "1GB" +# If set to true, log files will be compressed. +compress = false +# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit. +reservedDiskSize = "1GB" diff --git a/tools/keeper/config/taoskeeper_enterprise.toml b/tools/keeper/config/taoskeeper_enterprise.toml new file mode 100644 index 00000000000..6601b60cd89 --- /dev/null +++ b/tools/keeper/config/taoskeeper_enterprise.toml @@ -0,0 +1,65 @@ +instanceId = 64 + +# Listen port, default is 6043 +port = 6043 + +# go pool size +gopoolsize = 50000 + +# interval for TDengine metrics +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" +usessl = false + +[metrics] +# metrics prefix in metrics names. +prefix = "taos" + +# cluster identifier for multiple TDengine clusters +cluster = "" + +# export some tables that are not super table +tables = [] + +# database for storing metrics data +[metrics.database] +name = "log" +# database options for db storing metrics data +[metrics.database.options] +vgroups = 1 +buffer = 64 +keep = 90 +cachemodel = "both" + +[environment] +# Whether running in cgroup. +incgroup = false + +[audit] +enable = true +[audit.database] +name = "audit" +[audit.database.options] +vgroups = 1 +buffer = 16 +cachemodel = "both" + +[log] +# The directory where log files are stored. +# path = "/var/log/taos" +level = "info" +# Number of log file rotations before deletion. +rotationCount = 30 +# The number of days to retain log files. +keepDays = 30 +# The maximum size of a log file before rotation. +rotationSize = "1GB" +# If set to true, log files will be compressed. +compress = false +# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit. 
+reservedDiskSize = "1GB" diff --git a/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.json b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.json new file mode 100644 index 00000000000..153778915f3 --- /dev/null +++ b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.json @@ -0,0 +1,5365 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": null, + "graphTooltip": 0, + "id": 3, + "iteration": 1643173897059, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": "Prometheus", + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 32, + "options": { + "content": "

TDengine Cluster Dashboard (First EP: ${firstEp}, Version: ${version})

", + "mode": "markdown" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "mnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "-- OVERVIEW --", + "transformations": [ + { + "id": "calculateField", + "options": { + "binary": { + "left": "Time", + "operator": "+", + "reducer": "sum", + "right": "" + }, + "mode": "binary", + "reduce": { + "reducer": "sum" + } + } + } + ], + "type": "text" + }, + { + "collapsed": false, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 57, + "panels": [], + "title": "Cluster Status", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 4 + }, + "id": 73, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "/.*/", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_first_ep{cluster=\"$cluster\"}", + "format": "table", + "formatType": "Time series", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "label_values(taos_cluster_info_first_ep{cluster=\"$cluster\"}, value)", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "First EP", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + 
"value" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 3, + "y": 4 + }, + "id": 74, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/.*/", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_version{cluster=\"$cluster\"}", + "format": "table", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(version) from log.cluster_info", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Version", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "value" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "MNode 被选举后经过的时长", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 6, + "y": 4 + }, + "id": 72, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + 
"expr": "taos_cluster_info_master_uptime{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(master_uptime) from log.cluster_info", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Master Uptime", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "lastNotNull" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "企业版授权到期时间", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 9, + "y": 4 + }, + "id": 99, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_grants_info_expire_time{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(expire_time) from log.grants_info", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Expire Time", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "企业版授权已用测点数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "A" + }, + "properties": [ 
+ { + "id": "noValue", + "value": "unlimited" + } + ] + } + ] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 12, + "y": 4 + }, + "id": 100, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_disk_engine", + "formatType": "Time series", + "interval": "", + "legendFormat": "used", + "queryType": "SQL", + "refId": "A", + "sql": "select max(timeseries_used) as used ,max(timeseries_total) as total from log.grants_info where ts >= $from and ts <= $to interval(30s)", + "target": "select metric", + "type": "timeserie" + }, + { + "exemplar": true, + "expr": "taos_dnodes_info_disk_total", + "hide": false, + "interval": "", + "legendFormat": "total", + "refId": "B" + } + ], + "title": "Used Meassuring Points", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "数据库个数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 16, + "y": 4 + }, + "id": 65, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "count(taos_vgroups_info_status{cluster=\"$cluster\"})", + "format": "time_series", + "formatType": "Time series", + "instant": 
false, + "interval": "", + "legendFormat": "databases", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Databases", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Time", + "databases" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "所有数据库的表数量之和", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 18, + "y": 4 + }, + "id": 68, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_tables_per_database{cluster=\"$cluster\"})", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Tables", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 21, + "y": 4 + }, + "id": 82, + 
"options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/.*/", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select connections_total from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Connections", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群DNode数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 8 + }, + "id": 75, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_dnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select dnodes_total as total,dnodes_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "DNodes", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群MNode数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + 
"mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 6, + "y": 8 + }, + "id": 101, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_mnodes_total{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select mnodes_total as total,mnodes_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "MNodes", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群 VGroups 数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 8 + }, + "id": 102, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "count(count(taos_vgroups_info_status{cluster=\"$cluster\"}) by (vgroup_id))", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select 
vgroups_total as total, vgroups_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VGroups", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群 VNodes 数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 8 + }, + "id": 103, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_vgroups_info_online_vnodes{cluster=\"$cluster\"})", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select vnodes_total as total, vnodes_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VNodes", + "transformations": [], + "type": "bargauge" + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "DNodes Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, 
+ "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 12 + }, + "hiddenSeries": false, + "id": 84, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_dnodes_alive / taos_cluster_info_dnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(dnodes_alive)/avg(dnodes_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DNodes Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:71", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:72", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 
+ ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "MNodes Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 12 + }, + "hiddenSeries": false, + "id": 87, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_mnodes_alive / taos_cluster_info_mnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mnodes_alive)/avg(mnodes_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "MNodes Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ 
+ { + "$$hashKey": "object:221", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:222", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "VGroups Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 12 + }, + "hiddenSeries": false, + "id": 85, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_vgroups_alive / taos_cluster_info_vgroups_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(vgroups_alive)/avg(vgroups_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], 
+ "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "VGroups Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:256", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:257", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "VNodes Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 12 + }, + "hiddenSeries": false, + "id": 86, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + 
"colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_vnodes_alive / taos_cluster_info_vnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(vnodes_alive)/avg(vnodes_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "VNodes Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:291", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:292", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 0.95 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + }, + { + "evaluator": { + "params": [ + 0, + 1 + ], + "type": "within_range" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "keep_state", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "DNodes Alive Percent alert", + "noDataState": "ok", + "notifications": [] + }, + "aliasColors": {}, + 
"bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 18 + }, + "hiddenSeries": false, + "id": 104, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_grants_info_timeseries_used / taos_grants_info_timeseries_total {cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "percent", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(timeseries_used)/avg(timeseries_total) as percent from log.grants_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 0.95, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Messuring Points Used Percent Alert", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:333", + "decimals": null, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:334", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": false + } + ], + "yaxis": { + "align": false, + 
"alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 86400 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "keep_state", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "Grants Expire Time alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 18 + }, + "hiddenSeries": false, + "id": 105, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:368", + "alias": "percent", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_grants_info_expire_time", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(expire_time) as expire_time from log.grants_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 86400, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Grants Expire Time", + "tooltip": { + "shared": true, + "sort": 0, + 
"value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:375", + "decimals": null, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:376", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "Error Rate alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "错误率(每秒错误数)", + "fieldConfig": { + "defaults": { + "unit": "cps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 18 + }, + "hiddenSeries": false, + "id": 106, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:410", + "alias": "percent", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"deriv(taos_dnodes_info_errors{cluster=\"$cluster\"}[1m])", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select derivative(errors, 1s, 1) as errors from (select sum(errors) as errors from log.dnodes_info where ts >= $from and ts <= $to interval(1s))", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Error Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:417", + "decimals": null, + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:418", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 24, + "panels": [], + "repeat": null, + "title": "DNodes Overview", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 0, + "y": 25 + }, + "id": 90, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": 
"value_and_name" + }, + "pluginVersion": "8.2.2", + "repeat": null, + "targets": [ + { + "alias": "", + "colNameFormatStr": "{{groupValue}}", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "taos_dnodes_info_uptime{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "{{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select last(uptime) from log.dnodes_info where ts >= now -1m and ts <= now group by dnode_ep", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "DNodes Lifetime", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 10, + "x": 4, + "y": 25 + }, + "hiddenSeries": false, + "id": 88, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_dnodes_total{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(dnodes_total) as total, avg(dnodes_alive) as alive from log.cluster_info where ts >= $from and ts <= $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"taos_cluster_info_dnodes_alive{cluster=\"$cluster\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "alive", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(dnodes_total) as total, avg(dnodes_alive) as alive from log.cluster_info where ts >= $from and ts <= $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DNodes Number", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:128", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:129", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 10, + "x": 14, + "y": 25 + }, + "hiddenSeries": false, + "id": 89, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"taos_cluster_info_mnodes_total{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mnodes_total) as total, avg(mnodes_alive) as alive from log.cluster_info where ts >= $from and ts <= $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "exemplar": true, + "expr": "taos_cluster_info_mnodes_alive{cluster=\"$cluster\"}", + "hide": false, + "interval": "", + "legendFormat": "alive", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "MNodes Number", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:452", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:453", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 31 + }, + "id": 108, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Include two parts:", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 32 + }, + "hiddenSeries": false, + "id": 110, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": 
false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:66", + "alias": "/success_rate/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_count - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_insert) as insert_count, sum(req_insert_success) as insert_success, sum(req_insert_batch) as insert_batches, sum(req_insert_batch_success) as insert_batch_success from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_success{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_success - {{dnode_ep}}", + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_insert_success) / sum(req_insert) as success_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_batch{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_batches - {{dnode_ep}}", + "queryType": "SQL", + "refId": "C", + "sql": "select sum(req_insert) as total_inserts, sum(req_insert_batch) as total_batches from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"sum(taos_dnodes_info_req_insert_batch_success{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_batches_success - {{dnode_ep}}", + "queryType": "SQL", + "refId": "D", + "sql": "select sum(req_insert_success) / sum(req_insert) as success_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_success{cluster=\"$cluster\"}) by (dnode_ep) / sum(taos_dnodes_info_req_insert{cluster=\"$cluster\"}) by (dnode_ep)", + "hide": false, + "interval": "", + "legendFormat": "success_rate - {{dnode_ep}}", + "refId": "E" + }, + { + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_batch_success{cluster=\"$cluster\"}) by (dnode_ep) / sum(taos_dnodes_info_req_insert_batch{cluster=\"$cluster\"}) by (dnode_ep)", + "hide": false, + "interval": "", + "legendFormat": "batch_success_rate - {{dnode_ep}}", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (Inserts)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "cps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 41 + }, + "hiddenSeries": false, + "id": 112, + "legend": { + "alignAsTable": true, + "avg": 
true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "interval": "", + "legendFormat": "insert_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(req_insert_rate) as insert_rate, avg(req_insert_batch_rate) as batch_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_batch_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "batch_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(insert_rate) as cluster_insert_rate, avg(batch_rate) as cluster_batch_rate from (select sum(req_insert_rate) as insert_rate, sum(req_insert_batch_rate) as batch_rate from log.dnodes_info where ts >= $from and ts <= $to interval(1s)) where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_rate{cluster=\"$cluster\"})", + "hide": false, + "interval": "", + "legendFormat": "cluster_insert_rate", + "refId": "C" + }, + { + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_batch_rate{cluster=\"$cluster\"})", + "hide": false, + "interval": "", + "legendFormat": 
"cluster_batch_rate", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests Rate (Inserts per Second)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 50 + }, + "hiddenSeries": false, + "id": 114, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:227", + "alias": "/rate/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_select{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "interval": "", + "legendFormat": "req_select - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_select) as req_select from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + 
"colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_select{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_select", + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_select) as total from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_select_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "req_select_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(req_select_rate) as req_select_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_select_rate{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_select_rate", + "queryType": "SQL", + "refId": "D", + "sql": "select avg(req_select_rate) as req_select_rate from (select sum(req_select_rate) as req_select_rate from log.dnodes_info where ts >= $from and ts <= $to interval(1s)) where ts >= $from and ts <= $to interval($interval) fill(null)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (Selects)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + 
"align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 58 + }, + "hiddenSeries": false, + "id": 111, + "interval": null, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:100", + "alias": "/.*rate.*/", + "dashes": true, + "fill": 4, + "spaceLength": 1, + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_http{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "interval": "", + "legendFormat": "req_http - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_http) as req_http from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_http{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_http", + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_http) as req_http from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_http_rate{cluster=\"$cluster\"}) by (dnode_ep)", + 
"formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "req_http_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(req_http_rate) as req_http_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_http_rate{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_http_rate", + "queryType": "SQL", + "refId": "D", + "sql": "select avg(req_http_rate) as req_http_rate from (select sum(req_http_rate) as req_http_rate from log.dnodes_info where ts >= $from and ts <= $to interval(1s)) where ts >= $from and ts <= $to interval($interval) fill(null)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (HTTP)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Requests", + "type": "row" + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 92, + "panels": [ + { + "datasource": "Prometheus", + "description": "超级表的个数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 67 + }, + "id": 
96, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_stables_per_database{cluster=\"$cluster\", database=\"$database\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "show $database.stables;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "STables", + "transformations": [], + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 9, + "x": 3, + "y": 67 + }, + "hiddenSeries": false, + "id": 94, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colName": "vgroup_id", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_vgroups_info_tables_num{cluster=\"$cluster\", database_name=\"$database\"})", + "formatType": "Time series", + "groupDataByColName": "", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(tables_num) as tables_num from (select last(ts) as ts, sum(tables_num) 
as tables_num from log.vgroups_info where ts > $from and ts <= $to and database_name = '$database' group by ts) where ts >= $from and ts <= $to interval($interval) fill(null)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Tables", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 67 + }, + "hiddenSeries": false, + "id": 95, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colName": "vgroup_id", + "colNameFormatStr": "vgoup{{groupValue}}", + "colNameToGroup": "vgroup_id", + "exemplar": true, + "expr": "taos_vgroups_info_tables_num", + "formatType": "Time series", + "groupDataByColName": "", + "interval": "", + "legendFormat": "vgroup_{{vgroup_id}}", + "queryType": "SQL", + "refId": "A", + "sql": "select max(tables_num) as tables_num from log.vgroups_info where ts > $from and ts <= $to and database_name = '$database' interval($interval) fill(null) group 
by vgroup_id" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Tables Number Foreach VGroups", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "description": "所有普通表的个数(包括超级表的子表)", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 70 + }, + "id": 98, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_tables_per_database{cluster=\"$cluster\",database=\"$database\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(tables_num) from (select last(ts) as ts, sum(tables_num) as tables_num from log.vgroups_info where ts > $from and ts <= $to and database_name = '$database' group by ts)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Tables", + "transformations": 
[], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "超级表的所有子表个数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 73 + }, + "id": 97, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_tables_per_database{cluster=\"$cluster\",database=\"$database\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "show $database.stables;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Sub Tables", + "transformations": [], + "type": "stat" + } + ], + "repeat": "database", + "title": "Database: [ $database ]", + "type": "row" + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 20, + "panels": [ + { + "datasource": "Prometheus", + "description": "启动时长", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "Down", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 0, + "y": 69 + }, + "id": 120, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { 
+ "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_uptime{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(uptime) as uptime from log.dnodes_info where dnode_ep = '$fqdn'", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Uptime", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "lastNotNull" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "是否有MNode", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 1, + "text": "No" + }, + "1": { + "index": 0, + "text": "Yes" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 4, + "y": 69 + }, + "id": 121, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_has_mnode{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(has_mnode) as has_mnode from 
log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Has MNode?", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "CPU 核数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "Down", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 6, + "y": 69 + }, + "id": 122, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_cpu_cores{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(cpu_cores) as uptime from log.dnodes_info where dnode_ep = '$fqdn' interval ($interval)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Cores", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "VNodes 数量", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 8, + "y": 69 + }, + "id": 123, + "options": { 
+ "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_vnodes_num{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(vnodes_num) as vnodes_num from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VNodes Number", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "Master VNodes 数量", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 10, + "y": 69 + }, + "id": 124, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_masters{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(masters) as masters from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to 
interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VNodes Masters", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "当前 taosd 线程 CPU 使用率", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 0.5 + }, + { + "color": "red", + "value": 0.8 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "last(cpu_taosd)" + }, + "properties": [ + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 12, + "y": 69 + }, + "id": 45, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "mem_taosd", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_cpu_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Table", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(cpu_engine) from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current CPU Usage of taosd", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "当前 taosd 线程 内存 使用率", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" 
+ }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decmbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 16, + "y": 69 + }, + "id": 66, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "/^taosd$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "memory", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_mem_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "taosd", + "queryType": "SQL", + "refId": "A", + "sql": "select last(mem_engine) as taosd, last(mem_total) as total from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "memory", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_mem_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "B", + "sql": "select last(mem_engine) as taosd, last(mem_total) as total from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Memory Usage of taosd", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 75 + }, + { + "color": "red", + "value": 80 + }, + 
{ + "color": "dark-red", + "value": 95 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 20, + "y": 69 + }, + "id": 51, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_disks_info_datadir_l0_used/taos_disks_info_datadir_l0_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "level0", + "queryType": "SQL", + "refId": "A", + "sql": "select last(disk_used) as used, last(disk_total) as total, last(disk_used) / last(disk_total) as percent from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_disks_info_datadir_l1_used/taos_disks_info_datadir_l1_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1", + "queryType": "SQL", + "refId": "B", + "sql": "select last(disk_used) as used, last(disk_total) as total, last(disk_used) / last(disk_total) as percent from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_disks_info_datadir_l2_used/taos_disks_info_datadir_l2_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2", + "queryType": 
"SQL", + "refId": "C", + "sql": "select last(disk_used) as used, last(disk_total) as total, last(disk_used) / last(disk_total) as percent from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Used", + "transformations": [], + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "CPU 资源占用情况。", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 75 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideZero": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_cpu_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "taosd", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(cpu_engine) as taosd, avg(cpu_system) as system from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, 
+ "expr": "avg_over_time(taos_dnodes_info_cpu_system{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "system", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(cpu_engine) as taosd, avg(cpu_system) as system from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "CPU Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "percent", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "内存资源占用情况", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 75 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 42, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": 
[ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_mem_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "taosd", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mem_engine) as taosd, avg(mem_system) as system, avg(mem_total) as total from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_mem_system{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "system", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(mem_engine) as taosd, avg(mem_system) as system, avg(mem_total) as total from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_mem_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(mem_engine) as taosd, avg(mem_system) as system, avg(mem_total) as total from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "RAM Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": 
null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "decmbytes", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 86 + }, + "hiddenSeries": false, + "id": 117, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "hideZero": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "sideWidth": 400, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:84", + "alias": "/percent/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l0_used{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level0_used", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select 
metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l1_used{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1_used", + "queryType": "SQL", + "refId": "D", + "sql": "select avg(datadir_l0_used)/avg(datadir_l0_total) as level0_percent, avg(datadir_l1_used)/avg(datadir_l1_total) as level1_percent , avg(datadir_l2_used)/avg(datadir_l2_total) as level2_percent from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l2_used{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2_used", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(datadir_l0_used)/avg(datadir_l0_total) as level0_percent, avg(datadir_l1_used)/avg(datadir_l1_total) as level1_percent , avg(datadir_l2_used)/avg(datadir_l2_total) as level2_percent from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l0_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level0_total", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, 
avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l1_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1_total", + "queryType": "SQL", + "refId": "E", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l2_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2_total", + "queryType": "SQL", + "refId": "F", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + 
"timeShift": null, + "title": "Disk Used", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "gbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": "Disk Used", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 86 + }, + "hiddenSeries": false, + "id": 64, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "sideWidth": 400, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "level0", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "deriv(rate(taos_disks_info_datadir_l0_used{cluster=\"$cluster\", dnode_ep=\"$fqdn\"}[1m])[5m:1s]) * 1024", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level0", + "queryType": "SQL", + "refId": "A", + "sql": "select derivative(value, 1m, 0) from (select avg(datadir_l0_used) * 1024 as value from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null))", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": 
"level1", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "deriv(rate(taos_disks_info_datadir_l1_used{cluster=\"$cluster\", dnode_ep=\"$fqdn\"}[1m])[5m:1s]) * 1024", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1", + "queryType": "SQL", + "refId": "B", + "sql": "select derivative(value, 1m, 0) from (select avg(datadir_l1_used) * 1024 as value from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null))", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "level1", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "deriv(rate(taos_disks_info_datadir_l2_used{cluster=\"$cluster\", dnode_ep=\"$fqdn\"}[1m])[5m:1s]) * 1024", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2", + "queryType": "SQL", + "refId": "C", + "sql": "select derivative(value, 1m, 0) from (select avg(datadir_l1_used) * 1024 as value from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null))", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Used Increasing Rate per Minute", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "MBs", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": "Disk Used", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + 
"dashes": false, + "datasource": "Prometheus", + "description": "io", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 96 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 119, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:302", + "alias": "/last/", + "dashLength": 5, + "dashes": true, + "spaceLength": 5 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_io_write_disk{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "io_write_disk", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(io_read_disk) as io_read_taosd, avg(io_write_disk) as io_write_taosd from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_io_read_disk{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "io_read_disk", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(io_read_disk) as io_read_taosd, avg(io_write_disk) as io_write_taosd from log.dnodes_info where dnode_ep 
= '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "Disk IO", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "MBs", + "label": "IO Rate", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "io", + "fieldConfig": { + "defaults": { + "links": [], + "unit": "Mbits" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 96 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 118, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:347", + "alias": "/in/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_net_in{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + 
"interval": "", + "legendFormat": "net_in", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(net_in) as net_in, avg(net_out) as net_out from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_net_out{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "net_out", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(net_in) as net_in, avg(net_out) as net_out from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "Net", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "Mbits", + "label": "IO Rate", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "repeat": "fqdn", + "title": "DNode Usage [ $fqdn ]", + "type": "row" + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 63, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "displayName": "Logins Per Minute", + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 
8, + "w": 12, + "x": 0, + "y": 10 + }, + "hiddenSeries": false, + "id": 61, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:67", + "alias": "logins", + "nullPointMode": "null as zero" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "logins", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "count_over_time(taos_logs_content{cluster=\"$cluster\"}[1m])", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select count(*) from log.logs where ts >= $from and ts < $to interval (1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Login Counts per Minute", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:74", + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:75", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Login History", + "type": "row" + } + ], + "schemaVersion": 31, + "style": "dark", + "tags": [ + "TDengine", + "Prometheus", + "TaosKeeper" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "localhost", 
+ "value": "localhost" + }, + "datasource": null, + "definition": "label_values(taos_dn_cpu_taosd, cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(taos_dn_cpu_taosd, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "tdengine:6030", + "value": "tdengine:6030" + }, + "datasource": null, + "definition": "label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, fqdn)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "fqdn", + "options": [], + "query": { + "query": "label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, fqdn)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "1", + "value": "1" + }, + "datasource": null, + "definition": "label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, dnodeid)", + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": "with DNode ID:", + "multi": false, + "name": "dnodeid", + "options": [], + "query": { + "query": "label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, dnodeid)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": null, + "definition": "label_values(taos_vgroups_info_status{cluster=\"$cluster\"}, database_name)", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "database", + "options": 
[], + "query": { + "query": "label_values(taos_vgroups_info_status{cluster=\"$cluster\"}, database_name)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "tdengine:6030", + "value": "tdengine:6030" + }, + "datasource": null, + "definition": "label_values(taos_cluster_info_first_ep{cluster=\"$cluster\"}, value)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "firstEp", + "options": [], + "query": { + "query": "label_values(taos_cluster_info_first_ep{cluster=\"$cluster\"}, value)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "2.4.0.4", + "value": "2.4.0.4" + }, + "datasource": null, + "definition": "label_values(taos_cluster_info_version{cluster=\"$cluster\"}, value)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "version", + "options": [], + "query": { + "query": "label_values(taos_cluster_info_version{cluster=\"$cluster\"}, value)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "auto": true, + "auto_count": 100, + "auto_min": "10s", + "current": { + "selected": false, + "text": "5s", + "value": "5s" + }, + "description": null, + "error": null, + "hide": 0, + "label": null, + "name": "interval", + "options": [ + { + "selected": false, + "text": "auto", + "value": "$__auto_interval_interval" + }, + { + "selected": true, + "text": "5s", + "value": "5s" + }, + { + "selected": false, + "text": "10s", + "value": "10s" + }, + { + "selected": false, + "text": "15s", + "value": "15s" + }, + { + "selected": false, + "text": "20s", + 
"value": "20s" + }, + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "2m", + "value": "2m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + } + ], + "query": "5s,10s,15s,20s,30s,1m,2m,5m,10m,30m,1h,6h,12h,1d", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "TaosKeeper Prometheus Dashboard", + "uid": "rSFM0Fxnk", + "version": 62 +} \ No newline at end of file diff --git a/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.png b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.png new file mode 100644 index 00000000000..a608c98b930 Binary files /dev/null and b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.png differ diff --git a/tools/keeper/db/connector.go b/tools/keeper/db/connector.go new file mode 100644 index 00000000000..a8f106e9527 --- /dev/null +++ b/tools/keeper/db/connector.go @@ -0,0 +1,177 @@ +package db + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "os" + "strings" + "time" + + "github.com/sirupsen/logrus" + "github.com/taosdata/driver-go/v3/common" + + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" 
+) + +type Connector struct { + db *sql.DB +} + +type Data struct { + Head []string `json:"head"` + Data [][]interface{} `json:"data"` +} + +var dbLogger = log.GetLogger("DB ") + +func NewConnector(username, password, host string, port int, usessl bool) (*Connector, error) { + var protocol string + if usessl { + protocol = "https" + } else { + protocol = "http" + } + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: util.GetQidOwn()}) + dbLogger.Tracef("connect to adapter, host:%s, port:%d, usessl:%v", host, port, usessl) + + db, err := sql.Open("taosRestful", fmt.Sprintf("%s:%s@%s(%s:%d)/?skipVerify=true", username, password, protocol, host, port)) + if err != nil { + dbLogger.Errorf("connect to adapter failed, host:%s, port:%d, usessl:%v, error:%s", host, port, usessl, err) + return nil, err + } + + dbLogger.Tracef("connect to adapter success, host:%s, port:%d, usessl:%v", host, port, usessl) + return &Connector{db: db}, nil +} + +func NewConnectorWithDb(username, password, host string, port int, dbname string, usessl bool) (*Connector, error) { + var protocol string + if usessl { + protocol = "https" + } else { + protocol = "http" + } + + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: util.GetQidOwn()}) + dbLogger.Tracef("connect to adapter, host:%s, port:%d, usessl:%v", host, port, usessl) + + db, err := sql.Open("taosRestful", fmt.Sprintf("%s:%s@%s(%s:%d)/%s?skipVerify=true", username, password, protocol, host, port, dbname)) + if err != nil { + dbLogger.Errorf("connect to adapter failed, host:%s, port:%d, db:%s, usessl:%v, error:%s", host, port, dbname, usessl, err) + return nil, err + } + + dbLogger.Tracef("connect to adapter success, host:%s, port:%d, db:%s, usessl:%v", host, port, dbname, usessl) + return &Connector{db: db}, nil +} + +func (c *Connector) Exec(ctx context.Context, sql string, qid uint64) (int64, error) { + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: qid}) + ctx = context.WithValue(ctx, 
common.ReqIDKey, int64(qid)) + + dbLogger.Tracef("call adapter to execute sql:%s", sql) + startTime := time.Now() + res, err := c.db.ExecContext(ctx, sql) + + endTime := time.Now() + latency := endTime.Sub(startTime) + + if err != nil { + if strings.Contains(err.Error(), "Authentication failure") { + dbLogger.Error("Authentication failure") + ctxLog, cancelLog := context.WithTimeout(context.Background(), 3*time.Second) + defer cancelLog() + log.Close(ctxLog) + os.Exit(1) + } + dbLogger.Errorf("latency:%v, sql:%s, err:%s", latency, sql, err) + return 0, err + } + + rowsAffected, err := res.RowsAffected() + if err != nil { + dbLogger.Errorf("latency:%v, err:%s", latency, err) + return rowsAffected, err + } + + dbLogger.Tracef("response ok, rowsAffected:%v, latency:%v", rowsAffected, latency) + + return rowsAffected, err +} + +func logData(data *Data, logger *logrus.Entry) { + if data == nil { + logger.Tracef("No data to display") + return + } + + jsonData, err := json.Marshal(data) + if err != nil { + logger.Errorf("Failed to marshal data to JSON: %v", err) + return + } + logger.Tracef("query result data:%s", jsonData) +} + +func (c *Connector) Query(ctx context.Context, sql string, qid uint64) (*Data, error) { + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: qid}) + ctx = context.WithValue(ctx, common.ReqIDKey, int64(qid)) + + dbLogger.Tracef("call adapter to execute query, sql:%s", sql) + + startTime := time.Now() + rows, err := c.db.QueryContext(ctx, sql) + + endTime := time.Now() + latency := endTime.Sub(startTime) + + if err != nil { + if strings.Contains(err.Error(), "Authentication failure") { + dbLogger.Error("Authentication failure") + ctxLog, cancelLog := context.WithTimeout(context.Background(), 3*time.Second) + defer cancelLog() + log.Close(ctxLog) + os.Exit(1) + } + dbLogger.Errorf("latency:%v, sql:%s, err:%s", latency, sql, err) + return nil, err + } + + dbLogger.Tracef("response ok, latency:%v, sql:%s", latency, sql) + + data := &Data{} 
+ data.Head, err = rows.Columns() + columnCount := len(data.Head) + if err != nil { + dbLogger.Errorf("get columns error, msg:%s", err) + return nil, err + } + scanData := make([]interface{}, columnCount) + for rows.Next() { + tmp := make([]interface{}, columnCount) + for i := 0; i < columnCount; i++ { + scanData[i] = &tmp[i] + } + err = rows.Scan(scanData...) + if err != nil { + rows.Close() + dbLogger.Errorf("rows scan error, msg:%s", err) + return nil, err + } + data.Data = append(data.Data, tmp) + } + + if dbLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + logData(data, dbLogger) + } + return data, nil +} + +func (c *Connector) Close() error { + return c.db.Close() +} diff --git a/tools/keeper/db/empty_test.go b/tools/keeper/db/empty_test.go new file mode 100644 index 00000000000..52e32e1f891 --- /dev/null +++ b/tools/keeper/db/empty_test.go @@ -0,0 +1,8 @@ +package db + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/docker-compose.yml b/tools/keeper/docker-compose.yml new file mode 100644 index 00000000000..f7f43fe112b --- /dev/null +++ b/tools/keeper/docker-compose.yml @@ -0,0 +1,29 @@ +version: "3.7" + +services: + tdengine: + image: tdengine/tdengine:3.0.1.6 + environment: + TZ: Asia/Shanghai + TAOS_FQDN: tdengine + volumes: + - taosdata:/var/lib/taos + taoskeeper: + build: ./ + depends_on: + - tdengine + environment: + TDENGINE_HOST: tdengine + TDENGINE_PORT: 6041 + volumes: + - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml + ports: + - 6043:6043 + prometheus: + image: prom/prometheus + volumes: + - ./prometheus/:/etc/prometheus/ + ports: + - 9090:9090 +volumes: + taosdata: diff --git a/tools/keeper/examples/metrics.toml b/tools/keeper/examples/metrics.toml new file mode 100644 index 00000000000..9dbfea2323d --- /dev/null +++ b/tools/keeper/examples/metrics.toml @@ -0,0 +1,38 @@ +prefix = "taos" +cluster = "localhost" +database = "log" +explicit = false + +[tables.restful_info] +[tables.slowquery] 
+[tables.cluster_info] +[tables.grants_info] +[tables.disks_info] + +[tables.logs] +explicit = true +[tables.logs.metrics.content] +type = "info" +help = "login types or messages" +[tables.logs.metrics.level] +type = "gauge" +help = "login level" + +[tables.dnodes_info] +[tables.dnodes_info.metrics.has_mnode] +type = "gauge" +help = "check if the node has mnode" + +[tables.vgroups_info] +column_as_variables = ["database_name", "dnode_roles", "dnode_ids"] +explicit = false + +[tables.vgroups_info.metrics.tables_num] +type = "gauge" +help = "Tables count of the vgroup" +[tables.vgroups_info.metrics.online_vnodes] +type = "gauge" +help = "Online v-nodes of the v-group" +[tables.vgroups_info.metrics.status] +type = "info" +help = "Status of the v-group" diff --git a/tools/keeper/go.mod b/tools/keeper/go.mod new file mode 100644 index 00000000000..f8edf2709b4 --- /dev/null +++ b/tools/keeper/go.mod @@ -0,0 +1,81 @@ +module github.com/taosdata/taoskeeper + +go 1.18 + +require ( + github.com/BurntSushi/toml v0.4.1 + github.com/gin-gonic/gin v1.9.1 + github.com/kardianos/service v1.2.1 + github.com/panjf2000/ants/v2 v2.4.6 + github.com/prometheus/client_golang v1.12.2 + github.com/shirou/gopsutil/v3 v3.22.4 + github.com/shopspring/decimal v1.3.1 + github.com/sirupsen/logrus v1.8.1 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.12.0 + github.com/stretchr/testify v1.9.0 + github.com/taosdata/driver-go/v3 v3.5.8 + github.com/taosdata/file-rotatelogs/v2 v2.5.2 + github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/bytedance/sonic v1.11.2 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect + github.com/chenzhuoyu/iasm v0.9.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect + 
github.com/gin-contrib/cors v1.6.0 // indirect + github.com/gin-contrib/gzip v0.0.3 // indirect + github.com/gin-contrib/pprof v1.3.0 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.19.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/lestrrat-go/strftime v1.0.6 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/subosito/gotenv v1.3.0 // indirect + github.com/tklauser/go-sysconf v0.3.10 // 
indirect + github.com/tklauser/numcpus v0.4.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect + golang.org/x/arch v0.7.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/tools/keeper/go.sum b/tools/keeper/go.sum new file mode 100644 index 00000000000..8f6e9bd13af --- /dev/null +++ b/tools/keeper/go.sum @@ -0,0 +1,771 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod 
h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod 
h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= +github.com/bytedance/sonic v1.11.2 h1:ywfwo0a/3j9HR8wsYGWsIWl2mvRsI950HyoxiBERw5A= 
+github.com/bytedance/sonic v1.11.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= +github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= +github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/creack/pty 
v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deathowl/go-metrics-prometheus v0.0.0-20200518174047-74482eab5bfb/go.mod h1:kZ9Xvhj+PTMJ415unU/sutrnWDVqG0PDS/Sl4Rt3xkE= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= 
+github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/gin-contrib/cors v1.3.1/go.mod h1:jjEJ4268OPZUcU7k9Pm653S7lXUGcqMADzFA61xsmDk= +github.com/gin-contrib/cors v1.6.0 h1:0Z7D/bVhE6ja07lI8CTjTonp6SB07o8bNuFyRbsBUQg= +github.com/gin-contrib/cors v1.6.0/go.mod h1:cI+h6iOAyxKRtUtC6iF/Si1KSFvGm/gK+kshxlCi8ro= +github.com/gin-contrib/gzip v0.0.3 h1:etUaeesHhEORpZMp18zoOhepboiWnFtXrBZxszWUn4k= +github.com/gin-contrib/gzip v0.0.3/go.mod h1:YxxswVZIqOvcHEQpsSn+QF5guQtO1dCfy0shBPy4jFc= +github.com/gin-contrib/pprof v1.3.0 h1:G9eK6HnbkSqDZBYbzG4wrjCsA4e+cvYAHUZw6W+W9K0= +github.com/gin-contrib/pprof v1.3.0/go.mod h1:waMjT1H9b179t3CxuG1cV3DHpga6ybizwfBaM5OXaB0= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= +github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4= +github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= 
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf 
v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= 
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk= +github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
+github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8= 
+github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is= +github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ= +github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= +github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/panjf2000/ants/v2 v2.4.6 h1:drmj9mcygn2gawZ155dRbo+NfXEfAssjZNU1qoIb4gQ= +github.com/panjf2000/ants/v2 v2.4.6/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod 
h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 
h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/shirou/gopsutil/v3 v3.22.4 h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI= +github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= +github.com/shopspring/decimal 
v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= +github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= +github.com/taosdata/driver-go/v2 v2.0.1-0.20211018081904-0a2a3ef6c829/go.mod h1:W7pu74rSvDmGjJPO6fzp+GCtwOelrMgXEhPD0aQJ1xw= +github.com/taosdata/driver-go/v3 v3.5.8 h1:JT5lNFUCOHD9Hs4Phjg8RBkGOWlePRnpGqq8kIRHT98= +github.com/taosdata/driver-go/v3 v3.5.8/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU= +github.com/taosdata/file-rotatelogs/v2 v2.5.2 h1:6ryjwDdKqQtWrkVq9OKj4gvMING/f+fDluMAAe2DIXQ= +github.com/taosdata/file-rotatelogs/v2 v2.5.2/go.mod h1:Qm99Lh0iMZouGgyy++JgTqKvP5FQw1ruR5jkWF7e1n0= +github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a h1:WGFREiuYBrTXTS9GVQQpDvVgGRyByfo0V5//o7tv/ho= +github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a/go.mod h1:hlvGgM/HN3AqWMajvMQe80qoLNJ4KIxs8YOVqEqnxUo= +github.com/tidwall/gjson v1.9.1/go.mod h1:jydLKE7s8J0+1/5jC4eXcuFlzKizGrCKvLmBVX/5oXc= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= 
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod 
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf 
v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/tools/keeper/infrastructure/config/audit.go b/tools/keeper/infrastructure/config/audit.go new file mode 100644 index 00000000000..10f3a6aa1e0 --- /dev/null +++ b/tools/keeper/infrastructure/config/audit.go @@ -0,0 +1,6 @@ +package config + +type AuditConfig struct { + Enable bool `toml:"enable"` + 
Database Database `toml:"database"` +} diff --git a/tools/keeper/infrastructure/config/config.go b/tools/keeper/infrastructure/config/config.go new file mode 100644 index 00000000000..d3e884ba8f9 --- /dev/null +++ b/tools/keeper/infrastructure/config/config.go @@ -0,0 +1,294 @@ +package config + +import ( + "fmt" + "io/fs" + "os" + "runtime" + "time" + + "github.com/spf13/pflag" + "github.com/spf13/viper" + "github.com/taosdata/go-utils/web" + "github.com/taosdata/taoskeeper/util/pool" + "github.com/taosdata/taoskeeper/version" +) + +var Name = fmt.Sprintf("%skeeper", version.CUS_PROMPT) + +const ReqIDKey = "QID" +const ModelKey = "model" + +type Config struct { + InstanceID uint8 + Cors web.CorsConfig `toml:"cors"` + Port int `toml:"port"` + LogLevel string `toml:"loglevel"` + GoPoolSize int `toml:"gopoolsize"` + RotationInterval string `toml:"RotationInterval"` + TDengine TDengineRestful `toml:"tdengine"` + Metrics MetricsConfig `toml:"metrics"` + Env Environment `toml:"environment"` + Audit AuditConfig `toml:"audit"` + Log Log `mapstructure:"-"` + + Transfer string + FromTime string + Drop string +} + +type TDengineRestful struct { + Host string `toml:"host"` + Port int `toml:"port"` + Username string `toml:"username"` + Password string `toml:"password"` + Usessl bool `toml:"usessl"` +} + +var Conf *Config + +func InitConfig() *Config { + viper.SetConfigType("toml") + viper.SetConfigName(Name) + viper.AddConfigPath("/etc/taos") + + var cp *string + switch runtime.GOOS { + case "windows": + viper.AddConfigPath(fmt.Sprintf("C:\\%s\\cfg", version.CUS_NAME)) + cp = pflag.StringP("config", "c", "", fmt.Sprintf("config path default C:\\%s\\cfg\\%s.toml", version.CUS_NAME, Name)) + default: + viper.AddConfigPath(fmt.Sprintf("/etc/%s", version.CUS_PROMPT)) + cp = pflag.StringP("config", "c", "", fmt.Sprintf("config path default /etc/%s/%s.toml", version.CUS_PROMPT, Name)) + } + + transfer := pflag.StringP("transfer", "", "", "run "+Name+" in command mode, only support 
old_taosd_metric. transfer old metrics data to new tables and exit") + fromTime := pflag.StringP("fromTime", "", "2020-01-01T00:00:00+08:00", "parameter of transfer, example: 2020-01-01T00:00:00+08:00") + drop := pflag.StringP("drop", "", "", "run "+Name+" in command mode, only support old_taosd_metric_stables. ") + + v := pflag.BoolP("version", "V", false, "Print the version and exit") + help := pflag.BoolP("help", "h", false, "Print this help message and exit") + + pflag.Parse() + + if *help { + fmt.Fprintf(os.Stderr, "Usage of %s v%s:\n", Name, version.Version) + pflag.PrintDefaults() + os.Exit(0) + } + + if *v { + fmt.Printf("%s version: %s\n", Name, version.Version) + fmt.Printf("git: %s\n", version.Gitinfo) + fmt.Printf("build: %s\n", version.BuildInfo) + os.Exit(0) + } + + if *cp != "" { + viper.SetConfigFile(*cp) + } + + viper.SetEnvPrefix(Name) + err := viper.BindPFlags(pflag.CommandLine) + if err != nil { + panic(err) + } + viper.AutomaticEnv() + + gotoStep := false +ReadConfig: + if err := viper.ReadInConfig(); err != nil { + _, isConfigNotFoundError := err.(viper.ConfigFileNotFoundError) + _, isPathError := err.(*fs.PathError) + if isConfigNotFoundError || isPathError { + fmt.Println("config file not found") + + if !gotoStep { + fmt.Println("use keeper.toml instead") + viper.SetConfigName("keeper") + gotoStep = true + goto ReadConfig + } + } else { + panic(err) + } + } + + // if old format, change to new format + if !viper.IsSet("metrics.database.name") { + databaseName := viper.GetString("metrics.database") + viper.Set("metrics.database.name", databaseName) + viper.Set("metrics.database.options", viper.Get("metrics.databaseoptions")) + } + + var conf Config + if err = viper.Unmarshal(&conf); err != nil { + panic(err) + } + + conf.Transfer = *transfer + conf.FromTime = *fromTime + conf.Drop = *drop + + conf.Cors.Init() + pool.Init(conf.GoPoolSize) + conf.Log.SetValue() + + // set log level default value: info + if conf.LogLevel == "" { + conf.LogLevel = 
"info" + } + if viper.IsSet("log.level") { + conf.LogLevel = conf.Log.Level + } else { + viper.Set("log.level", "") + } + + if !viper.IsSet("logLevel") { + viper.Set("logLevel", "") + } + + Conf = &conf + return &conf +} + +func init() { + viper.SetDefault("instanceId", 64) + _ = viper.BindEnv("instanceId", "TAOS_KEEPER_INSTANCE_ID") + pflag.Int("instanceId", 64, `instance ID. Env "TAOS_KEEPER_INSTANCE_ID"`) + + viper.SetDefault("port", 6043) + _ = viper.BindEnv("port", "TAOS_KEEPER_PORT") + pflag.IntP("port", "P", 6043, `http port. Env "TAOS_KEEPER_PORT"`) + + _ = viper.BindEnv("logLevel", "TAOS_KEEPER_LOG_LEVEL") + pflag.String("logLevel", "info", `log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL"`) + + viper.SetDefault("gopoolsize", 50000) + _ = viper.BindEnv("gopoolsize", "TAOS_KEEPER_POOL_SIZE") + pflag.Int("gopoolsize", 50000, `coroutine size. Env "TAOS_KEEPER_POOL_SIZE"`) + + viper.SetDefault("RotationInterval", "15s") + _ = viper.BindEnv("RotationInterval", "TAOS_KEEPER_ROTATION_INTERVAL") + pflag.StringP("RotationInterval", "R", "15s", `interval for refresh metrics, such as "300ms", Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Env "TAOS_KEEPER_ROTATION_INTERVAL"`) + + viper.SetDefault("tdengine.host", "127.0.0.1") + _ = viper.BindEnv("tdengine.host", "TAOS_KEEPER_TDENGINE_HOST") + pflag.String("tdengine.host", "127.0.0.1", `TDengine server's ip. Env "TAOS_KEEPER_TDENGINE_HOST"`) + + viper.SetDefault("tdengine.port", 6041) + _ = viper.BindEnv("tdengine.port", "TAOS_KEEPER_TDENGINE_PORT") + pflag.Int("tdengine.port", 6041, `TDengine REST server(taosAdapter)'s port. Env "TAOS_KEEPER_TDENGINE_PORT"`) + + viper.SetDefault("tdengine.username", "root") + _ = viper.BindEnv("tdengine.username", "TAOS_KEEPER_TDENGINE_USERNAME") + pflag.String("tdengine.username", "root", `TDengine server's username. 
Env "TAOS_KEEPER_TDENGINE_USERNAME"`) + + viper.SetDefault("tdengine.password", "taosdata") + _ = viper.BindEnv("tdengine.password", "TAOS_KEEPER_TDENGINE_PASSWORD") + pflag.String("tdengine.password", "taosdata", `TDengine server's password. Env "TAOS_KEEPER_TDENGINE_PASSWORD"`) + + viper.SetDefault("tdengine.usessl", false) + _ = viper.BindEnv("tdengine.usessl", "TAOS_KEEPER_TDENGINE_USESSL") + pflag.Bool("tdengine.usessl", false, `TDengine server use ssl or not. Env "TAOS_KEEPER_TDENGINE_USESSL"`) + + viper.SetDefault("metrics.prefix", "") + _ = viper.BindEnv("metrics.prefix", "TAOS_KEEPER_METRICS_PREFIX") + pflag.String("metrics.prefix", "", `prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX"`) + + viper.SetDefault("metrics.database.name", "log") + _ = viper.BindEnv("metrics.database.name", "TAOS_KEEPER_METRICS_DATABASE") + pflag.String("metrics.database.name", "log", `database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE"`) + + viper.SetDefault("metrics.database.options.vgroups", 1) + _ = viper.BindEnv("metrics.database.options.vgroups", "TAOS_KEEPER_METRICS_VGROUPS") + pflag.Int("metrics.database.options.vgroups", 1, `database option vgroups for audit database. Env "TAOS_KEEPER_METRICS_VGROUPS"`) + + viper.SetDefault("metrics.database.options.buffer", 64) + _ = viper.BindEnv("metrics.database.options.buffer", "TAOS_KEEPER_METRICS_BUFFER") + pflag.Int("metrics.database.options.buffer", 64, `database option buffer for audit database. Env "TAOS_KEEPER_METRICS_BUFFER"`) + + viper.SetDefault("metrics.database.options.keep", 90) + _ = viper.BindEnv("metrics.database.options.keep", "TAOS_KEEPER_METRICS_KEEP") + pflag.Int("metrics.database.options.keep", 90, `database option buffer for audit database. 
Env "TAOS_KEEPER_METRICS_KEEP"`) + + viper.SetDefault("metrics.database.options.cachemodel", "both") + _ = viper.BindEnv("metrics.database.options.cachemodel", "TAOS_KEEPER_METRICS_CACHEMODEL") + pflag.String("metrics.database.options.cachemodel", "both", `database option cachemodel for audit database. Env "TAOS_KEEPER_METRICS_CACHEMODEL"`) + + viper.SetDefault("metrics.tables", []string{}) + _ = viper.BindEnv("metrics.tables", "TAOS_KEEPER_METRICS_TABLES") + pflag.StringArray("metrics.tables", []string{}, `export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES"`) + + viper.SetDefault("environment.incgroup", false) + _ = viper.BindEnv("environment.incgroup", "TAOS_KEEPER_ENVIRONMENT_INCGROUP") + pflag.Bool("environment.incgroup", false, `whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP"`) + + initLog() + + if version.IsEnterprise == "true" { + initAudit() + } +} + +func initLog() { + switch runtime.GOOS { + case "windows": + viper.SetDefault("log.path", fmt.Sprintf("C:\\%s\\log", version.CUS_NAME)) + _ = viper.BindEnv("log.path", "TAOS_KEEPER_LOG_PATH") + pflag.String("log.path", fmt.Sprintf("C:\\%s\\log", version.CUS_NAME), `log path. Env "TAOS_KEEPER_LOG_PATH"`) + default: + viper.SetDefault("log.path", fmt.Sprintf("/var/log/%s", version.CUS_PROMPT)) + _ = viper.BindEnv("log.path", "TAOS_KEEPER_LOG_PATH") + pflag.String("log.path", fmt.Sprintf("/var/log/%s", version.CUS_PROMPT), `log path. Env "TAOS_KEEPER_LOG_PATH"`) + } + + _ = viper.BindEnv("log.level", "TAOS_KEEPER_LOG_LEVEL") + pflag.String("log.level", "info", `log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL"`) + + viper.SetDefault("log.rotationCount", 5) + _ = viper.BindEnv("log.rotationCount", "TAOS_KEEPER_LOG_ROTATION_COUNT") + pflag.Uint("log.rotationCount", 5, `log rotation count. 
Env "TAOS_KEEPER_LOG_ROTATION_COUNT"`) + + viper.SetDefault("log.keepDays", 30) + _ = viper.BindEnv("log.keepDays", "TAOS_KEEPER_LOG_KEEP_DAYS") + pflag.Uint("log.keepDays", 30, `log retention days, must be a positive integer. Env "TAOS_KEEPER_LOG_KEEP_DAYS"`) + + viper.SetDefault("log.rotationTime", time.Hour*24) + _ = viper.BindEnv("log.rotationTime", "TAOS_KEEPER_LOG_ROTATION_TIME") + pflag.Duration("log.rotationTime", time.Hour*24, `deprecated: log rotation time always 24 hours. Env "TAOS_KEEPER_LOG_ROTATION_TIME"`) + + viper.SetDefault("log.rotationSize", "1GB") + _ = viper.BindEnv("log.rotationSize", "TAOS_KEEPER_LOG_ROTATION_SIZE") + pflag.String("log.rotationSize", "1GB", `log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE"`) + + viper.SetDefault("log.compress", false) + _ = viper.BindEnv("log.compress", "TAOS_KEEPER_LOG_COMPRESS") + pflag.Bool("log.compress", false, `whether to compress old log. Env "TAOS_KEEPER_LOG_COMPRESS"`) + + viper.SetDefault("log.reservedDiskSize", "1GB") + _ = viper.BindEnv("log.reservedDiskSize", "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE") + pflag.String("log.reservedDiskSize", "1GB", `reserved disk size for log dir (KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE"`) +} + +func initAudit() { + viper.SetDefault("audit.enable", "true") + _ = viper.BindEnv("audit.enable", "TAOS_KEEPER_AUDIT_ENABLE") + pflag.String("audit.enable", "true", `database for enable audit data. Env "TAOS_KEEPER_AUDIT_ENABLE"`) + + viper.SetDefault("audit.database.name", "audit") + _ = viper.BindEnv("audit.database.name", "TAOS_KEEPER_AUDIT_DATABASE") + pflag.String("audit.database.name", "audit", `database for storing audit data. 
Env "TAOS_KEEPER_AUDIT_DATABASE"`) + + viper.SetDefault("audit.database.options.vgroups", 1) + _ = viper.BindEnv("audit.database.options.vgroups", "TAOS_KEEPER_AUDIT_VGROUPS") + pflag.Int("audit.database.options.vgroups", 1, `database option vgroups for audit database. Env "TAOS_KEEPER_AUDIT_VGROUPS"`) + + viper.SetDefault("audit.database.options.buffer", 16) + _ = viper.BindEnv("audit.database.options.buffer", "TAOS_KEEPER_AUDIT_BUFFER") + pflag.Int("audit.database.options.buffer", 16, `database option buffer for audit database. Env "TAOS_KEEPER_AUDIT_BUFFER"`) + + viper.SetDefault("audit.database.options.cachemodel", "both") + _ = viper.BindEnv("audit.database.options.cachemodel", "TAOS_KEEPER_AUDIT_CACHEMODEL") + pflag.String("audit.database.options.cachemodel", "both", `database option cachemodel for audit database. Env "TAOS_KEEPER_AUDIT_CACHEMODEL"`) +} diff --git a/tools/keeper/infrastructure/config/log.go b/tools/keeper/infrastructure/config/log.go new file mode 100644 index 00000000000..dc67d877e70 --- /dev/null +++ b/tools/keeper/infrastructure/config/log.go @@ -0,0 +1,29 @@ +package config + +import ( + "time" + + "github.com/spf13/viper" +) + +type Log struct { + Level string + Path string + RotationCount uint + RotationTime time.Duration + RotationSize uint + KeepDays uint + Compress bool + ReservedDiskSize uint +} + +func (l *Log) SetValue() { + l.Level = viper.GetString("log.level") + l.Path = viper.GetString("log.path") + l.RotationCount = viper.GetUint("log.rotationCount") + l.RotationTime = viper.GetDuration("log.rotationTime") + l.RotationSize = viper.GetSizeInBytes("log.rotationSize") + l.KeepDays = viper.GetUint("log.keepDays") + l.Compress = viper.GetBool("log.compress") + l.ReservedDiskSize = viper.GetSizeInBytes("log.reservedDiskSize") +} diff --git a/tools/keeper/infrastructure/config/metric_test.go b/tools/keeper/infrastructure/config/metric_test.go new file mode 100644 index 00000000000..5d20cdc5ec9 --- /dev/null +++ 
b/tools/keeper/infrastructure/config/metric_test.go @@ -0,0 +1,85 @@ +package config_test + +import ( + "fmt" + "io" + "os" + "runtime" + "testing" + + "github.com/BurntSushi/toml" + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/version" +) + +func TestConfig(t *testing.T) { + data := ` +# Start with debug middleware for gin +debug = true +# Listen port, default is 6043 +port = 9000 +# log level +loglevel = "error" +# go pool size +gopoolsize = 5000 +# interval for TDengine metrics +RotationInterval = "10s" +[tdengine] +address = "http://localhost:6041" +authtype = "Basic" +username = "root" +password = "taosdata" +` + var c config.Config + _, err := toml.Decode(data, &c) + if err != nil { + t.Error(err) + return + } + assert.EqualValues(t, c, c) + fmt.Print(c) +} + +func TestBakConfig(t *testing.T) { + isOk := copyConfigFile() + if isOk { + config.Name = "aaa" + config.InitConfig() + config.Name = "taoskeeper" + } +} + +func copyConfigFile() bool { + var sourceFile string + var destinationFile string + switch runtime.GOOS { + case "windows": + sourceFile = fmt.Sprintf("C:\\%s\\cfg\\%s.toml", version.CUS_NAME, "taoskeeper") + destinationFile = fmt.Sprintf("C:\\%s\\cfg\\%s.toml", version.CUS_NAME, "keeper") + default: + sourceFile = fmt.Sprintf("/etc/%s/%s.toml", version.CUS_PROMPT, "taoskeeper") + destinationFile = fmt.Sprintf("/etc/%s/%s.toml", version.CUS_PROMPT, "keeper") + } + _, err := os.Stat(sourceFile) + if os.IsNotExist(err) { + return false + } + + source, err := os.Open(sourceFile) //open the source file + if err != nil { + panic(err) + } + defer source.Close() + + destination, err := os.Create(destinationFile) //create the destination file + if err != nil { + panic(err) + } + defer destination.Close() + _, err = io.Copy(destination, source) //copy the contents of source to destination file + if err != nil { + panic(err) + } + return true +} diff --git 
a/tools/keeper/infrastructure/config/metrics.go b/tools/keeper/infrastructure/config/metrics.go new file mode 100644 index 00000000000..c41544fc399 --- /dev/null +++ b/tools/keeper/infrastructure/config/metrics.go @@ -0,0 +1,29 @@ +package config + +type MetricsConfig struct { + Cluster string `toml:"cluster"` + Prefix string `toml:"prefix"` + Database Database `toml:"database"` + Tables []string `toml:"tables"` +} + +type TaosAdapter struct { + Address []string `toml:"address"` +} + +type Metric struct { + Alias string `toml:"alias"` + Help string `toml:"help"` + Unit string `toml:"unit"` + Type string `toml:"type"` + Labels map[string]string `toml:"labels"` +} + +type Environment struct { + InCGroup bool `toml:"incgroup"` +} + +type Database struct { + Name string `toml:"name"` + Options map[string]interface{} `toml:"options"` +} diff --git a/tools/keeper/infrastructure/log/empty_test.go b/tools/keeper/infrastructure/log/empty_test.go new file mode 100644 index 00000000000..468c02173ba --- /dev/null +++ b/tools/keeper/infrastructure/log/empty_test.go @@ -0,0 +1,8 @@ +package log + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/infrastructure/log/log.go b/tools/keeper/infrastructure/log/log.go new file mode 100644 index 00000000000..0a54e99eb93 --- /dev/null +++ b/tools/keeper/infrastructure/log/log.go @@ -0,0 +1,278 @@ +package log + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/sirupsen/logrus" + rotatelogs "github.com/taosdata/file-rotatelogs/v2" + "github.com/taosdata/taoskeeper/infrastructure/config" + + "github.com/taosdata/taoskeeper/version" +) + +var logger = logrus.New() +var ServerID = randomID() +var globalLogFormatter = &TaosLogFormatter{} +var finish = make(chan struct{}) +var exist = make(chan struct{}) + +var bufferPool = &defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, +} + +type defaultPool struct 
{ + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + buf.Reset() + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +type FileHook struct { + formatter logrus.Formatter + writer io.Writer + buf *bytes.Buffer + sync.Mutex +} + +func NewFileHook(formatter logrus.Formatter, writer io.WriteCloser) *FileHook { + fh := &FileHook{formatter: formatter, writer: writer, buf: &bytes.Buffer{}} + ticker := time.NewTicker(time.Second * 5) + go func() { + for { + select { + case <-ticker.C: + //can be optimized by tryLock + fh.Lock() + if fh.buf.Len() > 0 { + fh.flush() + } + fh.Unlock() + case <-exist: + fh.Lock() + fh.flush() + fh.Unlock() + writer.Close() + ticker.Stop() + close(finish) + return + } + } + }() + return fh +} + +func (f *FileHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (f *FileHook) Fire(entry *logrus.Entry) error { + if entry.Buffer == nil { + entry.Buffer = bufferPool.Get() + defer func() { + bufferPool.Put(entry.Buffer) + entry.Buffer = nil + }() + } + data, err := f.formatter.Format(entry) + if err != nil { + return err + } + f.Lock() + f.buf.Write(data) + if f.buf.Len() > 1024 || entry.Level == logrus.FatalLevel || entry.Level == logrus.PanicLevel { + err = f.flush() + } + f.Unlock() + return err +} + +func (f *FileHook) flush() error { + _, err := f.writer.Write(f.buf.Bytes()) + f.buf.Reset() + return err +} + +var once sync.Once + +func ConfigLog() { + once.Do(func() { + err := SetLevel(config.Conf.LogLevel) + if err != nil { + panic(err) + } + writer, err := rotatelogs.New( + filepath.Join(config.Conf.Log.Path, fmt.Sprintf("%skeeper_%d_%%Y%%m%%d.log", version.CUS_PROMPT, config.Conf.InstanceID)), + rotatelogs.WithRotationCount(config.Conf.Log.RotationCount), + rotatelogs.WithRotationTime(time.Hour*24), + rotatelogs.WithRotationSize(int64(config.Conf.Log.RotationSize)), + rotatelogs.WithReservedDiskSize(int64(config.Conf.Log.ReservedDiskSize)), + 
rotatelogs.WithRotateGlobPattern(filepath.Join(config.Conf.Log.Path, fmt.Sprintf("%skeeper_%d_*.log*", version.CUS_PROMPT, config.Conf.InstanceID))), + rotatelogs.WithCompress(config.Conf.Log.Compress), + rotatelogs.WithCleanLockFile(filepath.Join(config.Conf.Log.Path, fmt.Sprintf(".%skeeper_%d_rotate_lock", version.CUS_PROMPT, config.Conf.InstanceID))), + rotatelogs.ForceNewFile(), + rotatelogs.WithMaxAge(time.Hour*24*time.Duration(config.Conf.Log.KeepDays)), + ) + if err != nil { + panic(err) + } + fmt.Fprintln(writer, "==================================================") + fmt.Fprintln(writer, " new log file") + fmt.Fprintln(writer, "==================================================") + fmt.Fprintf(writer, "config:%+v\n", config.Conf) + + fmt.Fprintf(writer, "%-45s%v\n", "version", version.Version) + fmt.Fprintf(writer, "%-45s%v\n", "gitinfo", version.CommitID) + fmt.Fprintf(writer, "%-45s%v\n", "buildinfo", version.BuildInfo) + + hook := NewFileHook(globalLogFormatter, writer) + logger.AddHook(hook) + }) +} + +func SetLevel(level string) error { + l, err := logrus.ParseLevel(level) + if err != nil { + return err + } + logger.SetLevel(l) + return nil +} + +func GetLogger(model string) *logrus.Entry { + return logger.WithFields(logrus.Fields{config.ModelKey: model}) +} + +func init() { + logrus.SetBufferPool(bufferPool) + logger.SetFormatter(globalLogFormatter) + logger.SetOutput(os.Stdout) +} + +func randomID() string { + return fmt.Sprintf("%08d", os.Getpid()) +} + +type TaosLogFormatter struct { +} + +func (t *TaosLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + b.Reset() + b.WriteString(entry.Time.Format("01/02 15:04:05.000000")) + b.WriteByte(' ') + b.WriteString(ServerID) + b.WriteByte(' ') + v, exist := entry.Data[config.ModelKey] + if exist && v != nil { + b.WriteString(v.(string)) + b.WriteByte(' ') + } else { + b.WriteString("CLI ") + } 
+ switch entry.Level { + case logrus.PanicLevel: + b.WriteString("PANIC ") + case logrus.FatalLevel: + b.WriteString("FATAL ") + case logrus.ErrorLevel: + b.WriteString("ERROR ") + case logrus.WarnLevel: + b.WriteString("WARN ") + case logrus.InfoLevel: + b.WriteString("INFO ") + case logrus.DebugLevel: + b.WriteString("DEBUG ") + case logrus.TraceLevel: + b.WriteString("TRACE ") + } + + // request id + v, exist = entry.Data[config.ReqIDKey] + if exist && v != nil { + b.WriteString(config.ReqIDKey) + b.WriteByte(':') + fmt.Fprintf(b, "0x%x ", v) + } + if len(entry.Message) > 0 && entry.Message[len(entry.Message)-1] == '\n' { + b.WriteString(entry.Message[:len(entry.Message)-1]) + } else { + b.WriteString(entry.Message) + } + // sort the keys + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + if k == config.ModelKey || k == config.ReqIDKey { + continue + } + keys = append(keys, k) + } + for _, k := range keys { + v := entry.Data[k] + if k == config.ReqIDKey && v == nil { + continue + } + b.WriteString(", ") + b.WriteString(k) + b.WriteByte(':') + fmt.Fprintf(b, "%v", v) + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func IsDebug() bool { + return logger.IsLevelEnabled(logrus.DebugLevel) +} + +func GetLogLevel() logrus.Level { + return logger.Level +} + +var zeroTime = time.Time{} +var zeroDuration = time.Duration(0) + +func GetLogNow(isDebug bool) time.Time { + if isDebug { + return time.Now() + } + return zeroTime +} +func GetLogDuration(isDebug bool, s time.Time) time.Duration { + if isDebug { + return time.Since(s) + } + return zeroDuration +} + +func Close(ctx context.Context) { + close(exist) + select { + case <-finish: + return + case <-ctx.Done(): + return + } +} diff --git a/tools/keeper/infrastructure/log/log_test.go b/tools/keeper/infrastructure/log/log_test.go new file mode 100644 index 00000000000..656cda4bbce --- /dev/null +++ b/tools/keeper/infrastructure/log/log_test.go @@ -0,0 +1,23 @@ +package log + +import ( + 
"context" + "fmt" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/infrastructure/config" +) + +func TestConfigLog(t *testing.T) { + config.InitConfig() + config.Conf.LogLevel = "debug" + ConfigLog() + debug, _ := logrus.ParseLevel("debug") + assert.Equal(t, logger.Level, debug) + assert.Equal(t, true, IsDebug()) + fmt.Print(GetLogNow(true), GetLogDuration(true, time.Now())) + Close(context.Background()) +} diff --git a/tools/keeper/infrastructure/log/web.go b/tools/keeper/infrastructure/log/web.go new file mode 100644 index 00000000000..4aa244448ba --- /dev/null +++ b/tools/keeper/infrastructure/log/web.go @@ -0,0 +1,55 @@ +package log + +import ( + "time" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func GinLog() gin.HandlerFunc { + logger := GetLogger("WEB") + + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + logger := logger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + statusCode := c.Writer.Status() + + startTime := time.Now() + c.Next() + endTime := time.Now() + latencyTime := endTime.Sub(startTime) + reqMethod := c.Request.Method + reqUri := c.Request.RequestURI + + clientIP := c.ClientIP() + + if statusCode != 200 { + logger.Errorf("finish request, status_code:%3d, latency:%v, client_ip:%s, method:%s, uri:%s", statusCode, latencyTime, clientIP, reqMethod, reqUri) + return + } + logger.Infof("finish request, status_code:%3d, latency:%v, client_ip:%s, method:%s, uri:%s", statusCode, latencyTime, clientIP, reqMethod, reqUri) + } +} + +type recoverLog struct { + logger logrus.FieldLogger +} + +func (r *recoverLog) Write(p []byte) (n int, err error) { + r.logger.Errorln(string(p)) + return len(p), nil +} + +func GinRecoverLog() gin.HandlerFunc { + logger := GetLogger("WEB") + return func(c *gin.Context) { + writer := 
&recoverLog{logger: logger} + gin.RecoveryWithWriter(writer)(c) + } +} diff --git a/tools/keeper/main.go b/tools/keeper/main.go new file mode 100644 index 00000000000..5f7d47cb1cf --- /dev/null +++ b/tools/keeper/main.go @@ -0,0 +1,8 @@ +package main + +import "github.com/taosdata/taoskeeper/system" + +func main() { + r := system.Init() + system.Start(r) +} diff --git a/tools/keeper/monitor/collect.go b/tools/keeper/monitor/collect.go new file mode 100644 index 00000000000..652ae1f1ce7 --- /dev/null +++ b/tools/keeper/monitor/collect.go @@ -0,0 +1,99 @@ +package monitor + +import ( + "math" + "os" + "runtime" + + "github.com/shirou/gopsutil/v3/mem" + "github.com/shirou/gopsutil/v3/process" + "github.com/taosdata/taoskeeper/util" +) + +type SysCollector interface { + CpuPercent() (float64, error) + MemPercent() (float64, error) +} + +type NormalCollector struct { + p *process.Process +} + +func NewNormalCollector() (*NormalCollector, error) { + p, err := process.NewProcess(int32(os.Getpid())) + if err != nil { + return nil, err + } + return &NormalCollector{p: p}, nil +} + +func (n *NormalCollector) CpuPercent() (float64, error) { + cpuPercent, err := n.p.Percent(0) + if err != nil { + return 0, err + } + return cpuPercent / float64(runtime.NumCPU()), nil +} + +func (n *NormalCollector) MemPercent() (float64, error) { + memPercent, err := n.p.MemoryPercent() + if err != nil { + return 0, err + } + return float64(memPercent), nil +} + +const ( + CGroupCpuQuotaPath = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + CGroupCpuPeriodPath = "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + CGroupMemLimitPath = "/sys/fs/cgroup/memory/memory.limit_in_bytes" +) + +type CGroupCollector struct { + p *process.Process + cpuCore float64 + totalMemory uint64 +} + +func NewCGroupCollector() (*CGroupCollector, error) { + p, err := process.NewProcess(int32(os.Getpid())) + if err != nil { + return nil, err + } + cpuPeriod, err := util.ReadUint(CGroupCpuPeriodPath) + if err != nil { + return nil, err + 
} + cpuQuota, err := util.ReadUint(CGroupCpuQuotaPath) + if err != nil { + return nil, err + } + cpuCore := float64(cpuQuota) / float64(cpuPeriod) + limitMemory, err := util.ReadUint(CGroupMemLimitPath) + if err != nil { + return nil, err + } + machineMemory, err := mem.VirtualMemory() + if err != nil { + return nil, err + } + totalMemory := uint64(math.Min(float64(limitMemory), float64(machineMemory.Total))) + return &CGroupCollector{p: p, cpuCore: cpuCore, totalMemory: totalMemory}, nil +} + +func (c *CGroupCollector) CpuPercent() (float64, error) { + cpuPercent, err := c.p.Percent(0) + if err != nil { + return 0, err + } + cpuPercent = cpuPercent / c.cpuCore + return cpuPercent, nil +} + +func (c *CGroupCollector) MemPercent() (float64, error) { + memInfo, err := c.p.MemoryInfo() + if err != nil { + return 0, err + } + return 100 * float64(memInfo.RSS) / float64(c.totalMemory), nil +} diff --git a/tools/keeper/monitor/empty_test.go b/tools/keeper/monitor/empty_test.go new file mode 100644 index 00000000000..689acfac4c3 --- /dev/null +++ b/tools/keeper/monitor/empty_test.go @@ -0,0 +1,8 @@ +package monitor + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/monitor/monitor.go b/tools/keeper/monitor/monitor.go new file mode 100644 index 00000000000..6f3083e8660 --- /dev/null +++ b/tools/keeper/monitor/monitor.go @@ -0,0 +1,89 @@ +package monitor + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/taosdata/taoskeeper/api" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" + "github.com/taosdata/taoskeeper/util/pool" +) + +var logger = log.GetLogger("MON") + +func StartMonitor(identity string, conf *config.Config, reporter *api.Reporter) { + if len(identity) == 0 { + hostname, err := os.Hostname() + if err != nil { + logger.Errorf("can not get hostname, error:%s", err) + } + if 
len(hostname) > 40 { + hostname = hostname[:40] + } + identity = fmt.Sprintf("%s:%d", hostname, conf.Port) + } + + systemStatus := make(chan SysStatus) + _ = pool.GoroutinePool.Submit(func() { + var ( + cpuPercent float64 + memPercent float64 + totalReport int + ) + + for status := range systemStatus { + if status.CpuError == nil { + cpuPercent = status.CpuPercent + } + if status.MemError == nil { + memPercent = status.MemPercent + } + + totalResp := reporter.GetTotalRep() + for i := 0; i < 3; i++ { + totalReport = totalResp.Load().(int) + if totalResp.CompareAndSwap(totalReport, 0) { + break + } + logger.Warn("Reset keeper_monitor total resp via cas fail! Maybe to many concurrent ") + reporter.GetTotalRep().Store(0) + } + + var kn string + if len(identity) <= util.MAX_TABLE_NAME_LEN { + kn = util.ToValidTableName(identity) + } else { + kn = util.GetMd5HexStr(identity) + } + + sql := fmt.Sprintf("insert into `km_%s` using keeper_monitor tags ('%s') values ( now, "+ + " %f, %f, %d)", kn, identity, cpuPercent, memPercent, totalReport) + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, + conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + + ctx := context.Background() + if _, err = conn.Exec(ctx, sql, util.GetQidOwn()); err != nil { + logger.Errorf("execute sql:%s, error:%s", sql, err) + } + + if err := conn.Close(); err != nil { + logger.Errorf("close connection error, msg:%s", err) + } + } + }) + SysMonitor.Register(systemStatus) + interval, err := time.ParseDuration(conf.RotationInterval) + if err != nil { + panic(err) + } + Start(interval, conf.Env.InCGroup) +} diff --git a/tools/keeper/monitor/monitor_test.go b/tools/keeper/monitor/monitor_test.go new file mode 100644 index 00000000000..b2b860dcaa9 --- /dev/null +++ b/tools/keeper/monitor/monitor_test.go @@ -0,0 +1,58 @@ +package monitor + +import ( + 
"context" + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/go-utils/web" + "github.com/taosdata/taoskeeper/api" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/util" + + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" +) + +func TestStart(t *testing.T) { + conf := config.InitConfig() + if conf == nil { + panic("config error") + } + conf.Env.InCGroup = true + cpuCgroupDir := "/sys/fs/cgroup/cpu" + if _, err := os.Stat(cpuCgroupDir); os.IsNotExist(err) { + conf.Env.InCGroup = false + } + log.ConfigLog() + router := web.CreateRouter(false, &conf.Cors, false) + conf.Metrics.Database.Name = "monitor" + reporter := api.NewReporter(conf) + reporter.Init(router) + conf.RotationInterval = "1s" + StartMonitor("", conf, reporter) + time.Sleep(2 * time.Second) + for k, _ := range SysMonitor.outputs { + SysMonitor.Deregister(k) + } + + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl) + assert.NoError(t, err) + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn()) + +} + +func TestParseUint(t *testing.T) { + num, err := util.ParseUint("-1", 10, 8) + assert.Equal(t, nil, err) + assert.Equal(t, uint64(0), num) + num, err = util.ParseUint("0", 10, 8) + assert.Equal(t, nil, err) + assert.Equal(t, uint64(0), num) + num, err = util.ParseUint("257", 10, 8) + assert.Equal(t, "strconv.ParseUint: parsing \"257\": value out of range", err.Error()) + assert.Equal(t, uint64(0), num) +} diff --git a/tools/keeper/monitor/system.go b/tools/keeper/monitor/system.go new file mode 100644 index 00000000000..7d5ef5bd549 --- /dev/null +++ b/tools/keeper/monitor/system.go @@ -0,0 +1,97 @@ +package monitor + +import ( + "math" + "runtime" + "sync" + "time" + + 
"github.com/taosdata/taoskeeper/util/pool" +) + +type SysStatus struct { + CollectTime time.Time + CpuPercent float64 + CpuError error + MemPercent float64 + MemError error + GoroutineCounts int + ThreadCounts int +} + +type sysMonitor struct { + sync.Mutex + collectDuration time.Duration + collector SysCollector + status *SysStatus + outputs map[chan<- SysStatus]struct{} + ticker *time.Ticker +} + +func (s *sysMonitor) collect() { + s.status.CollectTime = time.Now() + s.status.CpuPercent, s.status.CpuError = s.collector.CpuPercent() + s.status.MemPercent, s.status.MemError = s.collector.MemPercent() + s.status.GoroutineCounts = runtime.NumGoroutine() + s.status.ThreadCounts, _ = runtime.ThreadCreateProfile(nil) + // skip when inf or nan + if math.IsInf(s.status.CpuPercent, 0) || math.IsNaN(s.status.CpuPercent) || + math.IsInf(s.status.MemPercent, 0) || math.IsNaN(s.status.MemPercent) { + return + } + + s.Lock() + for output := range s.outputs { + select { + case output <- *s.status: + default: + } + } + s.Unlock() +} + +func (s *sysMonitor) Register(c chan<- SysStatus) { + s.Lock() + if s.outputs == nil { + s.outputs = map[chan<- SysStatus]struct{}{ + c: {}, + } + } else { + s.outputs[c] = struct{}{} + } + s.Unlock() +} + +func (s *sysMonitor) Deregister(c chan<- SysStatus) { + s.Lock() + if s.outputs != nil { + delete(s.outputs, c) + } + s.Unlock() +} + +var SysMonitor = &sysMonitor{status: &SysStatus{}} + +func Start(collectDuration time.Duration, inCGroup bool) { + SysMonitor.collectDuration = collectDuration + if inCGroup { + collector, err := NewCGroupCollector() + if err != nil { + logger.Errorf("new normal group controller error, msg:%s", err) + } + SysMonitor.collector = collector + } else { + collector, err := NewNormalCollector() + if err != nil { + logger.Errorf("new normal controller error, msg:%s", err) + } + SysMonitor.collector = collector + } + SysMonitor.collect() + SysMonitor.ticker = time.NewTicker(SysMonitor.collectDuration) + 
pool.GoroutinePool.Submit(func() { + for range SysMonitor.ticker.C { + SysMonitor.collect() + } + }) +} diff --git a/tools/keeper/process/builder.go b/tools/keeper/process/builder.go new file mode 100644 index 00000000000..d6e37534bf9 --- /dev/null +++ b/tools/keeper/process/builder.go @@ -0,0 +1,55 @@ +package process + +import ( + "context" + "fmt" + + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var builderLogger = log.GetLogger("BLD") + +func ExpandMetricsFromConfig(ctx context.Context, conn *db.Connector, cfg *config.MetricsConfig) (tables map[string]struct{}, err error) { + tables = make(map[string]struct{}) + for _, name := range cfg.Tables { + builderLogger.Debug("normal table: ", name) + + _, exist := tables[name] + if exist { + builderLogger.Debug(name, "is exist in config") + continue + } + tables[name] = struct{}{} + } + + sql := fmt.Sprintf(GetStableNameListSql(), cfg.Database.Name) + data, err := conn.Query(ctx, sql, util.GetQidOwn()) + if err != nil { + return nil, err + } + builderLogger.Debugf("show stables:%s", sql) + + for _, info := range data.Data { + name := info[0].(string) + builderLogger.Debug("stable:", info) + + _, exist := tables[name] + if exist { + builderLogger.Debug(name, "is exist in config") + continue + } + tables[name] = struct{}{} + } + return +} + +func GetStableNameListSql() string { + return "select stable_name from information_schema.ins_stables " + + " where db_name = '%s' " + + " and (stable_name not like 'taosx\\_%%')" + + " and (stable_name not like 'taosadapter%%')" + + " and (stable_name != 'temp_dir' and stable_name != 'data_dir')" +} diff --git a/tools/keeper/process/handle.go b/tools/keeper/process/handle.go new file mode 100644 index 00000000000..980902daca3 --- /dev/null +++ b/tools/keeper/process/handle.go @@ -0,0 +1,666 @@ +package process + +import ( + "context" + 
"errors" + "fmt" + "math" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + taosError "github.com/taosdata/driver-go/v3/errors" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" + "github.com/taosdata/taoskeeper/util/pool" +) + +var logger = log.GetLogger("HND") + +var metricNameMap = map[string]string{ + "taosd_cluster_basic_first_ep": "cluster_info_first_ep", + "taosd_cluster_basic_first_ep_dnode_id": "cluster_info_first_ep_dnode_id", + "taosd_cluster_basic_cluster_version": "cluster_info_version", + + "taosd_cluster_info_cluster_uptime": "cluster_info_master_uptime", + "taosd_cluster_info_dbs_total": "cluster_info_dbs_total", + "taosd_cluster_info_tbs_total": "cluster_info_tbs_total", + "taosd_cluster_info_stbs_total": "cluster_info_stbs_total", + "taosd_cluster_info_dnodes_total": "cluster_info_dnodes_total", + "taosd_cluster_info_dnodes_alive": "cluster_info_dnodes_alive", + "taosd_cluster_info_mnodes_total": "cluster_info_mnodes_total", + "taosd_cluster_info_mnodes_alive": "cluster_info_mnodes_alive", + "taosd_cluster_info_vgroups_total": "cluster_info_vgroups_total", + "taosd_cluster_info_vgroups_alive": "cluster_info_vgroups_alive", + "taosd_cluster_info_vnodes_total": "cluster_info_vnodes_total", + "taosd_cluster_info_vnodes_alive": "cluster_info_vnodes_alive", + "taosd_cluster_info_connections_total": "cluster_info_connections_total", + "taosd_cluster_info_topics_total": "cluster_info_topics_total", + "taosd_cluster_info_streams_total": "cluster_info_streams_total", + + "taosd_cluster_info_grants_expire_time": "grants_info_expire_time", + "taosd_cluster_info_grants_timeseries_used": "grants_info_timeseries_used", + "taosd_cluster_info_grants_timeseries_total": "grants_info_timeseries_total", + + "taosd_dnodes_info_uptime": "dnodes_info_uptime", + "taosd_dnodes_info_cpu_engine": 
"dnodes_info_cpu_engine", + "taosd_dnodes_info_cpu_system": "dnodes_info_cpu_system", + "taosd_dnodes_info_cpu_cores": "dnodes_info_cpu_cores", + "taosd_dnodes_info_mem_engine": "dnodes_info_mem_engine", + "taosd_dnodes_info_mem_free": "dnodes_info_mem_system", + "taosd_dnodes_info_mem_total": "dnodes_info_mem_total", + "taosd_dnodes_info_disk_engine": "dnodes_info_disk_engine", + "taosd_dnodes_info_disk_used": "dnodes_info_disk_used", + "taosd_dnodes_info_disk_total": "dnodes_info_disk_total", + "taosd_dnodes_info_system_net_in": "dnodes_info_net_in", + "taosd_dnodes_info_system_net_out": "dnodes_info_net_out", + "taosd_dnodes_info_io_read": "dnodes_info_io_read", + "taosd_dnodes_info_io_write": "dnodes_info_io_write", + "taosd_dnodes_info_io_read_disk": "dnodes_info_io_read_disk", + "taosd_dnodes_info_io_write_disk": "dnodes_info_io_write_disk", + "taosd_dnodes_info_vnodes_num": "dnodes_info_vnodes_num", + "taosd_dnodes_info_masters": "dnodes_info_masters", + "taosd_dnodes_info_has_mnode": "dnodes_info_has_mnode", + "taosd_dnodes_info_has_qnode": "dnodes_info_has_qnode", + "taosd_dnodes_info_has_snode": "dnodes_info_has_snode", + "taosd_dnodes_info_has_bnode": "dnodes_info_has_bnode", + "taosd_dnodes_info_errors": "dnodes_info_errors", + "taosd_dnodes_info_error_log_count": "log_summary_error", + "taosd_dnodes_info_info_log_count": "log_summary_info", + "taosd_dnodes_info_debug_log_count": "log_summary_debug", + "taosd_dnodes_info_trace_log_count": "log_summary_trace", + + "taosd_dnodes_status_status": "d_info_status", + + "taosd_mnodes_info_role": "m_info_role", +} + +var metricTypeMap = map[string]CollectType{ + "taosd_cluster_basic_first_ep": Info, + "taosd_cluster_basic_first_ep_dnode_id": Counter, + "taosd_cluster_basic_cluster_version": Info, + + "taosd_cluster_info_cluster_uptime": Gauge, + "taosd_cluster_info_dbs_total": Counter, + "taosd_cluster_info_tbs_total": Counter, + "taosd_cluster_info_stbs_total": Counter, + "taosd_cluster_info_dnodes_total": 
Counter, + "taosd_cluster_info_dnodes_alive": Counter, + "taosd_cluster_info_mnodes_total": Counter, + "taosd_cluster_info_mnodes_alive": Counter, + "taosd_cluster_info_vgroups_total": Counter, + "taosd_cluster_info_vgroups_alive": Counter, + "taosd_cluster_info_vnodes_total": Counter, + "taosd_cluster_info_vnodes_alive": Counter, + "taosd_cluster_info_connections_total": Counter, + "taosd_cluster_info_topics_total": Counter, + "taosd_cluster_info_streams_total": Counter, + + "taosd_cluster_info_grants_expire_time": Counter, + "taosd_cluster_info_grants_timeseries_used": Counter, + "taosd_cluster_info_grants_timeseries_total": Counter, + + "taosd_dnodes_info_uptime": Gauge, + "taosd_dnodes_info_cpu_engine": Gauge, + "taosd_dnodes_info_cpu_system": Gauge, + "taosd_dnodes_info_cpu_cores": Gauge, + "taosd_dnodes_info_mem_engine": Counter, + "taosd_dnodes_info_mem_free": Counter, + "taosd_dnodes_info_mem_total": Counter, + "taosd_dnodes_info_disk_engine": Counter, + "taosd_dnodes_info_disk_used": Counter, + "taosd_dnodes_info_disk_total": Counter, + "taosd_dnodes_info_system_net_in": Gauge, + "taosd_dnodes_info_system_net_out": Gauge, + "taosd_dnodes_info_io_read": Gauge, + "taosd_dnodes_info_io_write": Gauge, + "taosd_dnodes_info_io_read_disk": Gauge, + "taosd_dnodes_info_io_write_disk": Gauge, + "taosd_dnodes_info_vnodes_num": Counter, + "taosd_dnodes_info_masters": Counter, + "taosd_dnodes_info_has_mnode": Counter, + "taosd_dnodes_info_has_qnode": Counter, + "taosd_dnodes_info_has_snode": Counter, + "taosd_dnodes_info_has_bnode": Counter, + "taosd_dnodes_info_errors": Counter, + "taosd_dnodes_info_error_log_count": Counter, + "taosd_dnodes_info_info_log_count": Counter, + "taosd_dnodes_info_debug_log_count": Counter, + "taosd_dnodes_info_trace_log_count": Counter, + + "taosd_dnodes_status_status": Info, + + "taosd_mnodes_info_role": Info, +} + +type CollectType string + +const ( + Counter CollectType = "counter" + Gauge CollectType = "gauge" + Info CollectType = 
"info" + Summary CollectType = "summary" +) + +type Processor struct { + prefix string + db string + tableMap map[string]*Table //tableName:*Table{} + metricMap map[string]*Metric //Fqname:*Metric{} + tableList []string + ctx context.Context + rotationInterval time.Duration + exitChan chan struct{} + dbConn *db.Connector + summaryTable map[string]*Table + tables map[string]struct{} +} + +func (p *Processor) Describe(descs chan<- *prometheus.Desc) { + for _, metric := range p.metricMap { + descs <- metric.Desc + } +} + +func (p *Processor) Collect(metrics chan<- prometheus.Metric) { + for _, metric := range p.metricMap { + logger.Tracef("metric name:%v", metric.FQName) + + switch metric.Type { + case Gauge: + gv := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: metric.FQName, + Help: metric.Help, + ConstLabels: metric.ConstLabels, + }, metric.Variables) + for _, value := range metric.GetValue() { + if value.Value == nil { + continue + } + g := gv.With(value.Label) + g.Set(value.Value.(float64)) + metrics <- g + } + case Counter: + cv := prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: metric.FQName, + Help: metric.Help, + ConstLabels: metric.ConstLabels, + }, metric.Variables) + for _, value := range metric.GetValue() { + if value.Value == nil { + continue + } + v := i2float(value.Value) + if v < 0 { + logger.Warningf("negative value for prometheus counter. label %v value %v", + value.Label, value.Value) + continue + } + c := cv.With(value.Label) + c.Add(v) + metrics <- c + } + case Info: + lbs := []string{"value"} + lbs = append(lbs, metric.Variables...) 
+ gf := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: metric.FQName, + Help: metric.Help, + ConstLabels: metric.ConstLabels, + }, lbs) + for _, value := range metric.GetValue() { + if value == nil { + continue + } + v := make(map[string]string, len(value.Label)+1) + v["value"] = value.Value.(string) + for k, l := range value.Label { + v[k] = l + } + g := gf.With(v) + g.Set(1) + metrics <- g + } + case Summary: + } + } +} + +type Table struct { + tsName string + Variables []string + ColumnList []string +} + +type Metric struct { + sync.RWMutex + FQName string + Help string + Type CollectType + ColType int + ConstLabels map[string]string + Variables []string + Desc *prometheus.Desc + LastValue []*Value +} + +func (m *Metric) SetValue(v []*Value) { + m.Lock() + defer m.Unlock() + m.LastValue = v +} + +func (m *Metric) GetValue() []*Value { + m.RLock() + defer m.RUnlock() + return m.LastValue +} + +type Value struct { + Label map[string]string + Value interface{} +} + +func NewProcessor(conf *config.Config) *Processor { + + conn, err := db.NewConnector(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl) + if err != nil { + panic(err) + } + interval, err := time.ParseDuration(conf.RotationInterval) + if err != nil { + panic(err) + } + ctx := context.Background() + tables, err := ExpandMetricsFromConfig(ctx, conn, &conf.Metrics) + if err != nil { + panic(err) + } + p := &Processor{ + prefix: conf.Metrics.Prefix, + db: conf.Metrics.Database.Name, + tableMap: map[string]*Table{}, + metricMap: map[string]*Metric{}, + ctx: ctx, + rotationInterval: interval, + exitChan: make(chan struct{}), + dbConn: conn, + summaryTable: map[string]*Table{"taosadapter_restful_http_request_summary_milliseconds": nil}, + tables: tables, + } + p.Prepare() + p.Process() + return p +} + +func (p *Processor) Prepare() { + locker := sync.RWMutex{} + wg := sync.WaitGroup{} + wg.Add(len(p.tables)) + + for tn := range p.tables { + 
tableName := tn + + err := pool.GoroutinePool.Submit(func() { + defer wg.Done() + data, err := p.dbConn.Query(p.ctx, fmt.Sprintf("describe %s", p.withDBName(tableName)), util.GetQidOwn()) + if err != nil { + var tdEngineError *taosError.TaosError + if errors.As(err, &tdEngineError) { + logger.Errorf("table %s not exist, skip it, error:%s", tableName, err) + } else { + logger.Errorf("could not get table %s metadata, skip it, error:%s", tableName, err) + } + return + } + + tags := make([]string, 0, len(data.Data)) + columns := make([]string, 0, len(data.Data)) + typeList := make([]string, 0, len(data.Data)) + columnMap := make(map[string]struct{}, len(data.Data)) + variablesMap := make(map[string]struct{}, len(data.Data)) + for _, info := range data.Data { + if info[3].(string) != "" { + variable := info[0].(string) + tags = append(tags, variable) + variablesMap[variable] = struct{}{} + } else { + column := info[0].(string) + columns = append(columns, column) + typeList = append(typeList, info[1].(string)) + columnMap[column] = struct{}{} + } + } + + // metrics := make([]*Metric, 0, len(columns)) + // newMetrics := make(map[string]*Metric, len(columns)) + columnList := make([]string, 0, len(columns)) + + timestampColumn := "ts" + _, exist := p.summaryTable[tableName] + for i, column := range columns { + if _, columnExist := variablesMap[column]; columnExist { + continue + } + + if typeList[i] == "TIMESTAMP" { + timestampColumn = column + continue + } + + columnName, metricType := "", Summary + if !exist { + columnName = column + + if _, ok := metricTypeMap[tableName+"_"+columnName]; ok { + metricType = metricTypeMap[tableName+"_"+columnName] + } else { + metricType = exchangeDBType(typeList[i]) + } + + // 为了兼容性,硬编码,后续要优化 + if strings.HasSuffix(columnName, "role") { + metricType = Info + } + } + + labels := make(map[string]string) + + fqName := p.buildFQName(tableName, columnName) + pDesc := prometheus.NewDesc(fqName, "", nil, labels) + metric := &Metric{ + Type: 
metricType, + Desc: pDesc, + FQName: fqName, + Help: "", + ConstLabels: labels, + Variables: tags, + } + // metrics = append(metrics, metric) + // newMetrics[column] = metric + + locker.Lock() + p.metricMap[fqName] = metric + locker.Unlock() + + columnList = append(columnList, column) + } + + t := &Table{ + tsName: timestampColumn, + Variables: tags, + ColumnList: columnList, + } + locker.Lock() + p.tableMap[tableName] = t + p.tableList = append(p.tableList, tableName) + locker.Unlock() + + }) + if err != nil { + panic(err) + } + } + + wg.Wait() +} + +func (p *Processor) withDBName(tableName string) string { + b := pool.BytesPoolGet() + b.WriteString(p.db) + b.WriteByte('.') + b.WriteString(tableName) + return b.String() +} + +func (p *Processor) Process() { + // 首先清空所有指标值 + for _, metric := range p.metricMap { + metric.SetValue(nil) + } + + for _, tableName := range p.tableList { + tagIndex := 0 + hasTag := false + b := pool.BytesPoolGet() + b.WriteString("select ") + + table := p.tableMap[tableName] + columns := table.ColumnList + + for i, column := range columns { + b.WriteString("last_row(`" + column + "`) as `" + column + "`") + if i != len(columns)-1 { + b.WriteByte(',') + } + } + + if len(table.Variables) > 0 { + tagIndex = len(columns) + for _, tag := range table.Variables { + b.WriteString(", last_row(`" + tag + "`) as `" + tag + "`") + } + } + + b.WriteString(" from ") + b.WriteString(p.withDBName(tableName)) + + b.WriteString(" WHERE " + p.tableMap[tableName].tsName + " > (NOW() - 1m) ") + + if len(table.Variables) > 0 { + tagIndex = len(columns) + b.WriteString(" group by ") + for i, tag := range table.Variables { + b.WriteString("`" + tag + "`") + if i != len(table.Variables)-1 { + b.WriteByte(',') + } + } + } + sql := b.String() + pool.BytesPoolPut(b) + data, err := p.dbConn.Query(p.ctx, sql, util.GetQidOwn()) + logger.Debug(sql) + if err != nil { + logger.WithError(err).Errorln("select data sql:", sql) + continue + } + if tagIndex > 0 { + hasTag = 
true + } + if len(data.Data) == 0 { + continue + } + values := make([][]*Value, len(table.ColumnList)) + for _, row := range data.Data { + label := map[string]string{} + valuesMap := make(map[string]interface{}) + colEndIndex := len(columns) + if hasTag { + for i := tagIndex; i < len(data.Head); i++ { + if row[i] != nil { + label[data.Head[i]] = fmt.Sprintf("%v", row[i]) + } + } + } + // values array to map + for i := 0; i < colEndIndex; i++ { + valuesMap[columns[i]] = row[i] + } + for i, column := range table.ColumnList { + var v interface{} + metric := p.metricMap[p.buildFQName(tableName, column)] + switch metric.Type { + case Info: + _, isFloat := valuesMap[column].(float64) + if strings.HasSuffix(column, "role") && valuesMap[column] != nil && isFloat { + v = getRoleStr(valuesMap[column].(float64)) + break + } + if strings.HasSuffix(column, "status") && valuesMap[column] != nil && isFloat { + v = getStatusStr(valuesMap[column].(float64)) + break + } + + if valuesMap[column] != nil { + v = i2string(valuesMap[column]) + } else { + v = nil + } + case Counter, Gauge, Summary: + if valuesMap[column] != nil { + v = i2float(valuesMap[column]) + if column == "cluster_uptime" { + v = i2float(valuesMap[column]) / 86400 + } + } else { + v = nil + } + } + values[i] = append(values[i], &Value{ + Label: label, + Value: v, + }) + } + } + + for i, column := range table.ColumnList { + metric := p.metricMap[p.buildFQName(tableName, column)] + for _, value := range values[i] { + logger.Tracef("set metric:%s, Label:%v, Value:%v", column, value.Label, value.Value) + } + if metric.GetValue() != nil { + values[i] = append(values[i], metric.GetValue()...) 
+ } + metric.SetValue(values[i]) + } + } +} + +func (p *Processor) buildFQName(tableName string, column string) string { + + // keep same metric name + tempFQName := tableName + "_" + column + if _, ok := metricNameMap[tempFQName]; ok { + return p.prefix + "_" + metricNameMap[tempFQName] + } + + b := pool.BytesPoolGet() + b.WriteString(p.prefix) + b.WriteByte('_') + + b.WriteString(tableName) + + if column != "" { + b.WriteByte('_') + b.WriteString(column) + } + + fqName := b.String() + pool.BytesPoolPut(b) + + return fqName +} + +func (p *Processor) GetMetric() map[string]*Metric { + return p.metricMap +} + +func (p *Processor) Close() error { + close(p.exitChan) + return p.dbConn.Close() +} + +func getRoleStr(v float64) string { + rounded := math.Round(v) + integer := int(rounded) + + switch integer { + case 0: + return "offline" + case 100: + return "follower" + case 101: + return "candidate" + case 102: + return "leader" + case 103: + return "error" + case 104: + return "learner" + } + return "unknown" +} + +func getStatusStr(v float64) string { + rounded := math.Round(v) + integer := int(rounded) + + switch integer { + case 0: + return "offline" + case 1: + return "ready" + } + return "unknown" +} + +func exchangeDBType(t string) CollectType { + switch t { + case "BOOL", "FLOAT", "DOUBLE": + return Gauge + case "TINYINT", "SMALLINT", "INT", "BIGINT", "TINYINT UNSIGNED", "SMALLINT UNSIGNED", "INT UNSIGNED", "BIGINT UNSIGNED": + return Counter + case "BINARY", "NCHAR", "VARCHAR": + return Info + default: + panic("unsupported type") + } +} + +func i2string(value interface{}) string { + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + panic("unexpected type to string") + } +} + +func i2float(value interface{}) float64 { + switch v := value.(type) { + case int8: + return float64(v) + case int16: + return float64(v) + case int32: + return float64(v) + case int64: + return float64(v) + case uint8: + return float64(v) 
+ case uint16: + return float64(v) + case uint32: + return float64(v) + case uint64: + return float64(v) + case float64: + return v + case float32: + return float64(v) + case bool: + if v { + return 1 + } + return 0 + default: + panic("unexpected type to float64") + } +} diff --git a/tools/keeper/process/handle_test.go b/tools/keeper/process/handle_test.go new file mode 100644 index 00000000000..bfd5c369b4c --- /dev/null +++ b/tools/keeper/process/handle_test.go @@ -0,0 +1,121 @@ +package process + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_i2string(t *testing.T) { + tests := []struct { + value interface{} + expected string + }{ + {"abc", "abc"}, + {"abcdef", "abcdef"}, + {[]byte{97, 98, 99, 100, 101, 102}, "abcdef"}, + } + + for _, tt := range tests { + res := i2string(tt.value) + assert.Equal(t, tt.expected, res) + } +} + +func Test_i2string_panic(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("Expected panic for unexpected type, but did not panic") + } + }() + + i2string(12345) +} + +func Test_i2float(t *testing.T) { + tests := []struct { + value interface{} + expected float64 + }{ + {int8(1), 1.0}, + {int16(1), 1.0}, + {int32(1), 1.0}, + {int64(1), 1.0}, + {uint8(1), 1.0}, + {uint16(1), 1.0}, + {uint32(1), 1.0}, + {uint64(1), 1.0}, + {float32(1.5), 1.5}, + {float64(1.5), 1.5}, + {true, 1.0}, + {false, 0.0}, + } + + for _, tt := range tests { + res := i2float(tt.value) + assert.Equal(t, tt.expected, res) + } +} + +func Test_i2float_panic(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("Expected panic for unexpected type, but did not panic") + } + }() + + i2float("unexpected type") +} + +func Test_getRoleStr(t *testing.T) { + tests := []struct { + value float64 + expected string + }{ + {0, "offline"}, + {99.5, "follower"}, + {100, "follower"}, + {100.4, "follower"}, + {100.5, "candidate"}, + {101, "candidate"}, + {101.4, "candidate"}, + {101.5, "leader"}, + {102, 
"leader"}, + {102.4, "leader"}, + {102.5, "error"}, + {103, "error"}, + {104, "learner"}, + {99.4, "unknown"}, + {105, "unknown"}, + {-1, "unknown"}, + {150, "unknown"}, + } + + for _, tt := range tests { + res := getRoleStr(tt.value) + assert.Equal(t, tt.expected, res) + } +} + +func Test_getStatusStr(t *testing.T) { + tests := []struct { + value float64 + expected string + }{ + {-0.4, "offline"}, + {0, "offline"}, + {0.4, "offline"}, + {0.5, "ready"}, + {1, "ready"}, + {1.4, "ready"}, + {1.5, "unknown"}, + {2, "unknown"}, + {-0.5, "unknown"}, + {-1, "unknown"}, + } + + for _, tt := range tests { + res := getStatusStr(tt.value) + assert.Equal(t, tt.expected, res) + } +} diff --git a/tools/keeper/prometheus/prometheus.yml b/tools/keeper/prometheus/prometheus.yml new file mode 100644 index 00000000000..397d566d918 --- /dev/null +++ b/tools/keeper/prometheus/prometheus.yml @@ -0,0 +1,13 @@ +global: + scrape_interval: 5s + +scrape_configs: + - job_name: "prometheus" + static_configs: + - targets: ["localhost:9090"] + - job_name: "taoskeeper" + static_configs: + - targets: ["taoskeeper:6043"] + - job_name: "node" + static_configs: + - targets: ["nodeexporter:9100"] diff --git a/tools/keeper/system/empty_test.go b/tools/keeper/system/empty_test.go new file mode 100644 index 00000000000..a4d4777d329 --- /dev/null +++ b/tools/keeper/system/empty_test.go @@ -0,0 +1,8 @@ +package system + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/system/program.go b/tools/keeper/system/program.go new file mode 100644 index 00000000000..b8f1d8943fe --- /dev/null +++ b/tools/keeper/system/program.go @@ -0,0 +1,146 @@ +package system + +import ( + "context" + "fmt" + "net/http" + "os" + "strconv" + "time" + + "github.com/kardianos/service" + "github.com/taosdata/go-utils/web" + "github.com/taosdata/taoskeeper/api" + "github.com/taosdata/taoskeeper/cmd" + "github.com/taosdata/taoskeeper/infrastructure/config" + 
"github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/monitor" + "github.com/taosdata/taoskeeper/process" + "github.com/taosdata/taoskeeper/version" +) + +var logger = log.GetLogger("PRG") + +func Init() *http.Server { + conf := config.InitConfig() + log.ConfigLog() + + if len(conf.Transfer) > 0 || len(conf.Drop) > 0 { + cmd := cmd.NewCommand(conf) + cmd.Process(conf) + os.Exit(0) + return nil + } + + router := web.CreateRouter(false, &conf.Cors, false) + router.Use(log.GinLog()) + router.Use(log.GinRecoverLog()) + + reporter := api.NewReporter(conf) + reporter.Init(router) + monitor.StartMonitor(conf.Metrics.Cluster, conf, reporter) + + go func() { + // wait for monitor to all metric received + time.Sleep(time.Second * 35) + + processor := process.NewProcessor(conf) + node := api.NewNodeExporter(processor) + node.Init(router) + + if version.IsEnterprise == "true" { + zabbix := api.NewZabbix(processor) + zabbix.Init(router) + } + }() + + checkHealth := api.NewCheckHealth(version.Version) + checkHealth.Init(router) + + if version.IsEnterprise == "true" { + if conf.Audit.Enable { + audit, err := api.NewAudit(conf) + if err != nil { + panic(err) + } + if err = audit.Init(router); err != nil { + panic(err) + } + } + } + + adapter := api.NewAdapter(conf) + if err := adapter.Init(router); err != nil { + panic(err) + } + + gen_metric := api.NewGeneralMetric(conf) + if err := gen_metric.Init(router); err != nil { + panic(err) + } + + server := &http.Server{ + Addr: ":" + strconv.Itoa(conf.Port), + Handler: router, + } + + return server +} + +func Start(server *http.Server) { + prg := newProgram(server) + svcConfig := &service.Config{ + Name: "taoskeeper", + DisplayName: "taoskeeper", + Description: "taosKeeper is a tool for TDengine that exports monitoring metrics", + } + s, err := service.New(prg, svcConfig) + if err != nil { + logger.Fatal(err) + } + err = s.Run() + if err != nil { + logger.Fatal(err) + } +} + +type program struct { + 
server *http.Server +} + +func newProgram(server *http.Server) *program { + return &program{server: server} +} + +func (p *program) Start(s service.Service) error { + if service.Interactive() { + logger.Info("Running in terminal.") + } else { + logger.Info("Running under service manager.") + } + + server := p.server + go func() { + if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + panic(fmt.Errorf("taoskeeper start up fail! %v", err)) + } + }() + return nil +} + +func (p *program) Stop(s service.Service) error { + logger.Println("Shutdown WebServer ...") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := p.server.Shutdown(ctx); err != nil { + logger.Println("WebServer Shutdown error:", err) + } + + logger.Println("Server exiting") + ctxLog, cancelLog := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelLog() + logger.Println("Flushing Log") + log.Close(ctxLog) + return nil +} diff --git a/tools/keeper/system/program_test.go b/tools/keeper/system/program_test.go new file mode 100644 index 00000000000..63fbb630141 --- /dev/null +++ b/tools/keeper/system/program_test.go @@ -0,0 +1,45 @@ +package system + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/kardianos/service" + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func TestInit(t *testing.T) { + server := Init() + assert.NotNil(t, server) + + conn, err := db.NewConnectorWithDb(config.Conf.TDengine.Username, config.Conf.TDengine.Password, config.Conf.TDengine.Host, config.Conf.TDengine.Port, config.Conf.Metrics.Database.Name, config.Conf.TDengine.Usessl) + assert.NoError(t, err) + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", config.Conf.Metrics.Database.Name), util.GetQidOwn()) + conn.Query(context.Background(), 
fmt.Sprintf("drop database if exists %s", config.Conf.Audit.Database.Name), util.GetQidOwn()) +} + +func Test_program(t *testing.T) { + server := &http.Server{} + prg := newProgram(server) + svcConfig := &service.Config{ + Name: "taoskeeper", + DisplayName: "taoskeeper", + Description: "taosKeeper is a tool for TDengine that exports monitoring metrics", + } + svc, err := service.New(prg, svcConfig) + assert.NoError(t, err) + + err = prg.Start(svc) + assert.NoError(t, err) + + time.Sleep(100 * time.Millisecond) + + err = prg.Stop(svc) + assert.NoError(t, err) +} diff --git a/tools/keeper/taoskeeper.service b/tools/keeper/taoskeeper.service new file mode 100644 index 00000000000..d8478bc59bd --- /dev/null +++ b/tools/keeper/taoskeeper.service @@ -0,0 +1,19 @@ +[Unit] +Description=TaosKeeper - TDengine Metrics Exporter for Kinds of Collectors +Documentation=https://www.taosdata.com +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/taoskeeper +TimeoutSec=0 +RestartSec=2 +StandardOutput=null +StandardError=journal +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/tools/keeper/telegraf.conf b/tools/keeper/telegraf.conf new file mode 100644 index 00000000000..aa2e8e0b350 --- /dev/null +++ b/tools/keeper/telegraf.conf @@ -0,0 +1,6 @@ +[[inputs.prometheus]] +# An array of urls to scrape metrics from. 
+urls = ["${TAOSKEEPER}"] + +[[outputs.file]] +files = ["stdout"] diff --git a/tools/keeper/telegraf.yml b/tools/keeper/telegraf.yml new file mode 100644 index 00000000000..a02e9f669b5 --- /dev/null +++ b/tools/keeper/telegraf.yml @@ -0,0 +1,9 @@ +version: "3.6" +services: + telegraf: + image: telegraf:1.20-alpine + hostname: telegraf + volumes: + - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro + environment: + TAOSKEEPER: http://taoskeeper:6043/metrics diff --git a/tools/keeper/util/empty_test.go b/tools/keeper/util/empty_test.go new file mode 100644 index 00000000000..5d828667217 --- /dev/null +++ b/tools/keeper/util/empty_test.go @@ -0,0 +1,8 @@ +package util + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/util/pool/antpool.go b/tools/keeper/util/pool/antpool.go new file mode 100644 index 00000000000..7a4ecd46dea --- /dev/null +++ b/tools/keeper/util/pool/antpool.go @@ -0,0 +1,15 @@ +package pool + +import ( + "github.com/panjf2000/ants/v2" +) + +var GoroutinePool *ants.Pool + +func Init(size int) { + var err error + GoroutinePool, err = ants.NewPool(size) + if err != nil { + panic(err) + } +} diff --git a/tools/keeper/util/pool/bytes.go b/tools/keeper/util/pool/bytes.go new file mode 100644 index 00000000000..0fc44f77b87 --- /dev/null +++ b/tools/keeper/util/pool/bytes.go @@ -0,0 +1,23 @@ +package pool + +import ( + "bytes" + "sync" +) + +var bytesBufferPool sync.Pool + +func init() { + bytesBufferPool.New = func() interface{} { + return &bytes.Buffer{} + } +} + +func BytesPoolGet() *bytes.Buffer { + return bytesBufferPool.Get().(*bytes.Buffer) +} + +func BytesPoolPut(b *bytes.Buffer) { + b.Reset() + bytesBufferPool.Put(b) +} diff --git a/tools/keeper/util/pool/empty_test.go b/tools/keeper/util/pool/empty_test.go new file mode 100644 index 00000000000..dcbca2d11d9 --- /dev/null +++ b/tools/keeper/util/pool/empty_test.go @@ -0,0 +1,8 @@ +package pool + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff 
--git a/tools/keeper/util/util.go b/tools/keeper/util/util.go new file mode 100644 index 00000000000..a739c237605 --- /dev/null +++ b/tools/keeper/util/util.go @@ -0,0 +1,154 @@ +package util + +import ( + "crypto/md5" + "encoding/hex" + "os" + "strconv" + "strings" + "sync/atomic" + "time" + "unicode" + + "github.com/taosdata/taoskeeper/infrastructure/config" +) + +// https://github.com/containerd/cgroups/blob/main/utils.go +var globalCounter64 uint64 +var globalCounter32 uint32 + +var MAX_TABLE_NAME_LEN = 190 + +func init() { + atomic.StoreUint64(&globalCounter64, 0) + atomic.StoreUint32(&globalCounter32, 0) +} + +func ReadUint(path string) (uint64, error) { + v, err := os.ReadFile(path) + if err != nil { + return 0, err + } + return ParseUint(strings.TrimSpace(string(v)), 10, 64) +} + +func ParseUint(s string, base, bitSize int) (uint64, error) { + v, err := strconv.ParseUint(s, base, bitSize) + if err != nil { + intValue, intErr := strconv.ParseInt(s, base, bitSize) + // 1. Handle negative values greater than MinInt64 (and) + // 2. 
Handle negative values lesser than MinInt64 + if intErr == nil && intValue < 0 { + return 0, nil + } else if intErr != nil && + intErr.(*strconv.NumError).Err == strconv.ErrRange && + intValue < 0 { + return 0, nil + } + return 0, err + } + return v, nil +} + +func EscapeInfluxProtocol(s string) string { + s = strings.TrimSuffix(s, "\\") + s = strings.ReplaceAll(s, ",", "\\,") + s = strings.ReplaceAll(s, "=", "\\=") + s = strings.ReplaceAll(s, " ", "\\ ") + s = strings.ReplaceAll(s, "\"", "\\\"") + return s +} + +func GetCfg() *config.Config { + c := &config.Config{ + InstanceID: 64, + Port: 6043, + LogLevel: "trace", + TDengine: config.TDengineRestful{ + Host: "127.0.0.1", + Port: 6041, + Username: "root", + Password: "taosdata", + Usessl: false, + }, + Metrics: config.MetricsConfig{ + Database: config.Database{ + Name: "keeper_test_log", + Options: map[string]interface{}{}, + }, + }, + Log: config.Log{ + Level: "trace", + Path: "/var/log/taos", + RotationCount: 10, + RotationTime: 24 * time.Hour, + RotationSize: 1073741824, + Compress: true, + ReservedDiskSize: 1073741824, + }, + } + return c +} + +func SafeSubstring(s string, n int) string { + if len(s) > n { + return s[:n] + } + return s +} + +func GetQid(qidStr string) uint64 { + if qidStr == "" || !strings.HasPrefix(qidStr, "0x") { + qid32 := atomic.AddUint32(&globalCounter32, 1) + qid64 := uint64(qid32) << 8 + return qid64 + } + + qid, err := strconv.ParseUint(qidStr[2:], 16, 64) + if err != nil { + qid32 := atomic.AddUint32(&globalCounter32, 1) + qid64 := uint64(qid32) << 8 + return qid64 + } + + // clear the last byte + qid = qid &^ 0xFF + + return qid +} + +func GetQidOwn() uint64 { + + id := atomic.AddUint64(&globalCounter64, 1) + + if id > 0x00ffffffffffffff { + atomic.StoreUint64(&globalCounter64, 1) + id = 1 + } + qid64 := uint64(config.Conf.InstanceID)<<56 | id + return qid64 +} + +func GetMd5HexStr(str string) string { + sum := md5.Sum([]byte(str)) + return hex.EncodeToString(sum[:]) +} + +func 
isValidChar(r rune) bool { + return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' +} + +func ToValidTableName(input string) string { + var builder strings.Builder + + for _, r := range input { + if isValidChar(r) { + builder.WriteRune(unicode.ToLower(r)) + } else { + builder.WriteRune('_') + } + } + + result := builder.String() + return result +} diff --git a/tools/keeper/version/version.go b/tools/keeper/version/version.go new file mode 100644 index 00000000000..c29a40c58e7 --- /dev/null +++ b/tools/keeper/version/version.go @@ -0,0 +1,11 @@ +package version + +var Version = "0.0.0.0" +var Gitinfo = "unknown" +var BuildInfo = "1970-01-01 00:00:00 +08:00" +var CommitID = "unknown" + +var CUS_NAME = "TDengine" +var CUS_PROMPT = "taos" + +var IsEnterprise = "false" diff --git a/tools/keeper/zbx_taos_keeper_templates.xml b/tools/keeper/zbx_taos_keeper_templates.xml new file mode 100644 index 00000000000..04e260cd218 --- /dev/null +++ b/tools/keeper/zbx_taos_keeper_templates.xml @@ -0,0 +1,111 @@ + + + 5.0 + 2021-12-06T05:55:45Z + + + taos + + + + + + \ No newline at end of file diff --git a/tools/shell/CMakeLists.txt b/tools/shell/CMakeLists.txt index fd46870ac54..4a8e0b9d34c 100644 --- a/tools/shell/CMakeLists.txt +++ b/tools/shell/CMakeLists.txt @@ -49,3 +49,32 @@ target_include_directories( ) SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) + +# +# generator library shell_ut for uint test +# + +IF(TD_LINUX) + # include + include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc) + # shell_ut library + add_library(shell_ut STATIC ${SHELL_SRC}) + + IF(TD_WEBSOCKET) + ADD_DEPENDENCIES(shell_ut taosws-rs) + ENDIF() + target_link_libraries(shell_ut PUBLIC taos ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP}) + target_link_libraries(shell_ut PRIVATE os common transport geometry util) + + # util depends + target_link_directories( + shell_ut + PUBLIC "${TD_SOURCE_DIR}/contrib/lzma2" + PUBLIC "${TD_SOURCE_DIR}/contrib/pcre2" + ) + + # unit test + IF(${BUILD_TEST}) 
+ ADD_SUBDIRECTORY(test) + ENDIF(${BUILD_TEST}) +ENDIF() diff --git a/tools/shell/inc/shellAuto.h b/tools/shell/inc/shellAuto.h index bcf500fefcb..c9d631f4b27 100644 --- a/tools/shell/inc/shellAuto.h +++ b/tools/shell/inc/shellAuto.h @@ -16,6 +16,10 @@ #ifndef __SHELL_AUTO__ #define __SHELL_AUTO__ +#ifdef __cplusplus +extern "C" { +#endif + #include "shellInt.h" #define TAB_KEY 0x09 @@ -47,4 +51,15 @@ void showAD(bool end); // show all commands help void showHelp(); + +// +// for unit test +// +bool fieldOptionsArea(char* p); +bool isCreateFieldsArea(char* p); + +#ifdef __cplusplus +} +#endif + #endif diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 4638f2ad74b..442329674da 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -102,7 +102,7 @@ void shellPrintHelp() { #include #endif -const char *argp_program_version = version; +const char *argp_program_version = td_version; #ifdef CUS_EMAIL const char *argp_program_bug_address = CUS_EMAIL; #else @@ -440,11 +440,11 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) { #ifdef TD_ENTERPRISE snprintf(shell.info.programVersion, sizeof(shell.info.programVersion), "%s\n%s version: %s compatible_version: %s\ngit: %s\ngitOfInternal: %s\nbuild: %s", TD_PRODUCT_NAME, - CUS_PROMPT, version, compatible_version, gitinfo, gitinfoOfInternal, buildinfo); + CUS_PROMPT, td_version, td_compatible_version, td_gitinfo, td_gitinfoOfInternal, td_buildinfo); #else snprintf(shell.info.programVersion, sizeof(shell.info.programVersion), - "%s\n%s version: %s compatible_version: %s\ngit: %s\nbuild: %s", TD_PRODUCT_NAME, CUS_PROMPT, version, - compatible_version, gitinfo, buildinfo); + "%s\n%s version: %s compatible_version: %s\ngit: %s\nbuild: %s", TD_PRODUCT_NAME, CUS_PROMPT, td_version, + td_compatible_version, td_gitinfo, td_buildinfo); #endif #if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32) diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c 
index 65ae9fad54f..959e2d6d62d 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -46,6 +46,7 @@ typedef struct SWord { int32_t len; struct SWord* next; bool free; // if true need free + bool end; // if true is last keyword } SWord; typedef struct { @@ -95,59 +96,62 @@ SWords shellCommands[] = { " ;", 0, 0, NULL}, {"create dnode ", 0, 0, NULL}, {"create index on ()", 0, 0, NULL}, - {"create mnode on dnode ;", 0, 0, NULL}, - {"create qnode on dnode ;", 0, 0, NULL}, + {"create mnode on dnode ;", 0, 0, NULL}, + {"create qnode on dnode ;", 0, 0, NULL}, {"create stream into as select", 0, 0, NULL}, // 26 append sub sql {"create topic as select", 0, 0, NULL}, // 27 append sub sql - {"create function as outputtype language ", 0, 0, NULL}, - {"create or replace as outputtype language ", 0, 0, NULL}, - {"create aggregate function as outputtype bufsize language ", 0, 0, NULL}, - {"create or replace aggregate function as outputtype bufsize language ", 0, 0, NULL}, + {"create tsma on function", 0, 0, NULL}, + {"create recursive tsma on interval(", 0, 0, NULL}, + {"create function as outputtype language ;", 0, 0, NULL}, + {"create or replace as outputtype language ;", 0, 0, NULL}, + {"create aggregate function as outputtype bufsize language ;", 0, 0, NULL}, + {"create or replace aggregate function as outputtype bufsize language ;", 0, 0, NULL}, {"create user pass sysinfo 0;", 0, 0, NULL}, {"create user pass sysinfo 1;", 0, 0, NULL}, #ifdef TD_ENTERPRISE {"create view as select", 0, 0, NULL}, {"compact database ", 0, 0, NULL}, #endif - {"describe ", 0, 0, NULL}, + {"describe ;", 0, 0, NULL}, {"delete from where ", 0, 0, NULL}, - {"drop database ", 0, 0, NULL}, - {"drop index ", 0, 0, NULL}, - {"drop table ", 0, 0, NULL}, - {"drop dnode ", 0, 0, NULL}, - {"drop mnode on dnode ;", 0, 0, NULL}, - {"drop qnode on dnode ;", 0, 0, NULL}, - {"drop user ;", 0, 0, NULL}, + {"drop database ;", 0, 0, NULL}, + {"drop index ;", 0, 0, NULL}, + {"drop table ;", 0, 0, 
NULL}, + {"drop dnode ;", 0, 0, NULL}, + {"drop mnode on dnode ;", 0, 0, NULL}, + {"drop qnode on dnode ;", 0, 0, NULL}, + {"drop user ;", 0, 0, NULL}, // 40 - {"drop function ;", 0, 0, NULL}, + {"drop function ;", 0, 0, NULL}, {"drop consumer group on ", 0, 0, NULL}, - {"drop topic ;", 0, 0, NULL}, - {"drop stream ;", 0, 0, NULL}, - {"explain select", 0, 0, NULL}, // 44 append sub sql - {"flush database ;", 0, 0, NULL}, + {"drop topic ;", 0, 0, NULL}, + {"drop stream ;", 0, 0, NULL}, + {"drop tsma ;", 0, 0, NULL}, + {"explain select ", 0, 0, NULL}, // 44 append sub sql + {"flush database ;", 0, 0, NULL}, {"help;", 0, 0, NULL}, - {"grant all on to ;", 0, 0, NULL}, - {"grant read on to ;", 0, 0, NULL}, - {"grant write on to ;", 0, 0, NULL}, - {"kill connection ;", 0, 0, NULL}, + {"grant all on to ;", 0, 0, NULL}, + {"grant read on to ;", 0, 0, NULL}, + {"grant write on to ;", 0, 0, NULL}, + {"kill connection ;", 0, 0, NULL}, {"kill query ", 0, 0, NULL}, {"kill transaction ", 0, 0, NULL}, #ifdef TD_ENTERPRISE - {"merge vgroup ", 0, 0, NULL}, + {"merge vgroup ;", 0, 0, NULL}, #endif - {"pause stream ;", 0, 0, NULL}, + {"pause stream ;", 0, 0, NULL}, #ifdef TD_ENTERPRISE - {"redistribute vgroup dnode ;", 0, 0, NULL}, + {"redistribute vgroup dnode ;", 0, 0, NULL}, #endif - {"resume stream ;", 0, 0, NULL}, + {"resume stream ;", 0, 0, NULL}, {"reset query cache;", 0, 0, NULL}, - {"restore dnode ;", 0, 0, NULL}, - {"restore vnode on dnode ;", 0, 0, NULL}, - {"restore mnode on dnode ;", 0, 0, NULL}, - {"restore qnode on dnode ;", 0, 0, NULL}, - {"revoke all on from ;", 0, 0, NULL}, - {"revoke read on from ;", 0, 0, NULL}, - {"revoke write on from ;", 0, 0, NULL}, + {"restore dnode ;", 0, 0, NULL}, + {"restore vnode on dnode ;", 0, 0, NULL}, + {"restore mnode on dnode ;", 0, 0, NULL}, + {"restore qnode on dnode ;", 0, 0, NULL}, + {"revoke all on from ;", 0, 0, NULL}, + {"revoke read on from ;", 0, 0, NULL}, + {"revoke write on from ;", 0, 0, NULL}, {"select * from ", 0, 0, 
NULL}, {"select client_version();", 0, 0, NULL}, // 60 @@ -160,15 +164,17 @@ SWords shellCommands[] = { {"select timezone();", 0, 0, NULL}, {"set max_binary_display_width ", 0, 0, NULL}, {"show apps;", 0, 0, NULL}, + {"show alive;", 0, 0, NULL}, {"show create database \\G;", 0, 0, NULL}, {"show create stable \\G;", 0, 0, NULL}, {"show create table \\G;", 0, 0, NULL}, #ifdef TD_ENTERPRISE {"show create view \\G;", 0, 0, NULL}, -#endif - {"show connections;", 0, 0, NULL}, {"show compact", 0, 0, NULL}, {"show compacts;", 0, 0, NULL}, + +#endif + {"show connections;", 0, 0, NULL}, {"show cluster;", 0, 0, NULL}, {"show cluster alive;", 0, 0, NULL}, {"show cluster machines;", 0, 0, NULL}, @@ -190,16 +196,17 @@ SWords shellCommands[] = { {"show subscriptions;", 0, 0, NULL}, {"show tables;", 0, 0, NULL}, {"show tables like", 0, 0, NULL}, - {"show table distributed ", 0, 0, NULL}, - {"show tags from ", 0, 0, NULL}, - {"show tags from ", 0, 0, NULL}, - {"show table tags from ", 0, 0, NULL}, + {"show table distributed ;", 0, 0, NULL}, + {"show tags from ;", 0, 0, NULL}, + {"show table tags from ;", 0, 0, NULL}, {"show topics;", 0, 0, NULL}, {"show transactions;", 0, 0, NULL}, + {"show tsmas;", 0, 0, NULL}, {"show users;", 0, 0, NULL}, {"show variables;", 0, 0, NULL}, {"show local variables;", 0, 0, NULL}, - {"show vnodes ", 0, 0, NULL}, + {"show vnodes;", 0, 0, NULL}, + {"show vnodes on dnode ;", 0, 0, NULL}, {"show vgroups;", 0, 0, NULL}, {"show consumers;", 0, 0, NULL}, {"show grants;", 0, 0, NULL}, @@ -207,22 +214,26 @@ SWords shellCommands[] = { {"show grants logs;", 0, 0, NULL}, #ifdef TD_ENTERPRISE {"show views;", 0, 0, NULL}, - {"split vgroup ", 0, 0, NULL}, + {"show arbgroups;", 0, 0, NULL}, + {"split vgroup ;", 0, 0, NULL}, + {"s3migrate database ;", 0, 0, NULL}, #endif {"insert into values(", 0, 0, NULL}, {"insert into using tags(", 0, 0, NULL}, {"insert into using values(", 0, 0, NULL}, {"insert into file ", 0, 0, NULL}, - {"trim database ", 0, 0, NULL}, - 
{"s3migrate database ", 0, 0, NULL}, - {"use ", 0, 0, NULL}, + {"trim database ;", 0, 0, NULL}, + {"use ;", 0, 0, NULL}, {"quit", 0, 0, NULL}}; +// where keyword char* keywords[] = { - "and ", "asc ", "desc ", "from ", "fill(", "limit ", "where ", + "where ", "and ", "asc ", "desc ", "from ", "fill(", "limit ", "interval(", "order by ", "order by ", "offset ", "or ", "group by ", "now()", "session(", "sliding ", "slimit ", "soffset ", "state_window(", "today() ", "union all select ", - "partition by "}; + "partition by ", "match", "nmatch ", "between ", "like ", "is null ", "is not null ", + "event_window ", "count_window(" +}; char* functions[] = { "count(", "sum(", @@ -255,6 +266,20 @@ char* functions[] = { "timezone(", "timetruncate(", "twa(", "to_unixtimestamp(", "unique(", "upper(", + "pi(", "round(", + "truncate(", "exp(", + "ln(", "mod(", + "rand(", "sign(", + "degrees(", "radians(", + "greatest(", "least(", + "char_length(", "char(", + "ascii(", "position(", + "trim(", "replace(", + "repeat(", "substring(", + "substring_index(","timediff(", + "week(", "weekday(", + "weekofyear(", "dayofweek(", + "stddev_pop(", "var_pop(" }; char* tb_actions[] = { @@ -275,7 +300,7 @@ char* db_options[] = {"keep ", "cachesize ", "comp ", "duration ", - "wal_fsync_period", + "wal_fsync_period ", "maxrows ", "minrows ", "pages ", @@ -284,17 +309,22 @@ char* db_options[] = {"keep ", "wal_level ", "vgroups ", "single_stable ", - "s3_chunksize ", - "s3_keeplocal ", - "s3_compact ", + "s3_chunksize ", + "s3_keeplocal ", + "s3_compact ", "wal_retention_period ", "wal_roll_period ", "wal_retention_size ", - "wal_segment_size "}; +#ifdef TD_ENTERPRISE + "encrypt_algorithm " +#endif + "keep_time_offset ", + "wal_segment_size " +}; char* alter_db_options[] = {"cachemodel ", "replica ", "keep ", "stt_trigger ", "wal_retention_period ", "wal_retention_size ", "cachesize ", - "s3_keeplocal ", "s3_compact ", + "s3_keeplocal ", "s3_compact ", "wal_fsync_period ", "buffer ", "pages " 
,"wal_level "}; char* data_types[] = {"timestamp", "int", @@ -304,6 +334,7 @@ char* data_types[] = {"timestamp", "int", "bigint", "bigint unsigned", "smallint", "smallint unsigned", "tinyint", "tinyint unsigned", + "geometry(64)", "varbinary(16)", "bool", "json"}; char* key_tags[] = {"tags("}; @@ -319,10 +350,20 @@ char* key_systable[] = { char* udf_language[] = {"\'Python\'", "\'C\'"}; +char* field_options[] = { + "encode ", "compress ", "level ", + "\'lz4\' ", "\'zlib\' ", "\'zstd\' ", "\'xz\' ", "\'tsz\' ", "\'disabled\' ", // compress + "\'simple8b\' ", "\'delta-i\' ", "\'delta-d\' ", "\'bit-packing\' ", + "\'high\' ", "\'medium\' ", "\'low\' ", + "comment ", + "primary key " +}; + // global keys can tips on anywhere char* global_keys[] = { "tbname", - "now", + "now", + "vgroups", "_wstart", "_wend", "_wduration", @@ -354,27 +395,29 @@ bool waitAutoFill = false; #define WT_VAR_STREAM 6 #define WT_VAR_UDFNAME 7 #define WT_VAR_VGROUPID 8 +#define WT_VAR_TSMA 9 -#define WT_FROM_DB_MAX 8 // max get content from db +#define WT_FROM_DB_MAX 9 // max get content from db #define WT_FROM_DB_CNT (WT_FROM_DB_MAX + 1) -#define WT_VAR_ALLTABLE 9 -#define WT_VAR_FUNC 10 -#define WT_VAR_KEYWORD 11 -#define WT_VAR_TBACTION 12 -#define WT_VAR_DBOPTION 13 -#define WT_VAR_ALTER_DBOPTION 14 -#define WT_VAR_DATATYPE 15 -#define WT_VAR_KEYTAGS 16 -#define WT_VAR_ANYWORD 17 -#define WT_VAR_TBOPTION 18 -#define WT_VAR_USERACTION 19 -#define WT_VAR_KEYSELECT 20 -#define WT_VAR_SYSTABLE 21 -#define WT_VAR_LANGUAGE 22 -#define WT_VAR_GLOBALKEYS 23 - -#define WT_VAR_CNT 24 +#define WT_VAR_ALLTABLE 10 +#define WT_VAR_FUNC 11 +#define WT_VAR_KEYWORD 12 +#define WT_VAR_TBACTION 13 +#define WT_VAR_DBOPTION 14 +#define WT_VAR_ALTER_DBOPTION 15 +#define WT_VAR_DATATYPE 16 +#define WT_VAR_KEYTAGS 17 +#define WT_VAR_ANYWORD 18 +#define WT_VAR_TBOPTION 19 +#define WT_VAR_USERACTION 20 +#define WT_VAR_KEYSELECT 21 +#define WT_VAR_SYSTABLE 22 +#define WT_VAR_LANGUAGE 23 +#define WT_VAR_GLOBALKEYS 24 
+#define WT_VAR_FIELD_OPTIONS 25 + +#define WT_VAR_CNT 26 #define WT_TEXT 0xFF @@ -387,12 +430,17 @@ TdThreadMutex tiresMutex; TdThread* threads[WT_FROM_DB_CNT]; // obtain var name with sql from server char varTypes[WT_VAR_CNT][64] = { + // get from db "", "", "", "", "", "", "", - "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "", ""}; + "", "", "", + // get from code + "", "", "", "", "", "", + "", "", "", "", "", "", "", + "", "", ""}; char varSqls[WT_FROM_DB_CNT][64] = {"show databases;", "show stables;", "show tables;", "show dnodes;", - "show users;", "show topics;", "show streams;", "show functions;", "show vgroups;"}; + "show users;", "show topics;", "show streams;", "show functions;", + "show vgroups;", "show tsmas;"}; // var words current cursor, if user press any one key except tab, cursorVar can be reset to -1 int cursorVar = -1; @@ -534,6 +582,7 @@ void showHelp() { select timezone();\n\ set max_binary_display_width ...\n\ show apps;\n\ + show alive;\n\ show create database ;\n\ show create stable ;\n\ show create table ;\n\ @@ -567,7 +616,8 @@ void showHelp() { show users;\n\ show variables;\n\ show local variables;\n\ - show vnodes \n\ + show vnodes;\n\ + show vnodes on dnode ;\n\ show vgroups;\n\ show consumers;\n\ show grants;\n\ @@ -588,8 +638,10 @@ void showHelp() { create view as select ...\n\ redistribute vgroup dnode ;\n\ split vgroup ;\n\ + s3migrate database ;\n\ show compacts;\n\ show compact \n\ + show arbgroups;\n\ show views;\n\ show create view ;"); #endif @@ -648,7 +700,12 @@ SWord* addWord(const char* p, int32_t len, bool pattern) { // check format if (pattern && len > 0) { - word->type = wordType(p, len); + if (p[len - 1] == ';') { + word->type = wordType(p, len - 1); + word->end = true; + } else { + word->type = wordType(p, len); + } } else { word->type = WT_TEXT; } @@ -756,6 +813,7 @@ bool shellAutoInit() { GenerateVarType(WT_VAR_SYSTABLE, key_systable, sizeof(key_systable) / sizeof(char*)); 
GenerateVarType(WT_VAR_LANGUAGE, udf_language, sizeof(udf_language) / sizeof(char*)); GenerateVarType(WT_VAR_GLOBALKEYS, global_keys, sizeof(global_keys) / sizeof(char*)); + GenerateVarType(WT_VAR_FIELD_OPTIONS, field_options, sizeof(field_options) / sizeof(char*)); return true; } @@ -1254,9 +1312,9 @@ void printScreen(TAOS* con, SShellCmd* cmd, SWords* match) { const char* str = NULL; int strLen = 0; + SWord* word = MATCH_WORD(match); if (firstMatchIndex == curMatchIndex && lastWordBytes == -1) { // first press tab - SWord* word = MATCH_WORD(match); str = word->word + match->matchLen; strLen = word->len - match->matchLen; lastMatchIndex = firstMatchIndex; @@ -1264,8 +1322,6 @@ void printScreen(TAOS* con, SShellCmd* cmd, SWords* match) { } else { if (lastWordBytes == -1) return; deleteCount(cmd, lastWordBytes); - - SWord* word = MATCH_WORD(match); str = word->word; strLen = word->len; // set current to last @@ -1273,8 +1329,22 @@ void printScreen(TAOS* con, SShellCmd* cmd, SWords* match) { lastWordBytes = word->len; } - // insert new - shellInsertStr(cmd, (char*)str, strLen); + if (word->end && str[strLen - 1] != ';') { + // append end ';' + char* p = taosMemoryCalloc(strLen + 8, 1); + if (p) { + tstrncpy(p, str, strLen + 1); + tstrncpy(p + strLen, ";", 1 + 1); + lastWordBytes += 1; + shellInsertStr(cmd, (char*)p, strLen + 1); + taosMemoryFree(p); + } else { + shellInsertStr(cmd, (char*)str, strLen); + } + } else { + // insert new + shellInsertStr(cmd, (char*)str, strLen); + } } // main key press tab , matched return true else false @@ -1648,38 +1718,69 @@ bool matchSelectQuery(TAOS* con, SShellCmd* cmd) { return appendAfterSelect(con, cmd, p, len); } -// if is input create fields or tags area, return true -bool isCreateFieldsArea(char* p) { - // put to while, support like create table st(ts timestamp, bin1 binary(16), bin2 + blank + TAB - char* p1 = taosStrdup(p); - bool ret = false; - while (1) { - char* left = strrchr(p1, '('); - if (left == NULL) { - // like 
'create table st' - ret = false; - break; - } +// is fields option area +bool fieldOptionsArea(char* p) { + char* p1 = strrchr(p, '('); + char* p2 = strrchr(p, ','); + if (p1 == NULL && p2 == NULL) { + return false; + } - char* right = strrchr(p1, ')'); - if (right == NULL) { - // like 'create table st( ' - ret = true; - break; - } + // find tags + if (strstr(p, " tags") != NULL) { + return false; + } - if (left > right) { - // like 'create table st( ts timestamp, age int) tags(area ' - ret = true; - break; + if (p2 == NULL) { + // first field area + p2 = p1; + } + + // find blank count + int32_t cnt = 0; + while (p2) { + p2 = strchr(p2, ' '); + if (p2) { + // get prev char + char prec = *(p2 - 1); + if (prec != ',' && prec != '(') { + // blank if before comma, not calc count. like st(ts timestamp, age int + BLANK + TAB only two blank + cnt++; + } + + // continue blank is one blank + while (p2[1] != 0 && p2[1] == ' ') { + // move next if blank again + p2 += 1; + } + p2 += 1; } + } + + // like create table st(ts timestamp TAB-KEY or st(ts timestamp , age int TAB-KEY + return cnt >= 2; +} - // set string end by small for next strrchr search - *left = 0; +// if is input create fields or tags area, return true +bool isCreateFieldsArea(char* p) { + int32_t n = 0; // count + char* p1 = p; + while (*p1 != 0) { + switch (*p1) { + case '(': + ++n; + break; + case ')': + --n; + break; + default: + break; + } + // move next + ++p1; } - taosMemoryFree(p1); - return ret; + return n > 0; } bool matchCreateTable(TAOS* con, SShellCmd* cmd) { @@ -1718,7 +1819,13 @@ bool matchCreateTable(TAOS* con, SShellCmd* cmd) { // check in create fields or tags input area if (isCreateFieldsArea(ps)) { - ret = fillWithType(con, cmd, last, WT_VAR_DATATYPE); + if (fieldOptionsArea(ps)) { + // fill field options + ret = fillWithType(con, cmd, last, WT_VAR_FIELD_OPTIONS); + } else { + // fill field + ret = fillWithType(con, cmd, last, WT_VAR_DATATYPE); + } } // tags @@ -1726,7 +1833,7 @@ bool 
matchCreateTable(TAOS* con, SShellCmd* cmd) { // find only one ')' , can insert tags char* p1 = strchr(ps, ')'); if (p1) { - if (strchr(p1 + 1, ')') == NULL && strstr(p1 + 1, "tags") == NULL) { + if (strstr(p1 + 1, "tags") == NULL) { // can insert tags keyword ret = fillWithType(con, cmd, last, WT_VAR_KEYTAGS); } diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 2a583f948e7..6d56aa7fe27 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -22,6 +22,8 @@ #include "shellAuto.h" #include "shellInt.h" +SShellObj shell = {0}; + typedef struct { const char *sql; bool vertical; @@ -56,7 +58,7 @@ static void shellWriteHistory(); static void shellPrintError(TAOS_RES *tres, int64_t st); static bool shellIsCommentLine(char *line); static void shellSourceFile(const char *file); -static bool shellGetGrantInfo(char *buf); +static bool shellGetGrantInfo(char* buf); static void shellCleanup(void *arg); static void *shellCancelHandler(void *arg); diff --git a/tools/shell/src/shellMain.c b/tools/shell/src/shellMain.c index 71acf23e41f..fc6ba0f7d89 100644 --- a/tools/shell/src/shellMain.c +++ b/tools/shell/src/shellMain.c @@ -17,8 +17,7 @@ #include "shellInt.h" #include "shellAuto.h" -SShellObj shell = {0}; - +extern SShellObj shell; void shellCrashHandler(int signum, void *sigInfo, void *context) { taosIgnSignal(SIGTERM); diff --git a/tools/shell/src/shellNettest.c b/tools/shell/src/shellNettest.c index 2e5ec7bc24b..d1ecf503d24 100644 --- a/tools/shell/src/shellNettest.c +++ b/tools/shell/src/shellNettest.c @@ -34,7 +34,7 @@ static void shellWorkAsClient() { rpcInit.user = "_dnd"; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &rpcInit.compatibilityVer); clientRpc = rpcOpen(&rpcInit); if (clientRpc == NULL) { printf("failed to init net test client since %s\r\n", terrstr()); @@ -125,7 +125,7 @@ static void shellWorkAsServer() { 
rpcInit.connType = TAOS_CONN_SERVER; rpcInit.idleTime = tsShellActivityTimer * 1000; - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &rpcInit.compatibilityVer); void *serverRpc = rpcOpen(&rpcInit); if (serverRpc == NULL) { diff --git a/tools/shell/test/CMakeLists.txt b/tools/shell/test/CMakeLists.txt new file mode 100644 index 00000000000..1eb6c709ab3 --- /dev/null +++ b/tools/shell/test/CMakeLists.txt @@ -0,0 +1,25 @@ + +MESSAGE(STATUS "build taos-CLI unit test") + +IF(NOT TD_DARWIN) + # GoogleTest requires at least C++11 + SET(CMAKE_CXX_STANDARD 11) + AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) + + ADD_EXECUTABLE(shellTest ${SOURCE_LIST}) + TARGET_LINK_LIBRARIES( + shellTest + PRIVATE shell_ut gtest os common transport geometry util + ) + + target_include_directories( + shell_ut + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inc" + ) + + + add_test( + NAME shellTest + COMMAND shellTest + ) +ENDIF() diff --git a/tools/shell/test/shellTest.cpp b/tools/shell/test/shellTest.cpp new file mode 100644 index 00000000000..cf0ec503fe3 --- /dev/null +++ b/tools/shell/test/shellTest.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include "shellAuto.h" + +TEST(fieldOptionsArea, autoTabTest) { + printf("hellow world SHELL tab test\n"); + + // str false + const char *s0[] = { + "create table st(ts ", + "create table st(ts timestamp, age ", + "create table st(ts timestamp, age", + "create table st(ts timestamp, age int , name ", + "create table st(ts timestamp, age int , name binary(16)", + "create table st(ts timestamp, age int , name binary(16) ) tags( ", + "create table st(ts timestamp, age int , name binary(16) ) tags( area int, addr ", + "create table st(ts timestamp, age int , name binary(16) ) tags( area int,addr varbinary", + "create table st(ts timestamp, age int, name binary(16)) tags(area int , addr varbinary(32)", + "create table st( ts timestamp, age int, name binary(16)) tags( area int, addr", + "create table st (ts timestamp , age int, name binary(16) , area int,", + "create table st (ts timestamp , age int, name binary(16) ) tags ( area int ,addr varbinary", + "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) level " + "'high' , no i", + "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) encode " + "'simple8b' level 'high', no in", + }; + + // str true + const char *s1[] = { + "create table st(ts timestamp ", + "create table st(ts timestamp, age int ", + "create table st(ts timestamp, age int ", + "create table st(ts timestamp, age int , name binary(16) ", + "create table st(ts timestamp, age int , name binary(16) ", + "create table st(ts timestamp, age int , name binary(16) , addr varbinary( 32 ) ", + "create table st(ts timestamp, age int , name binary(16) ,area int, addr varbinary(32) ", + "create table st(ts timestamp, age int , name binary(16), area int,addr varbinary(32) ", + "create table st(ts timestamp, age int, name binary(16) , area int,addr varbinary(32) ", + "create table st( ts timestamp, age int, name binary(16) ,area int,addr varbinary(32) ", 
+ "create table st (ts timestamp , age int, name binary(16), area int,addr varbinary(32) ", + "create table st (ts timestamp , age int, name binary(16), area int , addr varbinary(32) compress 'zlib' ", + "create table st (ts timestamp , age int, name binary(16), area int , addr varbinary(32) level 'high' ", + "create table st (ts timestamp , age int, name binary(16) , area int , addr varbinary(32) encode 'simple8b' " + "level 'high' ", + }; + + // s0 is false + for (int32_t i = 0; i < sizeof(s0) / sizeof(char *); i++) { + printf("s0 i=%d fieldOptionsArea %s expect false \n", i, s0[i]); + ASSERT(fieldOptionsArea((char *)s0[i]) == false); + } + + // s1 is true + for (int32_t i = 0; i < sizeof(s1) / sizeof(char *); i++) { + printf("s1 i=%d fieldOptionsArea %s expect true \n", i, s1[i]); + ASSERT(fieldOptionsArea((char *)s1[i]) == true); + } +} + +TEST(isCreateFieldsArea, autoTabTest) { + printf("hellow world SHELL tab test\n"); + + // str false + const char *s0[] = { + "create table st(ts )", + "create table st(ts timestamp, age) ", + "create table st(ts timestamp, age)", + "create table st(ts timestamp, age int , name binary(16) )", + "create table st(ts timestamp, age int , name binary(16))", + "create table st(ts timestamp, age int , name binary(16) ) tags( )", + "create table st(ts timestamp, age int , name binary(16) ) tags( area int, addr )", + "create table st(ts timestamp, age int , name binary(16) ) tags( area int,addr varbinary)", + "create table st(ts timestamp, age int, name binary(16)) tags(area int , addr varbinary(32))", + "create table st( ts timestamp, age int, name binary(16)) tags( area int, addr int)", + "create table st (ts timestamp , age int, name binary(16) ) tags ( area int,addr varbinary(32) )", + "create table st (ts timestamp , age int, name binary(16) ) tags ( area int ,addr varbinary(14))", + "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) level " + "'high' )", + "create table st (ts 
timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) encode " + "'simple8b' level 'high' ) ", + }; + + // str true + const char *s1[] = { + "create table st(ts timestamp ", + "create table st(ts timestamp, age int ", + "create table st(ts timestamp, age int ,", + "create table st(ts timestamp, age int , name binary(16), ", + "create table st(ts timestamp, age int , name binary(16) ", + "create table st(ts timestamp, age int , name binary(16) ) tags( area int ", + "create table st(ts timestamp, age int , name binary(16) ) tags( area int, addr varbinary(32) ", + "create table st(ts timestamp, age int , name binary(16) ) tags( area int,addr varbinary(32)", + "create table st(ts timestamp, age int, name binary(16)) tags(area int,addr varbinary(32) ", + "create table st( ts timestamp, age int, name binary(16)) tags(area int,addr varbinary(32) ", + "create table st (ts timestamp , age int, name binary(16) ) tags ( area int, addr varbinary(32) ", + "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) compress " + "'zlib' ", + "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) level " + "'high' ", + "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) encode " + "'simple8b' level 'high' ", + }; + + // s0 is false + for (int32_t i = 0; i < sizeof(s0) / sizeof(char *); i++) { + printf("s0 i=%d isCreateFieldsArea %s expect false. \n", i, s0[i]); + ASSERT(isCreateFieldsArea((char *)s0[i]) == false); + } + + // s1 is true + for (int32_t i = 0; i < sizeof(s1) / sizeof(char *); i++) { + printf("s1 i=%d isCreateFieldsArea %s expect true. 
\n", i, s1[i]); + ASSERT(isCreateFieldsArea((char *)s1[i]) == true); + } +} + +int main(int argc, char **argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} \ No newline at end of file diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index 55fa3362211..a8d4fafb03f 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -105,6 +105,113 @@ int smlProcess_telnet_Test() { return code; } +int smlProcess_telnet0_Test() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + + TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1"); + taos_free_result(pRes); + + pRes = taos_query(taos, "use sml_db"); + taos_free_result(pRes); + + const char *sql1[] = {"sysif.bytes.out 1479496100 1.3E0 host=web01 interface=eth0"}; + pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_TELNET_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS); + printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); + int code = taos_errno(pRes); + ASSERT(code == 0); + taos_free_result(pRes); + + const char *sql2[] = {"sysif.bytes.out 1479496700 1.6E0 host=web01 interface=eth0"}; + pRes = taos_schemaless_insert(taos, (char **)sql2, sizeof(sql2) / sizeof(sql2[0]), TSDB_SML_TELNET_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS); + printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); + code = taos_errno(pRes); + ASSERT(code == 0); + taos_free_result(pRes); + + const char *sql3[] = {"sysif.bytes.out 1479496300 1.1E0 interface=eth0 host=web01"}; + pRes = taos_schemaless_insert(taos, (char **)sql3, sizeof(sql3) / sizeof(sql3[0]), TSDB_SML_TELNET_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS); + printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); + code = taos_errno(pRes); + ASSERT(code == 0); + taos_free_result(pRes); + + taos_close(taos); + + return code; +} + +int smlProcess_json0_Test() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + + 
TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db"); + taos_free_result(pRes); + + pRes = taos_query(taos, "use sml_db"); + taos_free_result(pRes); + + const char *sql[] = { + "[{\"metric\":\"syscpu.nice\",\"timestamp\":1662344045,\"value\":9,\"tags\":{\"host\":\"web02\",\"dc\":4}}]"}; + + char *sql1[1] = {0}; + for (int i = 0; i < 1; i++) { + sql1[i] = taosMemoryCalloc(1, 1024); + ASSERT(sql1[i] != NULL); + (void)strncpy(sql1[i], sql[i], 1023); + } + + pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_JSON_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS); + int code = taos_errno(pRes); + if (code != 0) { + printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); + } else { + printf("%s result:success\n", __FUNCTION__); + } + taos_free_result(pRes); + + for (int i = 0; i < 1; i++) { + taosMemoryFree(sql1[i]); + } + ASSERT(code == 0); + + + const char *sql2[] = { + "[{\"metric\":\"syscpu.nice\",\"timestamp\":1662344041,\"value\":13,\"tags\":{\"host\":\"web01\",\"dc\":1}" + "},{\"metric\":\"syscpu.nice\",\"timestamp\":1662344042,\"value\":9,\"tags\":{\"host\":\"web02\",\"dc\":4}" + "}]", + }; + + char *sql3[1] = {0}; + for (int i = 0; i < 1; i++) { + sql3[i] = taosMemoryCalloc(1, 1024); + ASSERT(sql3[i] != NULL); + (void)strncpy(sql3[i], sql2[i], 1023); + } + + pRes = taos_schemaless_insert(taos, (char **)sql3, sizeof(sql3) / sizeof(sql3[0]), TSDB_SML_JSON_PROTOCOL, + TSDB_SML_TIMESTAMP_NANO_SECONDS); + code = taos_errno(pRes); + if (code != 0) { + printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); + } else { + printf("%s result:success\n", __FUNCTION__); + } + taos_free_result(pRes); + + for (int i = 0; i < 1; i++) { + taosMemoryFree(sql3[i]); + } + + ASSERT(code == 0); + + taos_close(taos); + + return code; +} + int smlProcess_json1_Test() { TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); @@ -1775,6 +1882,21 @@ int sml_td24559_Test() { pRes = taos_query(taos, "create database 
if not exists td24559"); taos_free_result(pRes); + const char *sql1[] = { + "sttb,t1=1 f1=283i32,f2=g\"\" 1632299372000", + "sttb,t1=1 f2=G\"Point(4.343 89.342)\",f1=106i32 1632299373000", + }; + + pRes = taos_query(taos, "use td24559"); + taos_free_result(pRes); + + pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_MILLI_SECONDS); + int code = taos_errno(pRes); + printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes)); + ASSERT(code); + taos_free_result(pRes); + const char *sql[] = { "stb,t1=1 f1=283i32,f2=g\"Point(4.343 89.342)\" 1632299372000", "stb,t1=1 f2=G\"Point(4.343 89.342)\",f1=106i32 1632299373000", @@ -1788,7 +1910,7 @@ int sml_td24559_Test() { pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); - int code = taos_errno(pRes); + code = taos_errno(pRes); printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes)); taos_free_result(pRes); @@ -2136,7 +2258,8 @@ int main(int argc, char *argv[]) { taos_options(TSDB_OPTION_CONFIGDIR, argv[1]); } - int ret = 0; + int ret = smlProcess_json0_Test(); + ASSERT(!ret); ret = sml_ts5528_test(); ASSERT(!ret); ret = sml_td29691_Test(); @@ -2173,6 +2296,8 @@ int main(int argc, char *argv[]) { ASSERT(!ret); ret = smlProcess_telnet_Test(); ASSERT(!ret); + ret = smlProcess_telnet0_Test(); + ASSERT(!ret); ret = smlProcess_json1_Test(); ASSERT(!ret); ret = smlProcess_json2_Test(); diff --git a/utils/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c index 3a79a3763cb..cd70dd88f59 100644 --- a/utils/test/c/tmq_taosx_ci.c +++ b/utils/test/c/tmq_taosx_ci.c @@ -79,6 +79,7 @@ static void msg_process(TAOS_RES* msg) { } else { taosFprintfFile(g_fp, result); taosFprintfFile(g_fp, "\n"); + taosFsyncFile(g_fp); } } } @@ -132,7 +133,7 @@ int buildDatabase(TAOS* pConn, TAOS_RES* pRes) { pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", 
true)"); if (taos_errno(pRes) != 0) { - printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes)); + printf("failed to create child table ct0, reason:%s\n", taos_errstr(pRes)); return -1; } taos_free_result(pRes); @@ -175,7 +176,7 @@ int buildDatabase(TAOS* pConn, TAOS_RES* pRes) { pRes = taos_query( pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, " - "'ddd') ct0 values(1626006833603, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')"); + "'ddd') ct0 values(1626006833603, 4, 3, 'hwj') ct1 values(1626006833703, 23, 32, 's21ds')"); if (taos_errno(pRes) != 0) { printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes)); return -1; @@ -189,6 +190,41 @@ int buildDatabase(TAOS* pConn, TAOS_RES* pRes) { } taos_free_result(pRes); + pRes = taos_query(pConn, "insert into ct1 values(1736006813600, -32222, 43, 'ewb', 99)"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "alter table st1 drop column c4"); + if (taos_errno(pRes) != 0) { + printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into ct1 values(1736006833600, -4223, 344, 'bfs')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "alter table st1 add column c4 bigint"); + if (taos_errno(pRes) != 0) { + printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into ct1 values(1766006833600, -4432, 4433, 'e23wb', 9349)"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + pRes = 
taos_query(pConn, "alter table st1 modify column c3 binary(64)"); if (taos_errno(pRes) != 0) { printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); @@ -596,6 +632,7 @@ tmq_t* build_consumer() { tmq_conf_set(conf, "enable.auto.commit", "true"); tmq_conf_set(conf, "auto.offset.reset", "earliest"); tmq_conf_set(conf, "msg.consume.excluded", "1"); +// tmq_conf_set(conf, "session.timeout.ms", "1000000"); // tmq_conf_set(conf, "max.poll.interval.ms", "20000"); if (g_conf.snapShot) { @@ -636,6 +673,7 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 5000); if (tmqmessage) { cnt++; + printf("cnt:%d\n", cnt); msg_process(tmqmessage); taos_free_result(tmqmessage); } else { @@ -844,6 +882,8 @@ void initLogFile() { "{\"name\":\"t1\",\"type\":4,\"value\":3000}],\"createList\":[]}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\"," "\"colType\":5}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":6,\"colName\":\"c4\"}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":7,\"colName\":\"c3\"," "\"colType\":8,\"colLength\":64}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":1,\"colName\":\"t2\"," @@ -991,6 +1031,8 @@ void initLogFile() { "{\"name\":\"t1\",\"type\":4,\"value\":3000}],\"createList\":[]}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\"," "\"colType\":5}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":6,\"colName\":\"c4\"}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}", 
"{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":7,\"colName\":\"c3\"," "\"colType\":8,\"colLength\":64}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":1,\"colName\":\"t2\"," diff --git a/utils/test/c/write_raw_block_test.c b/utils/test/c/write_raw_block_test.c index 8ed997bc921..ae4a606e6e6 100644 --- a/utils/test/c/write_raw_block_test.c +++ b/utils/test/c/write_raw_block_test.c @@ -19,196 +19,77 @@ #include "taos.h" #include "types.h" -int buildStable(TAOS* pConn) { - TAOS_RES* pRes = taos_query(pConn, - "CREATE STABLE `meters` (`ts` TIMESTAMP, `current` INT, `voltage` INT, `phase` FLOAT) TAGS " - "(`groupid` INT, `location` VARCHAR(16))"); - if (taos_errno(pRes) != 0) { - printf("failed to create super table meters, reason:%s\n", taos_errstr(pRes)); - return -1; - } +TAOS* pConn = NULL; +void action(char* sql) { + TAOS_RES* pRes = taos_query(pConn, sql); + ASSERT(taos_errno(pRes) == 0); taos_free_result(pRes); - - pRes = taos_query(pConn, "create table d0 using meters tags(1, 'San Francisco')"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table d0, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into d0 (ts, current) values (now, 120)"); - if (taos_errno(pRes) != 0) { - printf("failed to insert into table d0, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table d1 using meters tags(2, 'San Francisco')"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table d1, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table d2 using meters tags(3, 'San Francisco')"); - if (taos_errno(pRes) != 0) { - printf("failed to create child table d2, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table ntba(ts 
timestamp, addr binary(32))"); - if (taos_errno(pRes) != 0) { - printf("failed to create ntba, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create table ntbb(ts timestamp, addr binary(8))"); - if (taos_errno(pRes) != 0) { - printf("failed to create ntbb, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into ntba values(now,'123456789abcdefg123456789')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert table ntba, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "insert into ntba values(now + 1s,'hello')"); - if (taos_errno(pRes) != 0) { - printf("failed to insert table ntba, reason:%s\n", taos_errstr(pRes)); - return -1; - } - taos_free_result(pRes); - - return 0; } -int32_t init_env() { - TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); - if (pConn == NULL) { - return -1; - } - int32_t ret = -1; - - TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_raw"); - if (taos_errno(pRes) != 0) { - printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes)); - goto END; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "create database if not exists db_raw vgroups 2"); - if (taos_errno(pRes) != 0) { - printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes)); - goto END; - } - taos_free_result(pRes); - - pRes = taos_query(pConn, "use db_raw"); - if (taos_errno(pRes) != 0) { - printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes)); - goto END; - } - taos_free_result(pRes); - - buildStable(pConn); - - pRes = taos_query(pConn, "select * from d0"); - if (taos_errno(pRes) != 0) { - printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes)); - goto END; - } - void *data = NULL; +int32_t test_write_raw_block(char* query, char* dst) { + TAOS_RES* pRes = taos_query(pConn, query); + ASSERT(taos_errno(pRes) == 0); + void* 
data = NULL; int32_t numOfRows = 0; int error_code = taos_fetch_raw_block(pRes, &numOfRows, &data); - if(error_code !=0 ){ - printf("error fetch raw block, reason:%s\n", taos_errstr(pRes)); - goto END; - } - - taos_write_raw_block(pConn, numOfRows, data, "d1"); + ASSERT(error_code == 0); + error_code = taos_write_raw_block(pConn, numOfRows, data, dst); taos_free_result(pRes); + return error_code; +} - pRes = taos_query(pConn, "select ts,phase from d0"); - if (taos_errno(pRes) != 0) { - printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes)); - goto END; - } - error_code = taos_fetch_raw_block(pRes, &numOfRows, &data); - if(error_code !=0 ){ - printf("error fetch raw block, reason:%s\n", taos_errstr(pRes)); - goto END; - } +int32_t test_write_raw_block_with_fields(char* query, char* dst) { + TAOS_RES* pRes = taos_query(pConn, query); + ASSERT(taos_errno(pRes) == 0); + void* data = NULL; + int32_t numOfRows = 0; + int error_code = taos_fetch_raw_block(pRes, &numOfRows, &data); + ASSERT(error_code == 0); int numFields = taos_num_fields(pRes); - TAOS_FIELD *fields = taos_fetch_fields(pRes); - taos_write_raw_block_with_fields(pConn, numOfRows, data, "d2", fields, numFields); + TAOS_FIELD* fields = taos_fetch_fields(pRes); + error_code = taos_write_raw_block_with_fields(pConn, numOfRows, data, dst, fields, numFields); taos_free_result(pRes); + return error_code; +} - // check error msg - pRes = taos_query(pConn, "select * from ntba"); - if (taos_errno(pRes) != 0) { - printf("error select * from ntba, reason:%s\n", taos_errstr(pRes)); - goto END; - } - - data = NULL; - numOfRows = 0; - error_code = taos_fetch_raw_block(pRes, &numOfRows, &data); - if(error_code !=0 ){ - printf("error fetch select * from ntba, reason:%s\n", taos_errstr(pRes)); - goto END; - } - error_code = taos_write_raw_block(pConn, numOfRows, data, "ntbb"); - if(error_code == 0) { - printf(" taos_write_raw_block to ntbb expect failed , but success!\n"); - goto END; - } - - // pass NULL return 
last error code describe - const char* err = tmq_err2str(error_code); - printf("write_raw_block return code =0x%x err=%s\n", error_code, err); - if(strcmp(err, "success") == 0) { - printf("expect failed , but error string is success! err=%s\n", err); - goto END; - } - - // no exist table - error_code = taos_write_raw_block(pConn, numOfRows, data, "no-exist-table"); - if(error_code == 0) { - printf(" taos_write_raw_block to no-exist-table expect failed , but success!\n"); - goto END; - } - - err = tmq_err2str(error_code); - printf("write_raw_block no exist table return code =0x%x err=%s\n", error_code, err); - if(strcmp(err, "success") == 0) { - printf("expect failed write no exist table, but error string is success! err=%s\n", err); - goto END; - } - - // success - ret = 0; - -END: - // free - if(pRes) taos_free_result(pRes); - if(pConn) taos_close(pConn); - return ret; +void init_env() { + pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT(pConn); + + action("drop database if exists db_raw"); + action("create database if not exists db_raw vgroups 2"); + action("use db_raw"); + + action( + "CREATE STABLE `meters` (`ts` TIMESTAMP, `current` INT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, " + "`location` VARCHAR(16))"); + action("create table d0 using meters tags(1, 'San Francisco')"); + action("create table d1 using meters tags(2, 'San Francisco')"); + action("create table d2 using meters tags(3, 'San Francisco')"); + action("insert into d0 (ts, current) values (now, 120)"); + + action("create table ntba(ts timestamp, addr binary(32))"); + action("create table ntbb(ts timestamp, addr binary(8))"); + action("create table ntbc(ts timestamp, addr binary(8), c2 int)"); + + action("insert into ntba values(now,'123456789abcdefg123456789')"); + action("insert into ntbb values(now + 1s,'hello')"); + action("insert into ntbc values(now + 13s, 'sdf', 123)"); } int main(int argc, char* argv[]) { - printf("test write_raw_block...\n"); - int ret = 
init_env(); - if (ret < 0) { - printf("test write_raw_block failed.\n"); - return ret; - } - printf("test write_raw_block ok.\n"); + printf("test write_raw_block start.\n"); + init_env(); + ASSERT(test_write_raw_block("select * from d0", "d1") == 0); // test schema same + ASSERT(test_write_raw_block("select * from ntbb", "ntba") == 0); // test schema compatible + ASSERT(test_write_raw_block("select * from ntbb", "ntbc") == 0); // test schema small + ASSERT(test_write_raw_block("select * from ntbc", "ntbb") == 0); // test schema bigger + ASSERT(test_write_raw_block("select * from ntba", "ntbb") != 0); // test schema mismatch + ASSERT(test_write_raw_block("select * from ntba", "no-exist-table") != 0); // test no exist table + ASSERT(test_write_raw_block("select addr from ntba", "ntbb") != 0); // test without ts + ASSERT(test_write_raw_block_with_fields("select ts,phase from d0", "d2") == 0); // test with fields + + printf("test write_raw_block end.\n"); return 0; } \ No newline at end of file