diff --git a/CHANGELOG.md b/CHANGELOG.md index cf84414658344d..cfc3a375ec6932 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,13 +6,37 @@ **Merged pull requests:** +- Windows fixes \(chart labels and warnings\) [\#18796](https://github.com/netdata/netdata/pull/18796) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#18794](https://github.com/netdata/netdata/pull/18794) ([netdatabot](https://github.com/netdatabot)) +- Add ref to dyncfg [\#18793](https://github.com/netdata/netdata/pull/18793) ([Ancairon](https://github.com/Ancairon)) +- systemd-journal; support querying archived files [\#18792](https://github.com/netdata/netdata/pull/18792) ([ktsaou](https://github.com/ktsaou)) +- Ιmplementation to add logs integrations [\#18791](https://github.com/netdata/netdata/pull/18791) ([Ancairon](https://github.com/Ancairon)) +- Fix broken claiming via kickstart on some systems. [\#18789](https://github.com/netdata/netdata/pull/18789) ([Ferroin](https://github.com/Ferroin)) +- Fix atomic builtins test that currently fails for llvm+compiler\_rt when gcc is not present [\#18788](https://github.com/netdata/netdata/pull/18788) ([StormBytePP](https://github.com/StormBytePP)) +- fix\(netdata-updater.sh\): ensure `--non-interactive` flag is passed during self-update [\#18786](https://github.com/netdata/netdata/pull/18786) ([ilyam8](https://github.com/ilyam8)) +- Windows Network Interfaces Charts and Alerts [\#18785](https://github.com/netdata/netdata/pull/18785) ([ktsaou](https://github.com/ktsaou)) +- Document ML enabled `auto` [\#18784](https://github.com/netdata/netdata/pull/18784) ([stelfrag](https://github.com/stelfrag)) +- Bump github.com/redis/go-redis/v9 from 9.6.1 to 9.6.2 in /src/go [\#18783](https://github.com/netdata/netdata/pull/18783) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Update README.md, fix a typo [\#18781](https://github.com/netdata/netdata/pull/18781) ([BobConanDev](https://github.com/BobConanDev)) +- 
fix\(go.d/apcupsd\): fix ups\_load value divided by 100 [\#18780](https://github.com/netdata/netdata/pull/18780) ([ilyam8](https://github.com/ilyam8)) +- unify claiming response json [\#18777](https://github.com/netdata/netdata/pull/18777) ([ktsaou](https://github.com/ktsaou)) +- fix\(go.d/sd/netlisteners\): fix exec deadline exceeded check [\#18774](https://github.com/netdata/netdata/pull/18774) ([ilyam8](https://github.com/ilyam8)) +- Sqlite upgrade to version 3.46.1 [\#18772](https://github.com/netdata/netdata/pull/18772) ([stelfrag](https://github.com/stelfrag)) +- Regenerate integrations.js [\#18771](https://github.com/netdata/netdata/pull/18771) ([netdatabot](https://github.com/netdatabot)) +- Bump github.com/bmatcuk/doublestar/v4 from 4.6.1 to 4.7.1 in /src/go [\#18768](https://github.com/netdata/netdata/pull/18768) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/sijms/go-ora/v2 from 2.8.20 to 2.8.22 in /src/go [\#18767](https://github.com/netdata/netdata/pull/18767) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/vmware/govmomi from 0.43.0 to 0.44.1 in /src/go [\#18766](https://github.com/netdata/netdata/pull/18766) ([dependabot[bot]](https://github.com/apps/dependabot)) - SPAWN SERVER: close all open fds on callback [\#18764](https://github.com/netdata/netdata/pull/18764) ([ktsaou](https://github.com/ktsaou)) - Adjust option \(Windows claim\) [\#18763](https://github.com/netdata/netdata/pull/18763) ([thiagoftsm](https://github.com/thiagoftsm)) +- NetFramework \(Part I\) [\#18762](https://github.com/netdata/netdata/pull/18762) ([thiagoftsm](https://github.com/thiagoftsm)) +- Expand ml enabled option [\#18761](https://github.com/netdata/netdata/pull/18761) ([stelfrag](https://github.com/stelfrag)) +- Fix storing of repeat field [\#18760](https://github.com/netdata/netdata/pull/18760) ([stelfrag](https://github.com/stelfrag)) - local-listeners without libmnl 
[\#18759](https://github.com/netdata/netdata/pull/18759) ([ktsaou](https://github.com/ktsaou)) - fix\(proc.plugin/zfs\): fix arcstats.pm [\#18758](https://github.com/netdata/netdata/pull/18758) ([ilyam8](https://github.com/ilyam8)) - fix\(go.d/sd/net\_listeners\): exit if local-listeners constantly times out [\#18757](https://github.com/netdata/netdata/pull/18757) ([ilyam8](https://github.com/ilyam8)) - Regenerate integrations.js [\#18756](https://github.com/netdata/netdata/pull/18756) ([netdatabot](https://github.com/netdatabot)) - Update metadata.yaml [\#18755](https://github.com/netdata/netdata/pull/18755) ([Ancairon](https://github.com/Ancairon)) +- Remove the overview section from cloud notif. integrations [\#18754](https://github.com/netdata/netdata/pull/18754) ([Ancairon](https://github.com/Ancairon)) - Add Ubuntu 24.10 and Fedora 41 to CI. [\#18753](https://github.com/netdata/netdata/pull/18753) ([Ferroin](https://github.com/Ferroin)) - Simplify sentence on cloud notification integrations [\#18750](https://github.com/netdata/netdata/pull/18750) ([Ancairon](https://github.com/Ancairon)) - Regenerate integrations.js [\#18749](https://github.com/netdata/netdata/pull/18749) ([netdatabot](https://github.com/netdatabot)) @@ -54,6 +78,7 @@ - Update windows documentation [\#18703](https://github.com/netdata/netdata/pull/18703) ([Ancairon](https://github.com/Ancairon)) - Detect when swap is disabled when agent is running [\#18702](https://github.com/netdata/netdata/pull/18702) ([stelfrag](https://github.com/stelfrag)) - Bump golang.org/x/net from 0.29.0 to 0.30.0 in /src/go [\#18701](https://github.com/netdata/netdata/pull/18701) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Load chart labels on demand [\#18699](https://github.com/netdata/netdata/pull/18699) ([stelfrag](https://github.com/stelfrag)) - fix system-info disk space in LXC [\#18696](https://github.com/netdata/netdata/pull/18696) ([ilyam8](https://github.com/ilyam8)) - fix ram usage 
calculation in LXC [\#18695](https://github.com/netdata/netdata/pull/18695) ([ilyam8](https://github.com/ilyam8)) - cgroups.plugin: call `setresuid` before spawn server init [\#18694](https://github.com/netdata/netdata/pull/18694) ([ilyam8](https://github.com/ilyam8)) @@ -61,6 +86,7 @@ - go.d/nvidia\_smi: use configured "timeout" in loop mode [\#18692](https://github.com/netdata/netdata/pull/18692) ([ilyam8](https://github.com/ilyam8)) - fix\(cgroups.plugin\): handle containers no env vars [\#18691](https://github.com/netdata/netdata/pull/18691) ([daniel-sampliner](https://github.com/daniel-sampliner)) - MSSQL Metrics \(Part II\). [\#18689](https://github.com/netdata/netdata/pull/18689) ([thiagoftsm](https://github.com/thiagoftsm)) +- Log to windows [\#18688](https://github.com/netdata/netdata/pull/18688) ([ktsaou](https://github.com/ktsaou)) - fix sanitization issues [\#18687](https://github.com/netdata/netdata/pull/18687) ([ktsaou](https://github.com/ktsaou)) - Regenerate integrations.js [\#18686](https://github.com/netdata/netdata/pull/18686) ([netdatabot](https://github.com/netdatabot)) - go.d/chrony: collect serverstats using chronyc [\#18685](https://github.com/netdata/netdata/pull/18685) ([ilyam8](https://github.com/ilyam8)) @@ -277,7 +303,6 @@ - go.d boinc [\#18398](https://github.com/netdata/netdata/pull/18398) ([ilyam8](https://github.com/ilyam8)) - remove python.d/boinc [\#18397](https://github.com/netdata/netdata/pull/18397) ([ilyam8](https://github.com/ilyam8)) - fix warnings in Dockerfile [\#18395](https://github.com/netdata/netdata/pull/18395) ([NicolasCARPi](https://github.com/NicolasCARPi)) -- Use existing ACLK event loop for cloud queries [\#18218](https://github.com/netdata/netdata/pull/18218) ([stelfrag](https://github.com/stelfrag)) ## [v1.47.4](https://github.com/netdata/netdata/tree/v1.47.4) (2024-10-09) @@ -410,29 +435,6 @@ - Update Installer Code \(Services\) [\#18253](https://github.com/netdata/netdata/pull/18253) 
([thiagoftsm](https://github.com/thiagoftsm)) - Don’t install netdata-updater code on Windows. [\#18251](https://github.com/netdata/netdata/pull/18251) ([Ferroin](https://github.com/Ferroin)) - Add trigger to clean up chart labels and charts [\#18248](https://github.com/netdata/netdata/pull/18248) ([stelfrag](https://github.com/stelfrag)) -- Update README.md [\#18246](https://github.com/netdata/netdata/pull/18246) ([Steve8291](https://github.com/Steve8291)) -- Bump github.com/tidwall/gjson from 1.17.1 to 1.17.3 in /src/go [\#18244](https://github.com/netdata/netdata/pull/18244) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Fix OS detection messages in CMake. [\#18243](https://github.com/netdata/netdata/pull/18243) ([Ferroin](https://github.com/Ferroin)) -- Clean up unneeded depencdencies. [\#18242](https://github.com/netdata/netdata/pull/18242) ([Ferroin](https://github.com/Ferroin)) -- Update node info payload [\#18240](https://github.com/netdata/netdata/pull/18240) ([stelfrag](https://github.com/stelfrag)) -- Bump github.com/prometheus-community/pro-bing from 0.4.0 to 0.4.1 in /src/go [\#18237](https://github.com/netdata/netdata/pull/18237) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/vmware/govmomi from 0.38.0 to 0.39.0 in /src/go [\#18236](https://github.com/netdata/netdata/pull/18236) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/gofrs/flock from 0.12.0 to 0.12.1 in /src/go [\#18235](https://github.com/netdata/netdata/pull/18235) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/docker/docker from 27.0.3+incompatible to 27.1.1+incompatible in /src/go [\#18234](https://github.com/netdata/netdata/pull/18234) ([dependabot[bot]](https://github.com/apps/dependabot)) -- eBPF memory [\#18232](https://github.com/netdata/netdata/pull/18232) ([thiagoftsm](https://github.com/thiagoftsm)) -- Windows build fix [\#18231](https://github.com/netdata/netdata/pull/18231) 
([Ferroin](https://github.com/Ferroin)) -- Default to release with debug symbols for Windows builds. [\#18230](https://github.com/netdata/netdata/pull/18230) ([Ferroin](https://github.com/Ferroin)) -- Fix up CMake feature handling for Windows. [\#18229](https://github.com/netdata/netdata/pull/18229) ([Ferroin](https://github.com/Ferroin)) -- Improve windows agent [\#18227](https://github.com/netdata/netdata/pull/18227) ([thiagoftsm](https://github.com/thiagoftsm)) -- Update libbpf \(1.45.0\) [\#18226](https://github.com/netdata/netdata/pull/18226) ([thiagoftsm](https://github.com/thiagoftsm)) -- Regenerate integrations.js [\#18225](https://github.com/netdata/netdata/pull/18225) ([netdatabot](https://github.com/netdatabot)) -- Update Cloud MSTeam documentation [\#18224](https://github.com/netdata/netdata/pull/18224) ([car12o](https://github.com/car12o)) -- Add code signing for Windows executables. [\#18222](https://github.com/netdata/netdata/pull/18222) ([Ferroin](https://github.com/Ferroin)) -- go.d fix netdata dir path on win under msys [\#18221](https://github.com/netdata/netdata/pull/18221) ([ilyam8](https://github.com/ilyam8)) -- Port Tomcat collector to Go [\#18220](https://github.com/netdata/netdata/pull/18220) ([Ancairon](https://github.com/Ancairon)) -- go.d drop using cancelreader [\#18219](https://github.com/netdata/netdata/pull/18219) ([ilyam8](https://github.com/ilyam8)) -- Support IPV6 when establishing MQTT connection to the cloud [\#18217](https://github.com/netdata/netdata/pull/18217) ([stelfrag](https://github.com/stelfrag)) -- Regenerate integrations.js [\#18216](https://github.com/netdata/netdata/pull/18216) ([netdatabot](https://github.com/netdatabot)) ## [v1.46.3](https://github.com/netdata/netdata/tree/v1.46.3) (2024-07-23) diff --git a/CMakeLists.txt b/CMakeLists.txt index b2c8d21e1e6703..2b629395e60086 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -448,7 +448,8 @@ int main() { check_cxx_source_compiles(" int main() { - 
__atomic_load_8(nullptr, 0); + int test; + __atomic_load_n(&test, 0); return 0; } " HAVE_BUILTIN_ATOMICS) @@ -670,10 +671,10 @@ set(LIBNETDATA_FILES src/libnetdata/libnetdata.h src/libnetdata/locks/locks.c src/libnetdata/locks/locks.h - src/libnetdata/log/journal.c - src/libnetdata/log/journal.h - src/libnetdata/log/log.c - src/libnetdata/log/log.h + src/libnetdata/log/systemd-journal-helpers.c + src/libnetdata/log/systemd-journal-helpers.h + src/libnetdata/log/nd_log.c + src/libnetdata/log/nd_log.h src/libnetdata/os/os.c src/libnetdata/os/os.h src/libnetdata/simple_hashtable.h @@ -822,6 +823,25 @@ set(LIBNETDATA_FILES src/libnetdata/sanitizers/sanitizers-functions.h src/libnetdata/sanitizers/sanitizers-pluginsd.c src/libnetdata/sanitizers/sanitizers-pluginsd.h + src/libnetdata/log/nd_log-internals.c + src/libnetdata/log/nd_log-internals.h + src/libnetdata/log/nd_log_limit.c + src/libnetdata/log/nd_log_limit.h + src/libnetdata/log/nd_log-config.c + src/libnetdata/log/nd_log-init.c + src/libnetdata/log/nd_log-to-syslog.c + src/libnetdata/log/nd_log-to-systemd-journal.c + src/libnetdata/log/nd_log-annotators.c + src/libnetdata/log/nd_log-field-formatters.c + src/libnetdata/log/nd_log-format-logfmt.c + src/libnetdata/log/nd_log-format-json.c + src/libnetdata/log/nd_log-to-file.c + src/libnetdata/log/nd_log-to-windows-events.c + src/libnetdata/string/utf8.c + src/libnetdata/spawn_server/log-forwarder.c + src/libnetdata/spawn_server/log-forwarder.h + src/libnetdata/log/nd_log-common.h + src/libnetdata/log/nd_log-to-windows-common.h ) if(ENABLE_PLUGIN_EBPF) @@ -1439,12 +1459,13 @@ set(WINDOWS_EVENTS_PLUGIN_FILES src/collectors/windows-events.plugin/windows-events-unicode.h src/collectors/windows-events.plugin/windows-events-xml.c src/collectors/windows-events.plugin/windows-events-xml.h - src/collectors/windows-events.plugin/windows-events-publishers.c - src/collectors/windows-events.plugin/windows-events-publishers.h + 
src/collectors/windows-events.plugin/windows-events-providers.c + src/collectors/windows-events.plugin/windows-events-providers.h src/collectors/windows-events.plugin/windows-events-fields-cache.c src/collectors/windows-events.plugin/windows-events-fields-cache.h src/collectors/windows-events.plugin/windows-events-query-builder.c src/collectors/windows-events.plugin/windows-events-query-builder.h + src/collectors/windows-events.plugin/windows-events-query-evt-variant.c ) set(WINDOWS_PLUGIN_FILES @@ -1461,6 +1482,7 @@ set(WINDOWS_PLUGIN_FILES src/collectors/windows.plugin/perflib-thermalzone.c src/collectors/windows.plugin/perflib-objects.c src/collectors/windows.plugin/perflib-network.c + src/collectors/windows.plugin/perflib-netframework.c src/collectors/windows.plugin/perflib-memory.c src/collectors/windows.plugin/perflib-processes.c src/collectors/windows.plugin/perflib-web-service.c @@ -1745,10 +1767,90 @@ target_include_directories(libnetdata BEFORE PUBLIC ${CONFIG_H_DIR} ${CMAKE_SOUR target_link_libraries(libnetdata PUBLIC "$<$>:atomic>" "$<$,$>:pthread;rt>" - "$<$:kernel32;advapi32;winmm;rpcrt4;bcrypt>" + "$<$:kernel32;advapi32;winmm;rpcrt4;bcrypt;wevtapi>" "$<$:m>" "${SYSTEMD_LDFLAGS}") +if(OS_WINDOWS) + set(HAVE_ETW True) + set(HAVE_WEL True) + + # Output the results for debugging purposes + message(STATUS "Have Event Tracing for Windows (ETW): ${HAVE_ETW}") + message(STATUS "Have Windows Event Log (WEL): ${HAVE_WEL}") + + if(HAVE_WEL OR HAVE_ETW) + # Define the source and generated file paths + set(WEVT_GEN_SRC_H_FILE "${CMAKE_SOURCE_DIR}/src/libnetdata/log/nd_log-to-windows-common.h") + set(WEVT_GEN_SRC_C_FILE "${CMAKE_SOURCE_DIR}/src/libnetdata/log/wevt_netdata_mc_generate.c") + set(WEVT_GEN_BIN_FILE "${CMAKE_BINARY_DIR}/wevt_netdata_mc_generate") + + set(WEVT_BUILD_SCRIPT "${CMAKE_SOURCE_DIR}/src/libnetdata/log/wevt_netdata_compile.sh") + + set(WEVT_MC_FILE "${CMAKE_BINARY_DIR}/wevt_netdata.mc") + set(WEVT_MAN_FILE 
"${CMAKE_BINARY_DIR}/wevt_netdata_manifest.xml") + set(WEVT_RC_FILE "${CMAKE_BINARY_DIR}/wevt_netdata.rc") + set(WEVT_MC_H_FILE "${CMAKE_BINARY_DIR}/wevt_netdata.h") + set(WEVT_MAN_H_FILE "${CMAKE_BINARY_DIR}/wevt_netdata_manifest.h") + set(WEVT_RES_OBJECT "${CMAKE_BINARY_DIR}/wevt_netdata_res.o") + + set(WEVT_DLL_FILE "${CMAKE_BINARY_DIR}/wevt_netdata.dll") + set(WEVT_ETW_INSTALL_SCRIPT "${CMAKE_SOURCE_DIR}/src/libnetdata/log/wevt_netdata_install.bat") + + # we compile ${WEVT_GEN_BIN_FILE}, which generates the manifest, the .mc, + # and the headers required for compiling libnetdata/logs + + if(HAVE_ETW) + # ETW method also supports WEL + # but it requires Microsoft tools mc, rc, and link + add_custom_command( + OUTPUT "${WEVT_MC_H_FILE}" "${WEVT_MAN_H_FILE}" "${WEVT_DLL_FILE}" + COMMAND "${CMAKE_C_COMPILER}" -o "${WEVT_GEN_BIN_FILE}" "${WEVT_GEN_SRC_C_FILE}" + COMMAND "${WEVT_GEN_BIN_FILE}" >"${WEVT_MC_FILE}" + COMMAND "${WEVT_GEN_BIN_FILE}" --manifest >"${WEVT_MAN_FILE}" + COMMAND "${WEVT_BUILD_SCRIPT}" "${CMAKE_SOURCE_DIR}/src/libnetdata/log" "${CMAKE_BINARY_DIR}" + DEPENDS "${WEVT_GEN_SRC_C_FILE}" "${WEVT_GEN_SRC_H_FILE}" + COMMENT "Compiling ${WEVT_MC_FILE} to generate ${WEVT_MC_H_FILE} and ${WEVT_DLL_FILE}" + ) + else() + # WEL method can be built with windmc, windres and the normal linker + add_custom_command( + OUTPUT "${WEVT_MC_H_FILE}" "${WEVT_DLL_FILE}" + COMMAND "${CMAKE_C_COMPILER}" -o "${WEVT_GEN_BIN_FILE}" "${WEVT_GEN_SRC_C_FILE}" + COMMAND "${WEVT_GEN_BIN_FILE}" >"${WEVT_MC_FILE}" + COMMAND "${WEVT_GEN_BIN_FILE}" --manifest >"${WEVT_MAN_FILE}" + COMMAND windmc -r "${CMAKE_BINARY_DIR}" -h "${CMAKE_BINARY_DIR}" ${WEVT_MC_FILE} + COMMAND echo "1 2004" "wevt_netdata_manifest.xml" >> "${WEVT_RC_FILE}" + COMMAND windres ${WEVT_RC_FILE} -o ${WEVT_RES_OBJECT} + COMMAND ${CMAKE_LINKER} -dll --entry 0 -nostdlib -o ${WEVT_DLL_FILE} ${WEVT_RES_OBJECT} + DEPENDS "${WEVT_GEN_SRC_C_FILE}" "${WEVT_GEN_SRC_H_FILE}" + COMMENT "Compiling ${WEVT_MC_FILE} to generate 
${WEVT_MC_H_FILE} and ${WEVT_DLL_FILE}" + ) + endif() + + # Create a custom target for the DLL + add_custom_target(wevt_netdata ALL DEPENDS ${WEVT_DLL_FILE}) + + set_source_files_properties(src/libnetdata/log/nd_log-to-windows-events.c PROPERTIES + OBJECT_DEPENDS "${WEVT_MC_H_FILE}") + + if(HAVE_ETW) + set_source_files_properties(src/libnetdata/log/nd_log-to-windows-events.c PROPERTIES + OBJECT_DEPENDS "${WEVT_MAN_H_FILE}") + + install(FILES "${WEVT_DLL_FILE}" "${WEVT_MAN_FILE}" "${WEVT_ETW_INSTALL_SCRIPT}" + COMPONENT wevt_netdata_dll + DESTINATION "${BINDIR}") + else() + # do not install the manifest in this case + # the nsi installer will skip registering the ETW publisher + install(FILES "${WEVT_DLL_FILE}" + COMPONENT wevt_netdata_dll + DESTINATION "${BINDIR}") + endif() + endif() +endif() + # ebpf if(ENABLE_PLUGIN_EBPF) netdata_add_libbpf_to_target(libnetdata) @@ -1924,8 +2026,7 @@ if(ENABLE_PLUGIN_APPS) add_executable(apps.plugin ${APPS_PLUGIN_FILES}) target_link_libraries(apps.plugin libnetdata ${CAP_LIBRARIES} - "$<$:Version>" - "$<$:ntdll>") + "$<$:Version;ntdll>") target_include_directories(apps.plugin PRIVATE ${CAP_INCLUDE_DIRS}) target_compile_options(apps.plugin PRIVATE ${CAP_CFLAGS_OTHER}) diff --git a/README.md b/README.md index d3d9ef4fc70b44..fea7040c34bf8e 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,7 @@ Netdata scales effortlessly from a single server to thousands, even in complex, Netdata Agents can be linked to together (in parent-child relationships), to build observability centralization points within your infrastructure, allowing you to control data replication and retention at multiple levels. - :fire: **Fully Automated Powerful Visualization**
- Using the NIDL (Nodes, Instances, Dimensions & Labels) data model, the Netdata Agent enables the creation of fully automated dashboards, providing corellated visualization of all metrics, allowing you to understand any dataset at first sight, but also to filter, slice and dice the data directly on the dashboards, without the need to learn a query language. + Using the NIDL (Nodes, Instances, Dimensions & Labels) data model, the Netdata Agent enables the creation of fully automated dashboards, providing correlated visualization of all metrics, allowing you to understand any dataset at first sight, but also to filter, slice and dice the data directly on the dashboards, without the need to learn a query language. Note: the Netdata UI is closed-source, but free to use with Netdata Agents and Netdata Cloud. diff --git a/docs/netdata-agent/configuration/README.md b/docs/netdata-agent/configuration/README.md index 0685329c9e05ae..abe5113139aeff 100644 --- a/docs/netdata-agent/configuration/README.md +++ b/docs/netdata-agent/configuration/README.md @@ -1,5 +1,9 @@ # Netdata Agent Configuration +> **Info** +> +> Netdata Cloud lets you configure Agents on the fly. Check the [Dynamic Configuration Manager](/docs/netdata-agent/configuration/dynamic-configuration.md) documentation for details. + The main Netdata Agent configuration is `netdata.conf`. ## The Netdata config directory diff --git a/docs/netdata-agent/configuration/dynamic-configuration.md b/docs/netdata-agent/configuration/dynamic-configuration.md index 4434a2982cc996..006de102b34a2e 100644 --- a/docs/netdata-agent/configuration/dynamic-configuration.md +++ b/docs/netdata-agent/configuration/dynamic-configuration.md @@ -1,6 +1,8 @@ # Dynamic Configuration Manager -**Netdata Cloud paid subscription required.** +> **Info** +> +> Netdata Cloud paid subscription is required. The Dynamic Configuration Manager allows direct configuration of collectors and alerts through the Netdata UI. 
This feature allows users to: diff --git a/integrations/cloud-notifications/integrations/amazon_sns.md b/integrations/cloud-notifications/integrations/amazon_sns.md index be37c34aa93adb..94feda0d865173 100644 --- a/integrations/cloud-notifications/integrations/amazon_sns.md +++ b/integrations/cloud-notifications/integrations/amazon_sns.md @@ -9,13 +9,8 @@ endmeta--> # Amazon SNS - - -You can configure notification delivery to AWS SNS from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/integrations/discord.md b/integrations/cloud-notifications/integrations/discord.md index 2177aeb99d21df..03d446278ddc68 100644 --- a/integrations/cloud-notifications/integrations/discord.md +++ b/integrations/cloud-notifications/integrations/discord.md @@ -9,13 +9,8 @@ endmeta--> # Discord - - -You can configure notification delivery to your Discord server from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/integrations/ilert.md b/integrations/cloud-notifications/integrations/ilert.md index 68bc2005bfc651..31243d6cc81fd8 100644 --- a/integrations/cloud-notifications/integrations/ilert.md +++ b/integrations/cloud-notifications/integrations/ilert.md @@ -9,13 +9,8 @@ endmeta--> # ilert - - -You can configure notification delivery to ilert from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/integrations/mattermost.md b/integrations/cloud-notifications/integrations/mattermost.md index 977e6cfeb49203..5af1058d352e03 100644 --- a/integrations/cloud-notifications/integrations/mattermost.md +++ b/integrations/cloud-notifications/integrations/mattermost.md @@ -9,13 +9,8 @@ endmeta--> # Mattermost - - -You can configure notification delivery to Mattermost from the Netdata Cloud UI. 
- - ## Setup diff --git a/integrations/cloud-notifications/integrations/microsoft_teams.md b/integrations/cloud-notifications/integrations/microsoft_teams.md index 7f877bb4788dcd..169a00ab3d82a8 100644 --- a/integrations/cloud-notifications/integrations/microsoft_teams.md +++ b/integrations/cloud-notifications/integrations/microsoft_teams.md @@ -9,13 +9,8 @@ endmeta--> # Microsoft Teams - - -You can configure notification delivery to a Microsoft Teams channel from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/integrations/netdata_mobile_app.md b/integrations/cloud-notifications/integrations/netdata_mobile_app.md index 16f594b95bb0e7..e8285bf0984c65 100644 --- a/integrations/cloud-notifications/integrations/netdata_mobile_app.md +++ b/integrations/cloud-notifications/integrations/netdata_mobile_app.md @@ -9,13 +9,8 @@ endmeta--> # Netdata Mobile App - - -You can configure notification delivery to the Netdata Mobile Application from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/integrations/opsgenie.md b/integrations/cloud-notifications/integrations/opsgenie.md index e74b0a5ff21a67..1587fd0b697215 100644 --- a/integrations/cloud-notifications/integrations/opsgenie.md +++ b/integrations/cloud-notifications/integrations/opsgenie.md @@ -9,13 +9,8 @@ endmeta--> # Opsgenie - - -You can configure notification delivery to Opsgenie from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/integrations/pagerduty.md b/integrations/cloud-notifications/integrations/pagerduty.md index e32b1baec88eb2..b39a97c93b9cb7 100644 --- a/integrations/cloud-notifications/integrations/pagerduty.md +++ b/integrations/cloud-notifications/integrations/pagerduty.md @@ -9,13 +9,8 @@ endmeta--> # PagerDuty - - -You can configure notification delivery to PagerDuty from the Netdata Cloud UI. 
- - ## Setup diff --git a/integrations/cloud-notifications/integrations/rocketchat.md b/integrations/cloud-notifications/integrations/rocketchat.md index c8805ca3396489..710cd16fa1a509 100644 --- a/integrations/cloud-notifications/integrations/rocketchat.md +++ b/integrations/cloud-notifications/integrations/rocketchat.md @@ -9,13 +9,8 @@ endmeta--> # RocketChat - - -You can configure notification delivery to RocketChat from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/integrations/slack.md b/integrations/cloud-notifications/integrations/slack.md index 2f5b13397a6d07..e86f5b2160ae84 100644 --- a/integrations/cloud-notifications/integrations/slack.md +++ b/integrations/cloud-notifications/integrations/slack.md @@ -9,13 +9,8 @@ endmeta--> # Slack - - -You can configure notification delivery to Slack from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/integrations/splunk.md b/integrations/cloud-notifications/integrations/splunk.md index ebe8968c6d80f4..d7f8b17d6395e2 100644 --- a/integrations/cloud-notifications/integrations/splunk.md +++ b/integrations/cloud-notifications/integrations/splunk.md @@ -9,13 +9,8 @@ endmeta--> # Splunk - - -You can configure notification delivery to Splunk from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/integrations/splunk_victorops.md b/integrations/cloud-notifications/integrations/splunk_victorops.md index 06532c5e4ef86f..7bf881f4e48f85 100644 --- a/integrations/cloud-notifications/integrations/splunk_victorops.md +++ b/integrations/cloud-notifications/integrations/splunk_victorops.md @@ -9,13 +9,8 @@ endmeta--> # Splunk VictorOps - - -You can configure notification delivery to Splunk On-Call/VictorOps from the Netdata Cloud UI. 
- - ## Setup diff --git a/integrations/cloud-notifications/integrations/telegram.md b/integrations/cloud-notifications/integrations/telegram.md index 938dd468ee82d6..93867e474a72bd 100644 --- a/integrations/cloud-notifications/integrations/telegram.md +++ b/integrations/cloud-notifications/integrations/telegram.md @@ -9,13 +9,8 @@ endmeta--> # Telegram - - -You can configure notification delivery to Telegram from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/integrations/webhook.md b/integrations/cloud-notifications/integrations/webhook.md index b2edcb356aec98..4d94c2c4ee31f4 100644 --- a/integrations/cloud-notifications/integrations/webhook.md +++ b/integrations/cloud-notifications/integrations/webhook.md @@ -9,13 +9,8 @@ endmeta--> # Webhook - - -You can configure notification delivery to a webhook using a predefined schema from the Netdata Cloud UI. - - ## Setup diff --git a/integrations/cloud-notifications/metadata.yaml b/integrations/cloud-notifications/metadata.yaml index 5589945c399516..586a169981b534 100644 --- a/integrations/cloud-notifications/metadata.yaml +++ b/integrations/cloud-notifications/metadata.yaml @@ -11,9 +11,6 @@ - mobile-app - phone - personal-notifications - overview: - notification_description: "You can configure notification delivery to the Netdata Mobile Application from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -44,9 +41,6 @@ keywords: - discord - community - overview: - notification_description: "You can configure notification delivery to your Discord server from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -85,9 +79,6 @@ icon_filename: "pagerduty.png" keywords: - pagerduty - overview: - notification_description: "You can configure notification delivery to PagerDuty from the Netdata Cloud UI." 
- notification_limitations: "" setup: description: | ### Prerequisites @@ -127,9 +118,6 @@ icon_filename: "slack.png" keywords: - slack - overview: - notification_description: "You can configure notification delivery to Slack from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -175,9 +163,6 @@ keywords: - opsgenie - atlassian - overview: - notification_description: "You can configure notification delivery to Opsgenie from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -215,9 +200,6 @@ icon_filename: "mattermost.png" keywords: - mattermost - overview: - notification_description: "You can configure notification delivery to Mattermost from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -260,9 +242,6 @@ icon_filename: "rocketchat.png" keywords: - rocketchat - overview: - notification_description: "You can configure notification delivery to RocketChat from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -306,9 +285,6 @@ icon_filename: "awssns.png" keywords: - awssns - overview: - notification_description: "You can configure notification delivery to AWS SNS from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -351,9 +327,6 @@ keywords: - microsoft - teams - overview: - notification_description: "You can configure notification delivery to a Microsoft Teams channel from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -396,9 +369,6 @@ icon_filename: "telegram.svg" keywords: - Telegram - overview: - notification_description: "You can configure notification delivery to Telegram from the Netdata Cloud UI." 
- notification_limitations: "" setup: description: | ### Prerequisites @@ -441,9 +411,6 @@ icon_filename: "splunk-black.svg" keywords: - Splunk - overview: - notification_description: "You can configure notification delivery to Splunk from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -479,9 +446,6 @@ - VictorOps - Splunk - On-Call - overview: - notification_description: "You can configure notification delivery to Splunk On-Call/VictorOps from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -515,9 +479,6 @@ keywords: - generic webhooks - webhooks - overview: - notification_description: "You can configure notification delivery to a webhook using a predefined schema from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites @@ -765,9 +726,6 @@ icon_filename: "ilert.svg" keywords: - ilert - overview: - notification_description: "You can configure notification delivery to ilert from the Netdata Cloud UI." - notification_limitations: "" setup: description: | ### Prerequisites diff --git a/integrations/gen_docs_integrations.py b/integrations/gen_docs_integrations.py index 51a59ed48aa505..73d3b32b930dc8 100644 --- a/integrations/gen_docs_integrations.py +++ b/integrations/gen_docs_integrations.py @@ -1,7 +1,7 @@ import json +import re import shutil from pathlib import Path -import re # Dictionary responsible for making the symbolic links at the end of the script's run. 
symlink_dict = {} @@ -25,10 +25,14 @@ def cleanup(): for element in Path("integrations/cloud-notifications").glob('**/*/'): if "integrations" in str(element) and not "metadata.yaml" in str(element): shutil.rmtree(element) + for element in Path("integrations/logs").glob('**/*/'): + if "integrations" in str(element) and "metadata.yaml" not in str(element): + shutil.rmtree(element) for element in Path("integrations/cloud-authentication").glob('**/*/'): if "integrations" in str(element) and not "metadata.yaml" in str(element): shutil.rmtree(element) + def generate_category_from_name(category_fragment, category_array): """ Takes a category ID in splitted form ("." as delimiter) and the array of the categories, and returns the proper category name that Learn expects. @@ -46,7 +50,7 @@ def generate_category_from_name(category_fragment, category_array): try: # print("equals") # print(fragment, category_fragment[i+1]) - dummy_id = dummy_id + "." + category_fragment[i+1] + dummy_id = dummy_id + "." 
+ category_fragment[i + 1] # print(dummy_id) except IndexError: return category_name.split("/", 1)[1] @@ -75,14 +79,18 @@ def add_custom_edit_url(markdown_string, meta_yaml_link, sidebar_label_string, m """ output = "" + path_to_md_file = "" if mode == 'default': path_to_md_file = f'{meta_yaml_link.replace("/metadata.yaml", "")}/integrations/{clean_string(sidebar_label_string)}' - elif mode == 'cloud-notifications': + elif mode == 'cloud-notification': path_to_md_file = meta_yaml_link.replace("metadata.yaml", f'integrations/{clean_string(sidebar_label_string)}') - elif mode == 'agent-notifications': + elif mode == 'agent-notification': + path_to_md_file = meta_yaml_link.replace("metadata.yaml", "README") + + elif mode == 'logs': path_to_md_file = meta_yaml_link.replace("metadata.yaml", "README") elif mode == 'cloud-authentication': @@ -122,23 +130,29 @@ def read_integrations_js(path_to_file): print("Exception", e) -def create_overview(integration, filename): +def create_overview(integration, filename, overview_key_name="overview"): + # empty overview_key_name to have only image on overview + if not overview_key_name: + return f"""# {integration['meta']['name']} - split = re.split(r'(#.*\n)', integration['overview'], 1) + +""" + + split = re.split(r'(#.*\n)', integration[overview_key_name], 1) first_overview_part = split[1] rest_overview_part = split[2] - if len(filename) > 0: - return f"""{first_overview_part} + if not filename: + return f"""{first_overview_part}{rest_overview_part} +""" + + return f"""{first_overview_part} {rest_overview_part} """ - else: - return f"""{first_overview_part}{rest_overview_part} -""" def build_readme_from_integration(integration, mode=''): @@ -150,7 +164,8 @@ def build_readme_from_integration(integration, mode=''): meta_yaml = integration['edit_link'].replace("blob", "edit") sidebar_label = integration['meta']['monitored_instance']['name'] learn_rel_path = generate_category_from_name( - 
integration['meta']['monitored_instance']['categories'][0].split("."), categories).replace("Data Collection", "Collecting Metrics") + integration['meta']['monitored_instance']['categories'][0].split("."), categories).replace( + "Data Collection", "Collecting Metrics") most_popular = integration['meta']['most_popular'] # build the markdown string @@ -221,7 +236,7 @@ def build_readme_from_integration(integration, mode=''): print("Exception in exporter md construction", e, integration['id']) # NOTIFICATIONS - elif mode == 'notification': + elif mode == 'agent-notification': try: # initiate the variables for the notification method meta_yaml = integration['edit_link'].replace("blob", "edit") @@ -238,7 +253,7 @@ def build_readme_from_integration(integration, mode=''): message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE NOTIFICATION'S metadata.yaml FILE" endmeta--> -{create_overview(integration, integration['meta']['icon_filename'])}""" +{create_overview(integration, integration['meta']['icon_filename'], "overview")}""" if integration['setup']: md += f""" @@ -252,7 +267,67 @@ def build_readme_from_integration(integration, mode=''): except Exception as e: print("Exception in notification md construction", e, integration['id']) - + + elif mode == 'cloud-notification': + try: + # initiate the variables for the notification method + meta_yaml = integration['edit_link'].replace("blob", "edit") + sidebar_label = integration['meta']['name'] + learn_rel_path = generate_category_from_name(integration['meta']['categories'][0].split("."), categories) + + # build the markdown string + md = \ + f""" + +{create_overview(integration, integration['meta']['icon_filename'], "")}""" + + if integration['setup']: + md += f""" +{integration['setup']} +""" + + if integration['troubleshooting']: + md += f""" +{integration['troubleshooting']} +""" + + except Exception as e: + print("Exception in notification md construction", e, integration['id']) + + elif mode == 'logs': + try: + # 
initiate the variables for the logs integration + meta_yaml = integration['edit_link'].replace("blob", "edit") + sidebar_label = integration['meta']['name'] + learn_rel_path = generate_category_from_name(integration['meta']['categories'][0].split("."), categories) + + # build the markdown string + md = \ + f""" + +{create_overview(integration, integration['meta']['icon_filename'])}""" + + if integration['setup']: + md += f""" +{integration['setup']} +""" + + except Exception as e: + print("Exception in logs md construction", e, integration['id']) + + # AUTHENTICATIONS elif mode == 'authentication': if True: @@ -339,27 +414,35 @@ def write_to_file(path, md, meta_yaml, sidebar_label, community, mode='default') except KeyError: # We don't need to print something here. pass - elif mode == 'notification': + elif mode == 'cloud-notification': - if "cloud-notifications" in path: - # for cloud notifications we generate them near their metadata.yaml - name = clean_string(integration['meta']['name']) + # for cloud notifications we generate them near their metadata.yaml + name = clean_string(integration['meta']['name']) - if not Path(f'{path}/integrations').exists(): - Path(f'{path}/integrations').mkdir() + if not Path(f'{path}/integrations').exists(): + Path(f'{path}/integrations').mkdir() - # proper_edit_name = meta_yaml.replace( - # "metadata.yaml", f'integrations/{clean_string(sidebar_label)}.md\"') + # proper_edit_name = meta_yaml.replace( + # "metadata.yaml", f'integrations/{clean_string(sidebar_label)}.md\"') - md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='cloud-notifications') + md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='cloud-notification') - finalpath = f'{path}/integrations/{name}.md' - else: - # add custom_edit_url as the md file, so we can have uniqueness in the ingest script - # afterwards the ingest will replace this metadata with meta_yaml - md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='agent-notifications') + 
finalpath = f'{path}/integrations/{name}.md' - finalpath = f'{path}/README.md' + try: + clean_and_write( + md, + Path(finalpath) + ) + except FileNotFoundError as e: + print("Exception in writing to file", e) + elif mode == 'agent-notification': + # add custom_edit_url as the md file, so we can have uniqueness in the ingest script + # afterwards the ingest will replace this metadata with meta_yaml + + md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='agent-notification') + + finalpath = f'{path}/README.md' try: clean_and_write( @@ -369,7 +452,28 @@ def write_to_file(path, md, meta_yaml, sidebar_label, community, mode='default') except FileNotFoundError as e: print("Exception in writing to file", e) + elif mode == 'logs': + + name = clean_string(integration['meta']['name']) + if not Path(f'{path}/integrations').exists(): + Path(f'{path}/integrations').mkdir() + + # proper_edit_name = meta_yaml.replace( + # "metadata.yaml", f'integrations/{clean_string(sidebar_label)}.md\"') + + md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='logs') + + finalpath = f'{path}/integrations/{name}.md' + + try: + clean_and_write( + md, + Path(finalpath) + ) + + except FileNotFoundError as e: + print("Exception in writing to file", e) elif mode == 'authentication': name = clean_string(integration['meta']['name']) @@ -383,7 +487,7 @@ def write_to_file(path, md, meta_yaml, sidebar_label, community, mode='default') md = add_custom_edit_url(md, meta_yaml, sidebar_label, mode='cloud-authentication') finalpath = f'{path}/integrations/{name}.md' - + try: clean_and_write( md, @@ -422,7 +526,6 @@ def make_symlinks(symlink_dict): categories, integrations = read_integrations_js('integrations/integrations.js') - # Iterate through every integration for integration in integrations: @@ -442,20 +545,32 @@ def make_symlinks(symlink_dict): path = build_path(meta_yaml) write_to_file(path, md, meta_yaml, sidebar_label, community) - # kind of specific if clause, so we can avoid running 
excessive code in the go repo - elif integration['integration_type'] == "notification": + elif integration['integration_type'] == "agent_notification": + + meta_yaml, sidebar_label, learn_rel_path, md, community = build_readme_from_integration( + integration, mode='agent-notification') + path = build_path(meta_yaml) + write_to_file(path, md, meta_yaml, sidebar_label, community, mode='agent-notification') + + elif integration['integration_type'] == "cloud_notification": meta_yaml, sidebar_label, learn_rel_path, md, community = build_readme_from_integration( - integration, mode='notification') + integration, mode='cloud-notification') path = build_path(meta_yaml) - write_to_file(path, md, meta_yaml, sidebar_label, community, mode='notification') + write_to_file(path, md, meta_yaml, sidebar_label, community, mode='cloud-notification') + + elif integration['integration_type'] == "logs": + + meta_yaml, sidebar_label, learn_rel_path, md, community = build_readme_from_integration( + integration, mode='logs') + path = build_path(meta_yaml) + write_to_file(path, md, meta_yaml, sidebar_label, community, mode='logs') elif integration['integration_type'] == "authentication": meta_yaml, sidebar_label, learn_rel_path, md, community = build_readme_from_integration( integration, mode='authentication') path = build_path(meta_yaml) - write_to_file(path, md, meta_yaml, sidebar_label, community, mode='authentication') - + write_to_file(path, md, meta_yaml, sidebar_label, community, mode='authentication') make_symlinks(symlink_dict) diff --git a/integrations/gen_integrations.py b/integrations/gen_integrations.py index b4516eebef24b2..771c6627fe97c4 100755 --- a/integrations/gen_integrations.py +++ b/integrations/gen_integrations.py @@ -4,7 +4,6 @@ import os import re import sys - from copy import deepcopy from pathlib import Path @@ -40,11 +39,18 @@ (AGENT_REPO, REPO_PATH / 'src' / 'exporting', True), ] -NOTIFICATION_SOURCES = [ +AGENT_NOTIFICATION_SOURCES = [ (AGENT_REPO, REPO_PATH / 
'src' / 'health' / 'notifications', True), +] + +CLOUD_NOTIFICATION_SOURCES = [ (AGENT_REPO, INTEGRATIONS_PATH / 'cloud-notifications' / 'metadata.yaml', False), ] +LOGS_SOURCES = [ + (AGENT_REPO, INTEGRATIONS_PATH / 'logs' / 'metadata.yaml', False), +] + AUTHENTICATION_SOURCES = [ (AGENT_REPO, INTEGRATIONS_PATH / 'cloud-authentication' / 'metadata.yaml', False), ] @@ -64,12 +70,22 @@ 'troubleshooting', ] -NOTIFICATION_RENDER_KEYS = [ +AGENT_NOTIFICATION_RENDER_KEYS = [ 'overview', 'setup', 'troubleshooting', ] +CLOUD_NOTIFICATION_RENDER_KEYS = [ + 'setup', + 'troubleshooting', +] + +LOGS_RENDER_KEYS = [ + 'overview', + 'setup', +] + AUTHENTICATION_RENDER_KEYS = [ 'overview', 'setup', @@ -85,18 +101,18 @@ def debug(msg): if GITHUB_ACTIONS: - print(f':debug:{ msg }') + print(f':debug:{msg}') elif DEBUG: - print(f'>>> { msg }') + print(f'>>> {msg}') else: pass def warn(msg, path): if GITHUB_ACTIONS: - print(f':warning file={ path }:{ msg }') + print(f':warning file={path}:{msg}') else: - print(f'!!! WARNING:{ path }:{ msg }') + print(f'!!! 
WARNING:{path}:{msg}') def retrieve_from_filesystem(uri): @@ -122,8 +138,18 @@ def retrieve_from_filesystem(uri): registry=registry, ) -NOTIFICATION_VALIDATOR = Draft7Validator( - {'$ref': './notification.json#'}, +AGENT_NOTIFICATION_VALIDATOR = Draft7Validator( + {'$ref': './agent_notification.json#'}, + registry=registry, +) + +CLOUD_NOTIFICATION_VALIDATOR = Draft7Validator( + {'$ref': './cloud_notification.json#'}, + registry=registry, +) + +LOGS_VALIDATOR = Draft7Validator( + {'$ref': './logs.json#'}, registry=registry, ) @@ -209,19 +235,19 @@ def load_yaml(src): yaml = YAML(typ='safe') if not src.is_file(): - warn(f'{ src } is not a file.', src) + warn(f'{src} is not a file.', src) return False try: contents = src.read_text() except (IOError, OSError): - warn(f'Failed to read { src }.', src) + warn(f'Failed to read {src}.', src) return False try: data = yaml.load(contents) except YAMLError: - warn(f'Failed to parse { src } as YAML.', src) + warn(f'Failed to parse {src} as YAML.', src) return False return data @@ -236,7 +262,7 @@ def load_categories(): try: CATEGORY_VALIDATOR.validate(categories) except ValidationError: - warn(f'Failed to validate { CATEGORIES_FILE } against the schema.', CATEGORIES_FILE) + warn(f'Failed to validate {CATEGORIES_FILE} against the schema.', CATEGORIES_FILE) sys.exit(1) return categories @@ -248,7 +274,7 @@ def load_collectors(): entries = get_collector_metadata_entries() for repo, path in entries: - debug(f'Loading { path }.') + debug(f'Loading {path}.') data = load_yaml(path) if not data: @@ -257,7 +283,7 @@ def load_collectors(): try: COLLECTOR_VALIDATOR.validate(data) except ValidationError: - warn(f'Failed to validate { path } against the schema.', path) + warn(f'Failed to validate {path} against the schema.', path) continue for idx, item in enumerate(data['modules']): @@ -273,7 +299,7 @@ def load_collectors(): def _load_deploy_file(file, repo): ret = [] - debug(f'Loading { file }.') + debug(f'Loading {file}.') data = 
load_yaml(file) if not data: @@ -282,7 +308,7 @@ def _load_deploy_file(file, repo): try: DEPLOY_VALIDATOR.validate(data) except ValidationError: - warn(f'Failed to validate { file } against the schema.', file) + warn(f'Failed to validate {file} against the schema.', file) return [] for idx, item in enumerate(data): @@ -309,7 +335,7 @@ def load_deploy(): def _load_exporter_file(file, repo): - debug(f'Loading { file }.') + debug(f'Loading {file}.') data = load_yaml(file) if not data: @@ -318,7 +344,7 @@ def _load_exporter_file(file, repo): try: EXPORTER_VALIDATOR.validate(data) except ValidationError: - warn(f'Failed to validate { file } against the schema.', file) + warn(f'Failed to validate {file} against the schema.', file) return [] if 'id' in data: @@ -354,21 +380,113 @@ def load_exporters(): return ret -def _load_notification_file(file, repo): - debug(f'Loading { file }.') +def _load_agent_notification_file(file, repo): + debug(f'Loading {file}.') + data = load_yaml(file) + + if not data: + return [] + + try: + AGENT_NOTIFICATION_VALIDATOR.validate(data) + except ValidationError: + warn(f'Failed to validate {file} against the schema.', file) + return [] + + if 'id' in data: + data['integration_type'] = 'agent_notification' + data['_src_path'] = file + data['_repo'] = repo + data['_index'] = 0 + + return [data] + else: + ret = [] + + for idx, item in enumerate(data): + item['integration_type'] = 'agent_notification' + item['_src_path'] = file + item['_repo'] = repo + item['_index'] = idx + ret.append(item) + + return ret + + +def load_agent_notifications(): + ret = [] + + for repo, path, match in AGENT_NOTIFICATION_SOURCES: + if match and path.exists() and path.is_dir(): + for file in path.glob(METADATA_PATTERN): + ret.extend(_load_agent_notification_file(file, repo)) + elif not match and path.exists() and path.is_file(): + ret.extend(_load_agent_notification_file(path, repo)) + + return ret + + +def _load_cloud_notification_file(file, repo): + debug(f'Loading 
{file}.') + data = load_yaml(file) + + if not data: + return [] + + try: + CLOUD_NOTIFICATION_VALIDATOR.validate(data) + except ValidationError: + warn(f'Failed to validate {file} against the schema.', file) + return [] + + if 'id' in data: + data['integration_type'] = 'cloud_notification' + data['_src_path'] = file + data['_repo'] = repo + data['_index'] = 0 + + return [data] + else: + ret = [] + + for idx, item in enumerate(data): + item['integration_type'] = 'cloud_notification' + item['_src_path'] = file + item['_repo'] = repo + item['_index'] = idx + ret.append(item) + + return ret + + +def load_cloud_notifications(): + ret = [] + + for repo, path, match in CLOUD_NOTIFICATION_SOURCES: + if match and path.exists() and path.is_dir(): + for file in path.glob(METADATA_PATTERN): + ret.extend(_load_cloud_notification_file(file, repo)) + elif not match and path.exists() and path.is_file(): + ret.extend(_load_cloud_notification_file(path, repo)) + + return ret + + +def _load_logs_file(file, repo): + debug(f'Loading {file}.') data = load_yaml(file) if not data: return [] try: - NOTIFICATION_VALIDATOR.validate(data) + LOGS_VALIDATOR.validate(data) except ValidationError: - warn(f'Failed to validate { file } against the schema.', file) + warn(f'Failed to validate {file} against the schema.', file) return [] if 'id' in data: - data['integration_type'] = 'notification' + data['integration_type'] = 'logs' data['_src_path'] = file data['_repo'] = repo data['_index'] = 0 @@ -378,7 +496,7 @@ def _load_notification_file(file, repo): ret = [] for idx, item in enumerate(data): - item['integration_type'] = 'notification' + item['integration_type'] = 'logs' item['_src_path'] = file item['_repo'] = repo item['_index'] = idx @@ -387,20 +505,21 @@ def _load_notification_file(file, repo): return ret -def load_notifications(): +def load_logs(): ret = [] - for repo, path, match in NOTIFICATION_SOURCES: + for repo, path, match in LOGS_SOURCES: if match and path.exists() and path.is_dir(): 
for file in path.glob(METADATA_PATTERN): - ret.extend(_load_notification_file(file, repo)) + ret.extend(_load_logs_file(file, repo)) elif not match and path.exists() and path.is_file(): - ret.extend(_load_notification_file(path, repo)) + ret.extend(_load_logs_file(path, repo)) return ret + def _load_authentication_file(file, repo): - debug(f'Loading { file }.') + debug(f'Loading {file}.') data = load_yaml(file) if not data: @@ -409,7 +528,7 @@ def _load_authentication_file(file, repo): try: AUTHENTICATION_VALIDATOR.validate(data) except ValidationError: - warn(f'Failed to validate { file } against the schema.', file) + warn(f'Failed to validate {file} against the schema.', file) return [] if 'id' in data: @@ -453,13 +572,13 @@ def make_id(meta): else: instance_name = '000_unknown' - return f'{ meta["plugin_name"] }-{ meta["module_name"] }-{ instance_name }' + return f'{meta["plugin_name"]}-{meta["module_name"]}-{instance_name}' def make_edit_link(item): item_path = item['_src_path'].relative_to(REPO_PATH) - return f'https://github.com/{ item["_repo"] }/blob/master/{ item_path }' + return f'https://github.com/{item["_repo"]}/blob/master/{item_path}' def sort_integrations(integrations): @@ -474,7 +593,9 @@ def dedupe_integrations(integrations, ids): for i in integrations: if ids.get(i['id'], False): first_path, first_index = ids[i['id']] - warn(f'Duplicate integration ID found at { i["_src_path"] } index { i["_index"] } (original definition at { first_path } index { first_index }), ignoring that integration.', i['_src_path']) + warn( + f'Duplicate integration ID found at {i["_src_path"]} index {i["_index"]} (original definition at {first_path} index {first_index}), ignoring that integration.', + i['_src_path']) else: tmp_integrations.append(i) ids[i['id']] = (i['_src_path'], i['_index']) @@ -504,7 +625,7 @@ def render_collectors(categories, collectors, ids): idmap = {i['id']: i for i in collectors} for item in collectors: - debug(f'Processing { item["id"] }.') + 
debug(f'Processing {item["id"]}.') item['edit_link'] = make_edit_link(item) @@ -516,7 +637,7 @@ def render_collectors(categories, collectors, ids): res_id = make_id(res) if res_id not in idmap.keys(): - warn(f'Could not find related integration { res_id }, ignoring it.', item['_src_path']) + warn(f'Could not find related integration {res_id}, ignoring it.', item['_src_path']) continue related.append({ @@ -532,17 +653,19 @@ def render_collectors(categories, collectors, ids): actual_cats = item_cats & valid_cats if bogus_cats: - warn(f'Ignoring invalid categories: { ", ".join(bogus_cats) }', item["_src_path"]) + warn(f'Ignoring invalid categories: {", ".join(bogus_cats)}', item["_src_path"]) if not item_cats: item['meta']['monitored_instance']['categories'] = list(default_cats) - warn(f'{ item["id"] } does not list any caregories, adding it to: { default_cats }', item["_src_path"]) + warn(f'{item["id"]} does not list any caregories, adding it to: {default_cats}', item["_src_path"]) else: - item['meta']['monitored_instance']['categories'] = [x for x in item['meta']['monitored_instance']['categories'] if x in list(actual_cats)] + item['meta']['monitored_instance']['categories'] = [x for x in + item['meta']['monitored_instance']['categories'] if + x in list(actual_cats)] for scope in item['metrics']['scopes']: if scope['name'] == 'global': - scope['name'] = f'{ item["meta"]["monitored_instance"]["name"] } instance' + scope['name'] = f'{item["meta"]["monitored_instance"]["name"]} instance' for cfg_example in item['setup']['configuration']['examples']['list']: if 'folding' not in cfg_example: @@ -552,7 +675,7 @@ def render_collectors(categories, collectors, ids): for key in COLLECTOR_RENDER_KEYS: if key in item.keys(): - template = get_jinja_env().get_template(f'{ key }.md') + template = get_jinja_env().get_template(f'{key}.md') data = template.render(entry=item, related=related, clean=False) clean_data = template.render(entry=item, related=related, clean=True) @@ -589,7 
+712,7 @@ def render_deploy(distros, categories, deploy, ids): template = get_jinja_env().get_template('platform_info.md') for item in deploy: - debug(f'Processing { item["id"] }.') + debug(f'Processing {item["id"]}.') item['edit_link'] = make_edit_link(item) clean_item = deepcopy(item) @@ -646,7 +769,7 @@ def render_exporters(categories, exporters, ids): for key in EXPORTER_RENDER_KEYS: if key in item.keys(): - template = get_jinja_env().get_template(f'{ key }.md') + template = get_jinja_env().get_template(f'{key}.md') data = template.render(entry=item, clean=False) clean_data = template.render(entry=item, clean=True) @@ -670,7 +793,7 @@ def render_exporters(categories, exporters, ids): return exporters, clean_exporters, ids -def render_notifications(categories, notifications, ids): +def render_agent_notifications(categories, notifications, ids): debug('Sorting notifications.') sort_integrations(notifications) @@ -686,10 +809,11 @@ def render_notifications(categories, notifications, ids): clean_item = deepcopy(item) - for key in NOTIFICATION_RENDER_KEYS: + for key in AGENT_NOTIFICATION_RENDER_KEYS: if key in item.keys(): - template = get_jinja_env().get_template(f'{ key }.md') + template = get_jinja_env().get_template(f'{key}.md') data = template.render(entry=item, clean=False) + clean_data = template.render(entry=item, clean=True) if 'variables' in item['meta']: @@ -712,6 +836,90 @@ def render_notifications(categories, notifications, ids): return notifications, clean_notifications, ids +def render_cloud_notifications(categories, notifications, ids): + debug('Sorting notifications.') + + sort_integrations(notifications) + + debug('Checking notification ids.') + + notifications, ids = dedupe_integrations(notifications, ids) + + clean_notifications = [] + + for item in notifications: + item['edit_link'] = make_edit_link(item) + + clean_item = deepcopy(item) + + for key in CLOUD_NOTIFICATION_RENDER_KEYS: + if key in item.keys(): + template = 
get_jinja_env().get_template(f'{key}.md') + data = template.render(entry=item, clean=False) + clean_data = template.render(entry=item, clean=True) + + if 'variables' in item['meta']: + template = get_jinja_env().from_string(data) + data = template.render(variables=item['meta']['variables'], clean=False) + template = get_jinja_env().from_string(clean_data) + clean_data = template.render(variables=item['meta']['variables'], clean=True) + else: + data = '' + clean_data = '' + + item[key] = data + clean_item[key] = clean_data + + for k in ['_src_path', '_repo', '_index']: + del item[k], clean_item[k] + + clean_notifications.append(clean_item) + + return notifications, clean_notifications, ids + + +def render_logs(categories, logs, ids): + debug('Sorting logs.') + + sort_integrations(logs) + + debug('Checking log ids.') + + logs, ids = dedupe_integrations(logs, ids) + + clean_logs = [] + + for item in logs: + item['edit_link'] = make_edit_link(item) + + clean_item = deepcopy(item) + + for key in LOGS_RENDER_KEYS: + if key in item.keys(): + template = get_jinja_env().get_template(f'{key}.md') + data = template.render(entry=item, clean=False) + clean_data = template.render(entry=item, clean=True) + + if 'variables' in item['meta']: + template = get_jinja_env().from_string(data) + data = template.render(variables=item['meta']['variables'], clean=False) + template = get_jinja_env().from_string(clean_data) + clean_data = template.render(variables=item['meta']['variables'], clean=True) + else: + data = '' + clean_data = '' + + item[key] = data + clean_item[key] = clean_data + + for k in ['_src_path', '_repo', '_index']: + del item[k], clean_item[k] + + clean_logs.append(clean_item) + + return logs, clean_logs, ids + + def render_authentications(categories, authentications, ids): debug('Sorting authentications.') @@ -729,9 +937,9 @@ def render_authentications(categories, authentications, ids): clean_item = deepcopy(item) for key in AUTHENTICATION_RENDER_KEYS: - + if key in 
item.keys(): - template = get_jinja_env().get_template(f'{ key }.md') + template = get_jinja_env().get_template(f'{key}.md') data = template.render(entry=item, clean=False) clean_data = template.render(entry=item, clean=True) @@ -746,7 +954,7 @@ def render_authentications(categories, authentications, ids): item[key] = data clean_item[key] = clean_data - + for k in ['_src_path', '_repo', '_index']: del item[k], clean_item[k] @@ -777,20 +985,23 @@ def main(): collectors = load_collectors() deploy = load_deploy() exporters = load_exporters() - notifications = load_notifications() + agent_notifications = load_agent_notifications() + cloud_notifications = load_cloud_notifications() + logs = load_logs() authentications = load_authentications() collectors, clean_collectors, ids = render_collectors(categories, collectors, dict()) deploy, clean_deploy, ids = render_deploy(distros, categories, deploy, ids) exporters, clean_exporters, ids = render_exporters(categories, exporters, ids) - notifications, clean_notifications, ids = render_notifications(categories, notifications, ids) + agent_notifications, clean_agent_notifications, ids = render_agent_notifications(categories, agent_notifications,ids) + cloud_notifications, clean_cloud_notifications, ids = render_cloud_notifications(categories, cloud_notifications,ids) + logs, clean_logs, ids = render_logs(categories, logs,ids) authentications, clean_authentications, ids = render_authentications(categories, authentications, ids) - - integrations = collectors + deploy + exporters + notifications + authentications + integrations = collectors + deploy + exporters + agent_notifications + cloud_notifications + logs + authentications render_integrations(categories, integrations) - clean_integrations = clean_collectors + clean_deploy + clean_exporters + clean_notifications + clean_authentications + clean_integrations = clean_collectors + clean_deploy + clean_exporters + clean_agent_notifications + clean_cloud_notifications + clean_logs 
+ clean_authentications render_json(categories, clean_integrations) diff --git a/integrations/integrations.js b/integrations/integrations.js index 64425aa9597cc8..28f1f32273f2c1 100644 --- a/integrations/integrations.js +++ b/integrations/integrations.js @@ -20111,7 +20111,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 40 | Core | x86_64, aarch64 | |\n| 39 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 41 | Core | x86_64, aarch64 | |\n| 40 | Core | x86_64, aarch64 | |\n| 39 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20602,7 +20602,7 @@ export const integrations = [ ], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, - "platform_info": "We build native packages for the 
following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 24.04 | Core | amd64, armhf, arm64 | |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 24.04 | Core | amd64, armhf, arm64 | |\n| 24.10 | Core | amd64, armhf, arm64 | |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -21589,7 +21589,7 @@ export const integrations = [ "overview": "# Alerta\n\nThe [Alerta](https://alerta.io/) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick \u2018at-a-glance\u2019 visualization. 
With just one system you can monitor alerts from many other monitoring tools on a single screen.\nYou can send Netdata alerts to Alerta to see alerts coming from many Netdata hosts or also from a multi-host Netdata configuration.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | set `ALERTA_WEBHOOK_URL` to the API url you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role, by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source, if Alerta is configured to use authentication (recommended). To create a new API key:\n1. 
Go to Configuration > API Keys.\n2. Create a new API key called \"netdata\" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_alerta[sysadmin]=\"Systems\"\nrole_recipients_alerta[domainadmin]=\"Domains\"\nrole_recipients_alerta[dba]=\"Databases Systems\"\nrole_recipients_alerta[webmaster]=\"Marketing Development\"\nrole_recipients_alerta[proxyadmin]=\"Proxy\"\nrole_recipients_alerta[sitemgr]=\"Sites\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with `ALLOWED_ENVIRONMENTS` option.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA=\"YES\"\nALERTA_WEBHOOK_URL=\"http://yourserver/alerta/api\"\nALERTA_API_KEY=\"INSERT_YOUR_API_KEY_HERE\"\nDEFAULT_RECIPIENT_ALERTA=\"Production\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/alerta/metadata.yaml" }, { @@ -21608,283 +21608,9 @@ export const integrations = [ "overview": "# AWS SNS\n\nAs part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' 
(SNS). Amazon SNS works similarly to Netdata's own notification system, allowing to dispatch a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to:\n- Email addresses\n- Mobile Phones via SMS\n- HTTP or HTTPS web hooks\n- AWS Lambda functions\n- AWS SQS queues\n- Mobile applications via push notifications\nYou can send notifications through Amazon SNS using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.\n- For email notification support, we recommend using Netdata's email notifications, as it is has the following benefits:\n - In most cases, it requires less configuration.\n - Netdata's emails are nicely pre-formatted and support features like threading, which requires a lot of manual effort in SNS.\n - It is less resource intensive and more cost-efficient than SNS.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory. If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. 
For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSNS | Set `SEND_AWSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to to the string that you want the alert to be sent into. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. 
| | yes |\n\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to 
represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_awssns[sysadmin]=\"arn:aws:sns:us-east-2:123456789012:Systems\"\nrole_recipients_awssns[domainadmin]=\"arn:aws:sns:us-east-2:123456789012:Domains\"\nrole_recipients_awssns[dba]=\"arn:aws:sns:us-east-2:123456789012:Databases\"\nrole_recipients_awssns[webmaster]=\"arn:aws:sns:us-east-2:123456789012:Development\"\nrole_recipients_awssns[proxyadmin]=\"arn:aws:sns:us-east-2:123456789012:Proxy\"\nrole_recipients_awssns[sitemgr]=\"arn:aws:sns:us-east-2:123456789012:Sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```yaml\n```text\n#------------------------------------------------------------------------------\n# Amazon SNS notifications\n\nSEND_AWSSNS=\"YES\"\nAWSSNS_MESSAGE_FORMAT=\"${status} on ${host} at ${date}: ${chart} ${value_string}\"\nDEFAULT_RECIPIENT_AWSSNS=\"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n```\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport 
NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/awssns/metadata.yaml" }, - { - "id": "notify-cloud-awssns", - "meta": { - "name": "Amazon SNS", - "link": "https://aws.amazon.com/sns/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "awssns.png" - }, - "keywords": [ - "awssns" - ], - "overview": "# Amazon SNS\n\nYou can configure notification delivery to AWS SNS from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- An AWS account with AWS SNS access\n\n### AWS SNS Configuration\n\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On AWS SNS management console click on **Create topic**\n - On the **Details** section, select the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n3. Copy the **Topic ARN** in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the AWS SNS Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Topic ARN: The topic provided on AWS SNS (with region) for where to publish your notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-discord", - "meta": { - "name": "Discord", - "link": "https://discord.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "discord.png" - }, - "keywords": [ - "discord", - "community" - ], - "overview": "# Discord\n\nYou can configure notification delivery to your Discord server from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n\n### Discord Server Configuration\n\n1. Go to **Server Settings** --> **Integrations**\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Keep note of the **Webhook URL** as you will need it for the configuration of the integration on the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Discord Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The URL you copied from the previous section\n - Channel Parameters: Select the channel type which the notifications will be sent to, if it is a Forum channel, you need to specify a thread name\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-ilert", - "meta": { - "name": "ilert", - "link": "https://www.ilert.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "ilert.svg" - }, - "keywords": [ - "ilert" - ], - "overview": "# ilert\n\nYou can configure notification delivery to ilert from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on ilert to add new Alert sources.\n\n### ilert Configuration\n\n1. From the navigation bar, open the Alert sources drop down and click \"Alert sources\"\n2. Click on the \"+ Create a new alert source\" button\n3. Configure an Alert source:\n - Select \"API integration\" and click Next\n - Provide a name that suits the source's purpose, for example \"Netdata\"\n - Select Escalation policy\n - Select Alert grouping (optional)\n4. Obtain the API Key:\n - Once the Alert source is created, you will be provided with an API key. Copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. 
Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the ilert Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Alert Source API key: The key you copied in the ilert configuration step.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-mattermost", - "meta": { - "name": "Mattermost", - "link": "https://mattermost.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "mattermost.png" - }, - "keywords": [ - "mattermost" - ], - "overview": "# Mattermost\n\nYou can configure notification delivery to Mattermost from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Mattermost to add new integrations.\n\n### Mattermost Server Configuration\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don't have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook.\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. 
You will end up with a webhook URL that looks like `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Mattermost Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Mattermost for the channel you want to receive your notifications\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-microsoftteams", - "meta": { - "name": "Microsoft Teams", - "link": "https://www.microsoft.com/en-us/microsoft-teams", - "categories": [ - "notify.cloud" - ], - "icon_filename": "teams.svg" - }, - "keywords": [ - "microsoft", - "teams" - ], - "overview": "# Microsoft Teams\n\nYou can configure notification delivery to a Microsoft Teams channel from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. Note that this is a **paid** feature\n\n### Microsoft Teams Configuration\n\n1. 
Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots icon that appears\n2. Select \"Workflows\" from the options, then choose \"Post to a channel when a webhook request is received\"\n3. **Configure Workflow Details**\n - Give your workflow a name, such as \"Netdata Alerts\"\n - Select the target team and channel where you will receive notifications\n - Click \"Add workflow\"\n4. Once the workflow is created, you will receive a unique Workflow Webhook URL, copy it, in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Microsoft Teams Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Microsoft Teams Incoming Webhook URL: The Incoming Webhook URL that you copied earlier.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-mobile-app", - "meta": { - "name": "Netdata Mobile App", - "link": "https://netdata.cloud", - "categories": [ - "notify.cloud" - ], - "icon_filename": "netdata.png" - }, - "keywords": [ - "mobile-app", - "phone", - "personal-notifications" - ], - "overview": "# Netdata Mobile App\n\nYou can configure notification delivery to the Netdata Mobile Application from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- You need to 
have the Netdata Mobile App installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration and device linking\n\nIn order to login to the Netdata Mobile App\n\n1. Download the Netdata Mobile App from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. Open the App and Choose your Sign-in option\n - Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.\n - Sign-in with QR Code: Scan the QR code from the Netdata Cloud UI under **Profile Picture** --> **Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n\n### Netdata Configuration\n\nAfter linking your device, enable the toggle for **Mobile App Notifications** under the same settings panel.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-opsgenie", - "meta": { - "name": "Opsgenie", - "link": "https://www.atlassian.com/software/opsgenie", - "categories": [ - "notify.cloud" - ], - "icon_filename": "opsgenie.png" - }, - "keywords": [ - "opsgenie", - "atlassian" - ], - "overview": "# Opsgenie\n\nYou can configure notification delivery to Opsgenie from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\n1. Go to the integrations tab of your team, click **Add integration**\n2. 
Pick **API** from the available integrations and copy the API Key in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Opsgenie Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - API Key: The key provided on Opsgenie for the channel you want to receive your notifications\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-pagerduty", - "meta": { - "name": "PagerDuty", - "link": "https://www.pagerduty.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "pagerduty.png" - }, - "keywords": [ - "pagerduty" - ], - "overview": "# PagerDuty\n\nYou can configure notification delivery to PagerDuty from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a PagerDuty service to receive events using webhooks.\n\n### PagerDuty Server Configuration\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. On the third step of the service creation, select `Events API V2` Integration\n3. 
Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** in order to add them to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the PagerDuty Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Integration Key: A 32 character key provided by PagerDuty to receive events on your service.\n - Integration URL (Alert Events): The URL provided by PagerDuty where Netdata Cloud will send notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-rocketchat", - "meta": { - "name": "RocketChat", - "link": "https://www.rocket.chat/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "rocketchat.png" - }, - "keywords": [ - "rocketchat" - ], - "overview": "# RocketChat\n\nYou can configure notification delivery to RocketChat from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on RocketChat to add new integrations.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat server to receive notifications from Netdata Cloud:\n\n1. 
In RocketChat, Navigate to Administration > Workspace > Integrations\n2. Click **+New** at the top right corner\n3. For more details about each parameter, check [Create a new incoming webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook)\n4. You will end up with a webhook endpoint that looks like `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the PagerDuty Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on RocketChat for the channel you want to receive your notifications\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-slack", - "meta": { - "name": "Slack", - "link": "https://slack.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "slack.png" - }, - "keywords": [ - "slack" - ], - "overview": "# Slack\n\nYou can configure notification delivery to Slack from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- 
Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a Slack app on your workspace to receive the Webhooks.\n\n### Slack Server Configuration\n\n1. Create an app to receive webhook integrations. Check the [Slack documentation](https://api.slack.com/apps?new_app=1) for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n - Specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Slack Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Slack for the channel you want to receive your notifications\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-splunk", - "meta": { - "name": "Splunk", - "link": "https://splunk.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "splunk-black.svg" - }, - "keywords": [ - "Splunk" - ], - "overview": "# Splunk\n\nYou can configure notification delivery to Splunk from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions on how to set it up.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - HTTP Event Collector URI: The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token: The token that Splunk provided to you when you created the HTTP Event Collector\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-telegram", - "meta": { - "name": "Telegram", - "link": "https://telegram.org/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "telegram.svg" - }, - "keywords": [ - "Telegram" - ], - "overview": "# Telegram\n\nYou can configure notification delivery to Telegram from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Telegram bot token, chat ID and optionally the topic ID\n\n### Telegram Configuration\n\n- Bot token: To create one bot, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. 
Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, eg. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n- To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it on a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Telegram Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Bot Token: The token of your bot\n - Chat ID: The chat id where your bot will deliver messages to\n - Topic ID: The identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. 
If topics are not supported, messages will be sent to the chat.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-victorops", - "meta": { - "name": "Splunk VictorOps", - "link": "https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html", - "categories": [ - "notify.cloud" - ], - "icon_filename": "victorops.svg" - }, - "keywords": [ - "VictorOps", - "Splunk", - "On-Call" - ], - "overview": "# Splunk VictorOps\n\nYou can configure notification delivery to Splunk On-Call/VictorOps from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk VictorOps Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Destination URL - The URL provided by VictorOps of your REST endpoint.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-webhook", - "meta": { - "name": "Webhook", - "link": "https://en.wikipedia.org/wiki/Webhook", - "categories": [ - "notify.cloud" - ], - "icon_filename": "webhook.svg" - }, - "keywords": [ - "generic webhooks", - "webhooks" - ], - "overview": "# Webhook\n\nYou can configure notification delivery to a webhook using a predefined schema from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Webhook integration\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The url of the service that Netdata will send notifications to. 
In order to keep the communication secured, Netdata only accepts HTTPS urls.\n - Extra headers: Optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism, Netdata webhook integration supports 3 different authentication mechanisms.\n - Mutual TLS (recommended): Default authentication mechanism used if no other method is selected\n - Basic: The client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**.\n - Bearer: The client sends a request with an Authorization header that includes a **bearer token**.\n\n### Webhook service\n\nA webhook service allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL.\n\nIn this section, we'll go over the steps to set up a generic webhook service, including adding headers, and implementing different types of authorization mechanisms.\n\n#### Netdata webhook integration\n\nNetdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.\n\nFor alert notifications, the content sent to the destination service contains a JSON object with the following properties:\n\n| field | type | description |\n|:----------------------------------|:--------------------------------|:--------------------------------------------------------------------------|\n| message | string | A summary message of the alert. |\n| alert | string | The alert the notification is related to. |\n| info | string | Additional info related with the alert. |\n| chart | string | The chart associated with the alert. |\n| context | string | The chart context. |\n| space | string | The space where the node that raised the alert is assigned. |\n| Rooms | object\\[object(string,string)\\] | Object with list of Rooms names and urls where the node belongs to. |\n| family | string | Context family. 
|\n| class | string | Classification of the alert, e.g. `Error`. |\n| severity | string | Alert severity, can be one of `warning`, `critical` or `clear`. |\n| date | string | Date of the alert in ISO8601 format. |\n| duration | string | Duration the alert has been raised. |\n| additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n| additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n| alert_url | string | Netdata Cloud URL for this alert. |\n\nFor reachability notifications, the JSON object will contain the following properties:\n\n| field | type | description |\n|:-----------------|:--------|:------------------------------------------------------------------------------------------------------------------------------|\n| message | string | A summary message of the reachability alert. |\n| url | string | Netdata Cloud URL for the host experiencing the reachability alert. |\n| host | string | The hostname experiencing the reachability alert. |\n| severity | string | Severity for this notification. If host is reachable, severity will be `info`, if host is unreachable, it will be `critical`. |\n| status | object | An object with the status information. 
|\n| status.reachable | boolean | `true` if host is reachable, `false` otherwise |\n| status.text | string | Can be `reachable` or `unreachable` |\n\n#### Extra headers\n\nWhen setting up a webhook service, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\nBy default, the following headers will be sent in the HTTP request\n\n | **Header** | **Value** |\n |:------------:|------------------|\n | Content-Type | application/json |\n\n#### Authentication mechanisms\n\nNetdata webhook integration supports 3 different authentication mechanisms:\n\n##### Mutual TLS authentication (recommended)\n\nIn mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\nThis is the default authentication mechanism used if no other method is selected.\n\nTo take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.\n\nThe steps to perform this validation are as follows:\n\n- Store Netdata CA certificate on a file in your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```text\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n 
FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n\n
\n\n- Enable client certificate validation on the web server that is doing the TLS termination. Below there are examples on how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n \n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n \n \n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n \n ```\n\n##### Basic authentication\n\nIn basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n##### Bearer token authentication\n\nIn bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. 
If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n##### Challenge secret\n\nTo validate that you have ownership of the web application that will receive the webhook events, Netdata is using a challenge response check mechanism.\n\nThis mechanism works as follows:\n\n- The challenge secret parameter that you provide is a shared secret between only you and Netdata.\n- On your request for creating a new Webhook integration, Netdata will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n- You will receive this request on your application and it must construct an encrypted response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n- Netdata will compare your application's response with the hash that it will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\nNetdata does this validation every time you update your integration configuration.\n\n- Response requirements:\n - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n - Valid response_token and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n**Example response token generation in Python:**\n\nHere you can see how to define a handler for a Flask application in python 3:\n\n```python\nimport base64\nimport hashlib\nimport hmac\nimport json\n\nkey ='YOUR_CHALLENGE_SECRET'\n\n@app.route('/webhooks/netdata')\ndef webhook_challenge():\ntoken = request.args.get('crc_token').encode('ascii')\n\n# creates HMAC SHA-256 hash from incomming token and your consumer secret\nsha256_hash_digest = hmac.new(key.encode(),\n msg=token,\n digestmod=hashlib.sha256).digest()\n\n# construct response data with 
base64 encoded hash\nresponse = {\n 'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n}\n\n# returns properly formatted json response\nreturn json.dumps(response)\n```\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, { "id": "notify-custom", "meta": { @@ -21901,7 +21627,7 @@ export const integrations = [ "overview": "# Custom\n\nNetdata Agent's alert notification feature allows you to send custom notifications to any endpoint you choose.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. 
You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```\nrole_recipients_custom[sysadmin]=\"systems\"\nrole_recipients_custom[domainadmin]=\"domains\"\nrole_recipients_custom[dba]=\"databases systems\"\nrole_recipients_custom[webmaster]=\"marketing development\"\nrole_recipients_custom[proxyadmin]=\"proxy-admin\"\nrole_recipients_custom[sitemgr]=\"sites\"\n```\n\n\n##### custom_sender()\n\nThe following is a sample custom_sender() function in health_alarm_notify.conf, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:\n```\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n```\n\nThe supported variables that you can use for the function's `msg` variable are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host generated this event |\n| `${url_host}` | 
Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n{% /details %}\n#### Examples\n\n##### Basic 
Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM=\"YES\"\nDEFAULT_RECIPIENT_CUSTOM=\"\"\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/custom/metadata.yaml" }, { @@ -21920,7 +21646,7 @@ export const integrations = [ "overview": "# Discord\n\nSend notifications to Discord using Netdata's Agent alert notification feature, which supports 
dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_discord[sysadmin]=\"systems\"\nrole_recipients_discord[domainadmin]=\"domains\"\nrole_recipients_discord[dba]=\"databases systems\"\nrole_recipients_discord[webmaster]=\"marketing development\"\nrole_recipients_discord[proxyadmin]=\"proxy-admin\"\nrole_recipients_discord[sitemgr]=\"sites\"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD=\"YES\"\nDISCORD_WEBHOOK_URL=\"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_DISCORD=\"alerts\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/discord/metadata.yaml" }, { @@ -21939,7 +21665,7 @@ export const integrations = [ "overview": "# Dynatrace\n\nDynatrace allows you to receive notifications using their Events REST API. 
See the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event) about POSTing an event in the Events API for more details.\nYou can send notifications to Dynatrace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers but make sure the server is network visible from your Netdata hosts. The Dynatrace server should be with protocol prefixed (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you have access in order to generate the API Token. For example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag. 
The Netdata alarm is sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. | | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space, it is the URL part of the page you have access in order to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left to its default value Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. 
| Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE=\"YES\"\nDYNATRACE_SERVER=\"https://monitor.example.com\"\nDYNATRACE_TOKEN=\"XXXXXXX\"\nDYNATRACE_SPACE=\"2a93fe0e-4cd5-469a-9d0d-1a064235cfce\"\nDYNATRACE_TAG_VALUE=\"SERVERTAG\"\nDYNATRACE_ANNOTATION_TYPE=\"Netdata Alert\"\nDYNATRACE_EVENT=\"AVAILABILITY_EVENT\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/dynatrace/metadata.yaml" }, { @@ -21958,7 +21684,7 @@ export const integrations = [ "overview": "# Email\n\nSend 
notifications via Email using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working sendmail command is required for email alerts to work. Almost all MTAs provide a sendmail interface. Netdata sends all emails as user netdata, so make sure your sendmail works for local users.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the email to be sent by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. 
| root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_email[sysadmin]=\"systems@example.com\"\nrole_recipients_email[domainadmin]=\"domains@example.com\"\nrole_recipients_email[dba]=\"databases@example.com systems@example.com\"\nrole_recipients_email[webmaster]=\"marketing@example.com development@example.com\"\nrole_recipients_email[proxyadmin]=\"proxy-admin@example.com\"\nrole_recipients_email[sitemgr]=\"sites@example.com\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER=\"example@domain.com\"\nSEND_EMAIL=\"YES\"\nDEFAULT_RECIPIENT_EMAIL=\"recipient@example.com\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/email/metadata.yaml" }, { @@ -21977,7 +21703,7 @@ export const integrations = [ "overview": "# Flock\n\nSend notifications to Flock using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by 
flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_flock[sysadmin]=\"systems\"\nrole_recipients_flock[domainadmin]=\"domains\"\nrole_recipients_flock[dba]=\"databases systems\"\nrole_recipients_flock[webmaster]=\"marketing development\"\nrole_recipients_flock[proxyadmin]=\"proxy-admin\"\nrole_recipients_flock[sitemgr]=\"sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK=\"YES\"\nFLOCK_WEBHOOK_URL=\"https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_FLOCK=\"alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/flock/metadata.yaml" }, { @@ -21996,7 +21722,7 @@ export const integrations = [ "overview": "# Gotify\n\n[Gotify](https://gotify.net/) is a self-hosted push notification service created for sending and receiving messages in real time.\nYou can send alerts to your Gotify instance using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and 
more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An application token. You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | set `GOTIFY_APP_TOKEN` to the app token you generated. 
| | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/` | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY=\"YES\"\nGOTIFY_APP_TOKEN=\"XXXXXXXXXXXXXXX\"\nGOTIFY_APP_URL=\"https://push.example.domain/\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml" }, { @@ -22015,7 +21741,7 @@ export const integrations = [ "overview": "# ilert\n\nilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates.\nSending notification to ilert via Netdata's Agent alert notification feature includes links, images and resolving of corresponding alerts.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Netdata alert source in ilert. 
You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ILERT | Set `SEND_ILERT` to YES | YES | yes |\n| ILERT_ALERT_SOURCE_URL | Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source url in ilert. 
| | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_ILERT=\"YES\"\nILERT_ALERT_SOURCE_URL=\"https://api.ilert.com/api/v1/events/netdata/{API-KEY}\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ilert/metadata.yaml" }, { @@ -22034,7 +21760,7 @@ export const integrations = [ "overview": "# IRC\n\nSend notifications to IRC using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. 
You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for nc, otherwise Netdata will search for it in your system $PATH | | yes |\n| SEND_IRC | Set `SEND_IRC` YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. | | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name as the connection's MODE is defined as a guest. | | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. 
| | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc=\"/usr/bin/nc\"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_irc[sysadmin]=\"#systems\"\nrole_recipients_irc[domainadmin]=\"#domains\"\nrole_recipients_irc[dba]=\"#databases #systems\"\nrole_recipients_irc[webmaster]=\"#marketing #development\"\nrole_recipients_irc[proxyadmin]=\"#proxy-admin\"\nrole_recipients_irc[sitemgr]=\"#sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC=\"YES\"\nDEFAULT_RECIPIENT_IRC=\"#system-alarms\"\nIRC_NETWORK=\"irc.freenode.net\"\nIRC_NICKNAME=\"netdata-alarm-user\"\nIRC_REALNAME=\"netdata-user\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": 
"https://github.com/netdata/netdata/blob/master/src/health/notifications/irc/metadata.yaml" }, { @@ -22053,7 +21779,7 @@ export const integrations = [ "overview": "# Kavenegar\n\n[Kavenegar](https://kavenegar.com/) as service for software developers, based in Iran, provides send and receive SMS, calling voice by using its APIs.\nYou can send notifications to Kavenegar using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. | | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: 09155555555 09177777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if lest unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_kavenegar[sysadmin]=\"09100000000\"\nrole_recipients_kavenegar[domainadmin]=\"09111111111\"\nrole_recipients_kavenegar[dba]=\"0922222222\"\nrole_recipients_kavenegar[webmaster]=\"0933333333\"\nrole_recipients_kavenegar[proxyadmin]=\"0944444444\"\nrole_recipients_kavenegar[sitemgr]=\"0955555555\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with `ALLOWED_ENVIRONMENTS` option.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR=\"YES\"\nKAVENEGAR_API_KEY=\"XXXXXXXXXXXX\"\nKAVENEGAR_SENDER=\"YYYYYYYY\"\nDEFAULT_RECIPIENT_KAVENEGAR=\"0912345678\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/kavenegar/metadata.yaml" }, { @@ -22072,7 +21798,7 @@ export const integrations = [ "overview": "# Matrix\n\nSend notifications to Matrix network rooms using Netdata's Agent alert 
notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The url of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). These tokens usually don't expire.\n- The Room ids that you want to sent the notification to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. | | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the Rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. 
| | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```\ncurl -XPOST -d '{\"type\":\"m.login.password\", \"user\":\"example\", \"password\":\"wordpass\"}' \"https://homeserver:8448/_matrix/client/r0/login\"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe Room ids are unique identifiers and can be obtained from the Room settings in a Matrix client (e.g. Riot).\n\nYou can define multiple Rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different Rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_matrix[sysadmin]=\"!roomid1:homeservername\"\nrole_recipients_matrix[domainadmin]=\"!roomid2:homeservername\"\nrole_recipients_matrix[dba]=\"!roomid3:homeservername\"\nrole_recipients_matrix[webmaster]=\"!roomid4:homeservername\"\nrole_recipients_matrix[proxyadmin]=\"!roomid5:homeservername\"\nrole_recipients_matrix[sitemgr]=\"!roomid6:homeservername\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX=\"YES\"\nMATRIX_HOMESERVER=\"https://matrix.org:8448\"\nMATRIX_ACCESSTOKEN=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MATRIX=\"!XXXXXXXXXXXX:matrix.org\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh 
test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/matrix/metadata.yaml" }, { @@ -22091,7 +21817,7 @@ export const integrations = [ "overview": "# MessageBird\n\nSend notifications to MessageBird using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under 'API ACCESS (REST)' (you will want a live key), you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notification to be sent as an SMS. 
You can define multiple recipients like this: +15555555555 +17777777777. | | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_messagebird[sysadmin]=\"+15555555555\"\nrole_recipients_messagebird[domainadmin]=\"+15555555556\"\nrole_recipients_messagebird[dba]=\"+15555555557\"\nrole_recipients_messagebird[webmaster]=\"+15555555558\"\nrole_recipients_messagebird[proxyadmin]=\"+15555555559\"\nrole_recipients_messagebird[sitemgr]=\"+15555555550\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD=\"YES\"\nMESSAGEBIRD_ACCESS_KEY=\"XXXXXXXX\"\nMESSAGEBIRD_NUMBER=\"XXXXXXX\"\nDEFAULT_RECIPIENT_MESSAGEBIRD=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/messagebird/metadata.yaml" }, { @@ -22110,7 +21836,7 @@ export const integrations = [ "overview": "# ntfy\n\n[ntfy](https://ntfy.sh/) (pronounce: notify) is a simple HTTP-based 
[pub-sub](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, entirely without signup, cost or setup. It's also [open source](https://github.com/binwiederhier/ntfy) if you want to run your own server.\nYou can send alerts to an ntfy server using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don't want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. | | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. 
| | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. | | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_ntfy[sysadmin]=\"https://SERVER1/TOPIC1\"\nrole_recipients_ntfy[domainadmin]=\"https://SERVER2/TOPIC2\"\nrole_recipients_ntfy[dba]=\"https://SERVER3/TOPIC3\"\nrole_recipients_ntfy[webmaster]=\"https://SERVER4/TOPIC4\"\nrole_recipients_ntfy[proxyadmin]=\"https://SERVER5/TOPIC5\"\nrole_recipients_ntfy[sitemgr]=\"https://SERVER6/TOPIC6\"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. 
See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY=\"YES\"\nDEFAULT_RECIPIENT_NTFY=\"https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ntfy/metadata.yaml" }, { @@ -22129,7 +21855,7 @@ export const integrations = [ "overview": "# OpsGenie\n\nOpsgenie is an alerting and incident response tool. It is designed to group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to incidents.\nYou can send notifications to Opsgenie using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. 
You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required, for example there are region-specific API URLs such as `https://eu.api.opsgenie.com`. 
| https://api.opsgenie.com | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE=\"YES\"\nOPSGENIE_API_KEY=\"11111111-2222-3333-4444-555555555555\"\nOPSGENIE_API_URL=\"\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/opsgenie/metadata.yaml" }, { @@ -22148,7 +21874,7 @@ export const integrations = [ "overview": "# PagerDuty\n\nPagerDuty is an enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.\nYou can send notifications to PagerDuty using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. 
Click Use our API directly and select either `Events API v2` or `Events API v1`. Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pd[sysadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa\"\nrole_recipients_pd[domainadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb\"\nrole_recipients_pd[dba]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc\"\nrole_recipients_pd[webmaster]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd\"\nrole_recipients_pd[proxyadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\nrole_recipients_pd[sitemgr]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD=\"YES\"\nDEFAULT_RECIPIENT_PD=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nUSE_PD_VERSION=\"2\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pagerduty/metadata.yaml" }, { @@ -22167,7 +21893,7 @@ export const integrations = [ "overview": "# Prowl\n\nSend notifications to Prowl using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- Because of how Netdata integrates with Prowl, there is a hard limit of at most 
1000 notifications per hour (starting from the first notification sent). Any alerts beyond the first thousand in an hour will be dropped.\n- Warning messages will be sent with the 'High' priority, critical messages will be sent with the 'Emergency' priority, and all other messages will be sent with the normal priority. Opening the notification's associated URL will take you to the Netdata dashboard of the system that issued the alert, directly to the chart that it triggered on.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. You can define multiple API keys like this: `APIKEY1`, `APIKEY2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_prowl[sysadmin]=\"AAAAAAAA\"\nrole_recipients_prowl[domainadmin]=\"BBBBBBBBB\"\nrole_recipients_prowl[dba]=\"CCCCCCCCC\"\nrole_recipients_prowl[webmaster]=\"DDDDDDDDDD\"\nrole_recipients_prowl[proxyadmin]=\"EEEEEEEEEE\"\nrole_recipients_prowl[sitemgr]=\"FFFFFFFFFF\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL=\"YES\"\nDEFAULT_RECIPIENT_PROWL=\"XXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/prowl/metadata.yaml" }, { @@ -22186,7 +21912,7 @@ export const integrations = [ "overview": "# Pushbullet\n\nSend notifications to Pushbullet using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHBULLET | Set `SEND_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: user1@email.com user2@email.com.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pushbullet[sysadmin]=\"user1@email.com\"\nrole_recipients_pushbullet[domainadmin]=\"user2@mail.com\"\nrole_recipients_pushbullet[dba]=\"#channel1\"\nrole_recipients_pushbullet[webmaster]=\"#channel2\"\nrole_recipients_pushbullet[proxyadmin]=\"user3@mail.com\"\nrole_recipients_pushbullet[sitemgr]=\"user4@mail.com\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET=\"YES\"\nPUSHBULLET_ACCESS_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHBULLET=\"admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushbullet/metadata.yaml" }, { @@ -22205,7 +21931,7 @@ export const integrations = [ "overview": "# PushOver\n\nSend notification to Pushover using Netdata's Agent alert notification feature, which supports dozens of endpoints, 
user roles, and more.\n- Netdata will send warning messages with priority 0 and critical messages with priority 1.\n- Pushover allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during the do-not-disturb-hours.\n- All other notifications will be delivered silently.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_APP_TOKEN | set `PUSHOVER_APP_TOKEN` to your Pushover Application token. | | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` to the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pushover[sysadmin]=\"USERTOKEN1\"\nrole_recipients_pushover[domainadmin]=\"USERTOKEN2\"\nrole_recipients_pushover[dba]=\"USERTOKEN3 USERTOKEN4\"\nrole_recipients_pushover[webmaster]=\"USERTOKEN5\"\nrole_recipients_pushover[proxyadmin]=\"USERTOKEN6\"\nrole_recipients_pushover[sitemgr]=\"USERTOKEN7\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER=\"YES\"\nPUSHOVER_APP_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHOVER=\"USERTOKEN\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushover/metadata.yaml" }, { @@ -22224,7 +21950,7 @@ export const integrations = [ "overview": "# RocketChat\n\nSend notifications to Rocket.Chat using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_rocketchat[sysadmin]=\"systems\"\nrole_recipients_rocketchat[domainadmin]=\"domains\"\nrole_recipients_rocketchat[dba]=\"databases systems\"\nrole_recipients_rocketchat[webmaster]=\"marketing development\"\nrole_recipients_rocketchat[proxyadmin]=\"proxy_admin\"\nrole_recipients_rocketchat[sitemgr]=\"sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT=\"YES\"\nROCKETCHAT_WEBHOOK_URL=\"\"\nDEFAULT_RECIPIENT_ROCKETCHAT=\"monitoring_alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml" }, { @@ -22243,7 +21969,7 @@ export const integrations = [ "overview": "# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Slack app along with an incoming webhook, read Slack's guide on 
the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to YES | YES | yes |\n| SLACK_WEBHOOK_URL | set `SLACK_WEBHOOK_URL` to your Slack app's webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. 
| | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK=\"YES\"\nSLACK_WEBHOOK_URL=\"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\" \nDEFAULT_RECIPIENT_SLACK=\"#alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/slack/metadata.yaml" }, { @@ -22264,7 +21990,7 @@ export const integrations = [ "overview": "# SMS\n\nSend notifications to `smstools3` using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\nThe SMS Server Tools 3 is a SMS Gateway software which can send and receive short messages through GSM modems and mobile phones.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- To ensure that the user `netdata` can execute `sendsms`. 
Any user executing `sendsms` needs to:\n - Have write permissions to /tmp and /var/spool/sms/outgoing\n - Be a member of group smsd\n - To ensure that the steps above are successful, just su netdata and execute sendsms phone message.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`, otherwise Netdata will search for it in your system `$PATH:` | YES | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | | yes |\n| DEFAULT_RECIPIENT_SMS | Set DEFAULT_RECIPIENT_SMS to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: PHONE1 PHONE2. 
| | yes |\n\n##### sendsms\n\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms=\"/usr/bin/sendsms\"\n\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_sms[sysadmin]=\"PHONE1\"\nrole_recipients_sms[domainadmin]=\"PHONE2\"\nrole_recipients_sms[dba]=\"PHONE3\"\nrole_recipients_sms[webmaster]=\"PHONE4\"\nrole_recipients_sms[proxyadmin]=\"PHONE5\"\nrole_recipients_sms[sitemgr]=\"PHONE6\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS=\"YES\"\nDEFAULT_RECIPIENT_SMS=\"1234567890\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/smstools3/metadata.yaml" }, { @@ -22283,7 +22009,7 @@ export const integrations = [ "overview": "# syslog\n\nSend notifications to Syslog using Netdata's Agent alert notification feature, which supports dozens of 
endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command for this to work. This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging, by default this value is set to `local6`. | | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set SEND_SYSLOG to YES, make sure you have everything else configured before turning this on. | | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```\n[[facility.level][@host[:port]]/]prefix\n```\n\nprefix defines what the log messages are prefixed with. By default, all lines are prefixed with 'netdata'.\n\nThe facility and level are the standard syslog facility and level options, for more info on them see your local logger and syslog documentation. 
By default, Netdata will log to the local6 facility, with a log level dependent on the type of message (crit for CRITICAL, warning for WARNING, and info for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: daemon.notice@loghost:514/netdata daemon.notice@loghost2:514/netdata.\nAll roles will default to this variable if left unconfigured.\n\n\n##### SEND_SYSLOG \n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_SYSLOG with the recipient you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_syslog[sysadmin]=\"daemon.notice@loghost1:514/netdata\"\nrole_recipients_syslog[domainadmin]=\"daemon.notice@loghost2:514/netdata\"\nrole_recipients_syslog[dba]=\"daemon.notice@loghost3:514/netdata\"\nrole_recipients_syslog[webmaster]=\"daemon.notice@loghost4:514/netdata\"\nrole_recipients_syslog[proxyadmin]=\"daemon.notice@loghost5:514/netdata\"\nrole_recipients_syslog[sitemgr]=\"daemon.notice@loghost6:514/netdata\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG=\"YES\"\nSYSLOG_FACILITY='local6'\nDEFAULT_RECIPIENT_SYSLOG=\"daemon.notice@loghost6:514/netdata\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the 
console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/syslog/metadata.yaml" }, { @@ -22304,7 +22030,7 @@ export const integrations = [ "overview": "# Microsoft Teams\n\nYou can send Netdata alerts to Microsoft Teams using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to YES | YES | yes |\n| MSTEAMS_WEBHOOK_URL | set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. 
| | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_msteams[sysadmin]=\"CHANNEL1\"\nrole_recipients_msteams[domainadmin]=\"CHANNEL2\"\nrole_recipients_msteams[dba]=\"databases CHANNEL3\"\nrole_recipients_msteams[webmaster]=\"CHANNEL4\"\nrole_recipients_msteams[proxyadmin]=\"CHANNEL5\"\nrole_recipients_msteams[sitemgr]=\"CHANNEL6\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS=\"YES\"\nMSTEAMS_WEBHOOK_URL=\"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MSTEAMS=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": 
"notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/msteams/metadata.yaml" }, { @@ -22323,7 +22049,7 @@ export const integrations = [ "overview": "# Telegram\n\nSend notifications to Telegram using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen, supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to YES | YES | yes |\n| TELEGRAM_BOT_TOKEN | set `TELEGRAM_BOT_TOKEN` to your bot token. 
| | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set the `DEFAULT_RECIPIENT_TELEGRAM` variable in your config file to your Telegram chat ID (find it with @myidbot). Separate multiple chat IDs with spaces. To send alerts to a specific topic within a chat, use `chatID:topicID`. | | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_TELEGRAM` can be edited in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_telegram[sysadmin]=\"-49999333324\"\nrole_recipients_telegram[domainadmin]=\"-49999333389\"\nrole_recipients_telegram[dba]=\"-10099992222\"\nrole_recipients_telegram[webmaster]=\"-10099992222 -49999333389\"\nrole_recipients_telegram[proxyadmin]=\"-49999333344\"\nrole_recipients_telegram[sitemgr]=\"-49999333876\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM=\"YES\"\nTELEGRAM_BOT_TOKEN=\"111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\"\nDEFAULT_RECIPIENT_TELEGRAM=\"-49999333876\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/telegram/metadata.yaml" }, { @@ -22342,9 +22068,289 @@ export const integrations = [ 
"overview": "# Twilio\n\nSend notifications to Twilio using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Get your SID, and Token from https://www.twilio.com/console\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to YES | YES | yes |\n| TWILIO_ACCOUNT_SID | set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account's number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set DEFAULT_RECIPIENT_TWILIO to the number you want the alert notifications to be sent to. You can define multiple numbers like this: +15555555555 +17777777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_TWILIO with the recipient's number you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_twilio[sysadmin]=\"+15555555555\"\nrole_recipients_twilio[domainadmin]=\"+15555555556\"\nrole_recipients_twilio[dba]=\"+15555555557\"\nrole_recipients_twilio[webmaster]=\"+15555555558\"\nrole_recipients_twilio[proxyadmin]=\"+15555555559\"\nrole_recipients_twilio[sitemgr]=\"+15555555550\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO=\"YES\"\nTWILIO_ACCOUNT_SID=\"xxxxxxxxx\"\nTWILIO_ACCOUNT_TOKEN=\"xxxxxxxxxx\"\nTWILIO_NUMBER=\"xxxxxxxxxxx\"\nDEFAULT_RECIPIENT_TWILIO=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/twilio/metadata.yaml" }, + { + "id": "notify-cloud-awssns", + "meta": { + "name": "Amazon SNS", + "link": "https://aws.amazon.com/sns/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "awssns.png" + }, + "keywords": [ + "awssns" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the space as an 
**Admin**\n- The Space needs to be on a paid plan\n- An AWS account with AWS SNS access\n\n### AWS SNS Configuration\n\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On AWS SNS management console click on **Create topic**\n - On the **Details** section, select the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n3. Copy the **Topic ARN** in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the AWS SNS Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Topic ARN: The topic provided on AWS SNS (with region) for where to publish your notifications.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-discord", + "meta": { + "name": "Discord", + "link": "https://discord.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "discord.png" + }, + "keywords": [ + "discord", + "community" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n\n### Discord Server 
Configuration\n\n1. Go to **Server Settings** --> **Integrations**\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Keep note of the **Webhook URL** as you will need it for the configuration of the integration on the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Discord Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The URL you copied from the previous section\n - Channel Parameters: Select the channel type which the notifications will be sent to, if it is a Forum channel, you need to specify a thread name\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-ilert", + "meta": { + "name": "ilert", + "link": "https://www.ilert.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "ilert.svg" + }, + "keywords": [ + "ilert" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on ilert to add new Alert sources.\n\n### ilert Configuration\n\n1. From the navigation bar, open the Alert sources drop down and click \"Alert sources\"\n2. Click on the \"+ Create a new alert source\" button\n3. 
Configure an Alert source:\n - Select \"API integration\" and click Next\n - Provide a name that suits the source's purpose, for example \"Netdata\"\n - Select Escalation policy\n - Select Alert grouping (optional)\n4. Obtain the API Key:\n - Once the Alert source is created, you will be provided with an API key. Copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the ilert Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Alert Source API key: The key you copied in the ilert configuration step.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-mattermost", + "meta": { + "name": "Mattermost", + "link": "https://mattermost.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "mattermost.png" + }, + "keywords": [ + "mattermost" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Mattermost to add new integrations.\n\n### Mattermost Server Configuration\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don't have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. 
They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook.\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook URL that looks like `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Mattermost Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Mattermost for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-microsoftteams", + "meta": { + "name": "Microsoft Teams", + "link": "https://www.microsoft.com/en-us/microsoft-teams", + "categories": [ + "notify.cloud" + ], + "icon_filename": "teams.svg" + }, + "keywords": [ + "microsoft", + "teams" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid 
plan\n- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. Note that this is a **paid** feature\n\n### Microsoft Teams Configuration\n\n1. Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots icon that appears\n2. Select \"Workflows\" from the options, then choose \"Post to a channel when a webhook request is received\"\n3. **Configure Workflow Details**\n - Give your workflow a name, such as \"Netdata Alerts\"\n - Select the target team and channel where you will receive notifications\n - Click \"Add workflow\"\n4. Once the workflow is created, you will receive a unique Workflow Webhook URL, copy it, in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Microsoft Teams Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Microsoft Teams Incoming Webhook URL: The Incoming Webhook URL that you copied earlier.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-mobile-app", + "meta": { + "name": "Netdata Mobile App", + "link": "https://netdata.cloud", + "categories": [ + "notify.cloud" + ], + "icon_filename": "netdata.png" + }, + "keywords": [ + "mobile-app", + "phone", + "personal-notifications" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- You need to have the Netdata Mobile App installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration and device linking\n\nIn order to login to the Netdata Mobile App\n\n1. Download the Netdata Mobile App from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. 
Open the App and Choose your Sign-in option\n - Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.\n - Sign-in with QR Code: Scan the QR code from the Netdata Cloud UI under **Profile Picture** --> **Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n\n### Netdata Configuration\n\nAfter linking your device, enable the toggle for **Mobile App Notifications** under the same settings panel.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-opsgenie", + "meta": { + "name": "Opsgenie", + "link": "https://www.atlassian.com/software/opsgenie", + "categories": [ + "notify.cloud" + ], + "icon_filename": "opsgenie.png" + }, + "keywords": [ + "opsgenie", + "atlassian" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\n1. Go to the integrations tab of your team, click **Add integration**\n2. Pick **API** from the available integrations and copy the API Key in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Opsgenie Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - API Key: The key provided on Opsgenie for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-pagerduty", + "meta": { + "name": "PagerDuty", + "link": "https://www.pagerduty.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "pagerduty.png" + }, + "keywords": [ + "pagerduty" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a PagerDuty service to receive events using webhooks.\n\n### PagerDuty Server Configuration\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. On the third step of the service creation, select `Events API V2` Integration\n3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** in order to add them to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the PagerDuty Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Integration Key: A 32 character key provided by PagerDuty to receive events on your service.\n - Integration URL (Alert Events): The URL provided by PagerDuty where Netdata Cloud will send notifications.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-rocketchat", + "meta": { + "name": "RocketChat", + "link": "https://www.rocket.chat/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "rocketchat.png" + }, + "keywords": [ + "rocketchat" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on RocketChat to add new integrations.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat server to receive notifications from Netdata Cloud:\n\n1. In RocketChat, Navigate to Administration > Workspace > Integrations\n2. Click **+New** at the top right corner\n3. For more details about each parameter, check [Create a new incoming webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook)\n4. 
You will end up with a webhook endpoint that looks like `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the RocketChat Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on RocketChat for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-slack", + "meta": { + "name": "Slack", + "link": "https://slack.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "slack.png" + }, + "keywords": [ + "slack" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a Slack app on your workspace to receive the Webhooks.\n\n### Slack Server Configuration\n\n1. Create an app to receive webhook integrations. Check the [Slack documentation](https://api.slack.com/apps?new_app=1) for further details\n2. Install the app on your workspace\n3. 
Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n - Specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Slack Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Slack for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-splunk", + "meta": { + "name": "Splunk", + "link": "https://splunk.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "splunk-black.svg" + }, + "keywords": [ + "Splunk" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The URI and token for your Splunk HTTP Event Collector. 
Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions on how to set it up.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - HTTP Event Collector URI: The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token: The token that Splunk provided to you when you created the HTTP Event Collector\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-telegram", + "meta": { + "name": "Telegram", + "link": "https://telegram.org/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "telegram.svg" + }, + "keywords": [ + "Telegram" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Telegram bot token, chat ID and optionally the topic ID\n\n### Telegram Configuration\n\n- Bot token: To create one bot, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. 
**Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, eg. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n- To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it on a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Telegram Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Bot Token: The token of your bot\n - Chat ID: The chat id where your bot will deliver messages to\n - Topic ID: The identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. 
If topics are not supported, messages will be sent to the chat.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-victorops", + "meta": { + "name": "Splunk VictorOps", + "link": "https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html", + "categories": [ + "notify.cloud" + ], + "icon_filename": "victorops.svg" + }, + "keywords": [ + "VictorOps", + "Splunk", + "On-Call" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk VictorOps Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Destination URL - The URL provided by VictorOps of your REST endpoint.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-webhook", + "meta": { + "name": "Webhook", + "link": "https://en.wikipedia.org/wiki/Webhook", + "categories": [ + "notify.cloud" + ], + "icon_filename": "webhook.svg" + }, + "keywords": [ + "generic webhooks", + "webhooks" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Webhook integration\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The url of the service that Netdata will send notifications to. 
In order to keep the communication secured, Netdata only accepts HTTPS urls.\n - Extra headers: Optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism, Netdata webhook integration supports 3 different authentication mechanisms.\n - Mutual TLS (recommended): Default authentication mechanism used if no other method is selected\n - Basic: The client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**.\n - Bearer: The client sends a request with an Authorization header that includes a **bearer token**.\n\n### Webhook service\n\nA webhook service allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL.\n\nIn this section, we'll go over the steps to set up a generic webhook service, including adding headers, and implementing different types of authorization mechanisms.\n\n#### Netdata webhook integration\n\nNetdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.\n\nFor alert notifications, the content sent to the destination service contains a JSON object with the following properties:\n\n| field | type | description |\n|:----------------------------------|:--------------------------------|:--------------------------------------------------------------------------|\n| message | string | A summary message of the alert. |\n| alert | string | The alert the notification is related to. |\n| info | string | Additional info related with the alert. |\n| chart | string | The chart associated with the alert. |\n| context | string | The chart context. |\n| space | string | The space where the node that raised the alert is assigned. |\n| Rooms | object\\[object(string,string)\\] | Object with list of Rooms names and urls where the node belongs to. |\n| family | string | Context family. 
|\n| class | string | Classification of the alert, e.g. `Error`. |\n| severity | string | Alert severity, can be one of `warning`, `critical` or `clear`. |\n| date | string | Date of the alert in ISO8601 format. |\n| duration | string | Duration the alert has been raised. |\n| additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n| additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n| alert_url | string | Netdata Cloud URL for this alert. |\n\nFor reachability notifications, the JSON object will contain the following properties:\n\n| field | type | description |\n|:-----------------|:--------|:------------------------------------------------------------------------------------------------------------------------------|\n| message | string | A summary message of the reachability alert. |\n| url | string | Netdata Cloud URL for the host experiencing the reachability alert. |\n| host | string | The hostname experiencing the reachability alert. |\n| severity | string | Severity for this notification. If host is reachable, severity will be `info`, if host is unreachable, it will be `critical`. |\n| status | object | An object with the status information. 
|\n| status.reachable | boolean | `true` if host is reachable, `false` otherwise |\n| status.text | string | Can be `reachable` or `unreachable` |\n\n#### Extra headers\n\nWhen setting up a webhook service, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\nBy default, the following headers will be sent in the HTTP request\n\n | **Header** | **Value** |\n |:------------:|------------------|\n | Content-Type | application/json |\n\n#### Authentication mechanisms\n\nNetdata webhook integration supports 3 different authentication mechanisms:\n\n##### Mutual TLS authentication (recommended)\n\nIn mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\nThis is the default authentication mechanism used if no other method is selected.\n\nTo take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.\n\nThe steps to perform this validation are as follows:\n\n- Store Netdata CA certificate on a file in your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```text\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n 
FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n\n
\n\n- Enable client certificate validation on the web server that is doing the TLS termination. Below there are examples on how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n \n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n \n \n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n \n ```\n\n##### Basic authentication\n\nIn basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n##### Bearer token authentication\n\nIn bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. 
If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n##### Challenge secret\n\nTo validate that you have ownership of the web application that will receive the webhook events, Netdata is using a challenge response check mechanism.\n\nThis mechanism works as follows:\n\n- The challenge secret parameter that you provide is a shared secret between only you and Netdata.\n- On your request for creating a new Webhook integration, Netdata will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n- You will receive this request on your application and it must construct an encrypted response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n- Netdata will compare your application's response with the hash that it will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\nNetdata does this validation every time you update your integration configuration.\n\n- Response requirements:\n - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n - Valid response_token and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n**Example response token generation in Python:**\n\nHere you can see how to define a handler for a Flask application in python 3:\n\n```python\nimport base64\nimport hashlib\nimport hmac\nimport json\n\nkey ='YOUR_CHALLENGE_SECRET'\n\n@app.route('/webhooks/netdata')\ndef webhook_challenge():\ntoken = request.args.get('crc_token').encode('ascii')\n\n# creates HMAC SHA-256 hash from incoming token and your consumer secret\nsha256_hash_digest = hmac.new(key.encode(),\n msg=token,\n digestmod=hashlib.sha256).digest()\n\n# construct response data with 
base64 encoded hash\nresponse = {\n 'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n}\n\n# returns properly formatted json response\nreturn json.dumps(response)\n```\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "logs-systemd-journal", + "meta": { + "name": "Systemd Journal Logs", + "link": "https://github.com/netdata/netdata/blob/master/src/collectors/systemd-journal.plugin/README.md", + "categories": [ + "logs" + ], + "icon_filename": "netdata.png" + }, + "keywords": [ + "systemd", + "journal", + "logs" + ], + "overview": "# Systemd Journal Logs\n\nThe `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and efficient.\n\nIt automatically discovers available journal sources, allows advanced filtering, offers interactive visual representations and supports exploring the logs of both individual servers and the logs on infrastructure wide journal centralization servers.\n\nThe plugin automatically detects the available journal sources, based on the journal files available in `/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs).\n\n\n## Visualization\n\nYou can start exploring `systemd` journal logs on the \"Logs\" tab of the Netdata UI.\n\n\n## Key features\n\n- Works on both **individual servers** and **journal centralization servers**.\n- Supports `persistent` and `volatile` journals.\n- Supports `system`, `user`, `namespaces` and `remote` journals.\n- Allows filtering on **any journal field** or **field value**, for any time-frame.\n- Allows **full text search** (`grep`) on all journal fields, for any time-frame.\n- Provides a **histogram** for log entries over time, with a break down per field-value, for any field and any time-frame.\n- Works directly on journal files, without any other third-party 
components.\n- Supports coloring log entries, the same way `journalctl` does.\n- In PLAY mode provides the same experience as `journalctl -f`, showing new log entries immediately after they are received.\n", + "setup": "## Setup\n\n## Prerequisites\n\n- A Netdata Cloud account\n\n\n## Configuration\n\nThere is no configuration needed for this integration.\n", + "integration_type": "logs", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml" + }, { "id": "oidc-authentication", "meta": { diff --git a/integrations/integrations.json b/integrations/integrations.json index ddf0234459bdaa..baa3c2260c15af 100644 --- a/integrations/integrations.json +++ b/integrations/integrations.json @@ -20109,7 +20109,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 40 | Core | x86_64, aarch64 | |\n| 39 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 41 | Core | x86_64, aarch64 | |\n| 40 | Core | x86_64, aarch64 | |\n| 39 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -20600,7 +20600,7 @@ ], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, - "platform_info": "We build 
native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 24.04 | Core | amd64, armhf, arm64 | |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", + "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 24.04 | Core | amd64, armhf, arm64 | |\n| 24.10 | Core | amd64, armhf, arm64 | |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.\n", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml" @@ -21587,7 +21587,7 @@ "overview": "# Alerta\n\nThe [Alerta](https://alerta.io/) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick \u2018at-a-glance\u2019 visualization. 
With just one system you can monitor alerts from many other monitoring tools on a single screen.\nYou can send Netdata alerts to Alerta to see alerts coming from many Netdata hosts or also from a multi-host Netdata configuration.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | set `ALERTA_WEBHOOK_URL` to the API url you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role, by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source, if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2. 
Create a new API key called \"netdata\" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_alerta[sysadmin]=\"Systems\"\nrole_recipients_alerta[domainadmin]=\"Domains\"\nrole_recipients_alerta[dba]=\"Databases Systems\"\nrole_recipients_alerta[webmaster]=\"Marketing Development\"\nrole_recipients_alerta[proxyadmin]=\"Proxy\"\nrole_recipients_alerta[sitemgr]=\"Sites\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with `ALLOWED_ENVIRONMENTS` option.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA=\"YES\"\nALERTA_WEBHOOK_URL=\"http://yourserver/alerta/api\"\nALERTA_API_KEY=\"INSERT_YOUR_API_KEY_HERE\"\nDEFAULT_RECIPIENT_ALERTA=\"Production\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/alerta/metadata.yaml" }, { @@ -21606,283 +21606,9 @@ "overview": "# AWS SNS\n\nAs part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' (SNS). 
Amazon SNS works similarly to Netdata's own notification system, allowing to dispatch a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to:\n- Email addresses\n- Mobile Phones via SMS\n- HTTP or HTTPS web hooks\n- AWS Lambda functions\n- AWS SQS queues\n- Mobile applications via push notifications\nYou can send notifications through Amazon SNS using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.\n- For email notification support, we recommend using Netdata's email notifications, as it has the following benefits:\n - In most cases, it requires less configuration.\n - Netdata's emails are nicely pre-formatted and support features like threading, which requires a lot of manual effort in SNS.\n - It is less resource intensive and more cost-efficient than SNS.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory. If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. 
For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSSNS | Set `SEND_AWSSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to the string that you want the alert to be sent into. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. 
| | yes |\n\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to 
represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_awssns[sysadmin]=\"arn:aws:sns:us-east-2:123456789012:Systems\"\nrole_recipients_awssns[domainadmin]=\"arn:aws:sns:us-east-2:123456789012:Domains\"\nrole_recipients_awssns[dba]=\"arn:aws:sns:us-east-2:123456789012:Databases\"\nrole_recipients_awssns[webmaster]=\"arn:aws:sns:us-east-2:123456789012:Development\"\nrole_recipients_awssns[proxyadmin]=\"arn:aws:sns:us-east-2:123456789012:Proxy\"\nrole_recipients_awssns[sitemgr]=\"arn:aws:sns:us-east-2:123456789012:Sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```text\n#------------------------------------------------------------------------------\n# Amazon SNS notifications\n\nSEND_AWSSNS=\"YES\"\nAWSSNS_MESSAGE_FORMAT=\"${status} on ${host} at ${date}: ${chart} ${value_string}\"\nDEFAULT_RECIPIENT_AWSSNS=\"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport 
NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/awssns/metadata.yaml" }, - { - "id": "notify-cloud-awssns", - "meta": { - "name": "Amazon SNS", - "link": "https://aws.amazon.com/sns/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "awssns.png" - }, - "keywords": [ - "awssns" - ], - "overview": "# Amazon SNS\n\nYou can configure notification delivery to AWS SNS from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space needs to be on a paid plan\n- An AWS account with AWS SNS access\n\n### AWS SNS Configuration\n\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On AWS SNS management console click on **Create topic**\n - On the **Details** section, select the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n3. Copy the **Topic ARN** in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the AWS SNS Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Topic ARN: The topic provided on AWS SNS (with region) for where to publish your notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-discord", - "meta": { - "name": "Discord", - "link": "https://discord.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "discord.png" - }, - "keywords": [ - "discord", - "community" - ], - "overview": "# Discord\n\nYou can configure notification delivery to your Discord server from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n\n### Discord Server Configuration\n\n1. Go to **Server Settings** --> **Integrations**\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Keep note of the **Webhook URL** as you will need it for the configuration of the integration on the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Discord Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The URL you copied from the previous section\n - Channel Parameters: Select the channel type which the notifications will be sent to, if it is a Forum channel, you need to specify a thread name\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-ilert", - "meta": { - "name": "ilert", - "link": "https://www.ilert.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "ilert.svg" - }, - "keywords": [ - "ilert" - ], - "overview": "# ilert\n\nYou can configure notification delivery to ilert from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on ilert to add new Alert sources.\n\n### ilert Configuration\n\n1. From the navigation bar, open the Alert sources drop down and click \"Alert sources\"\n2. Click on the \"+ Create a new alert source\" button\n3. Configure an Alert source:\n - Select \"API integration\" and click Next\n - Provide a name that suits the source's purpose, for example \"Netdata\"\n - Select Escalation policy\n - Select Alert grouping (optional)\n4. Obtain the API Key:\n - Once the Alert source is created, you will be provided with an API key. Copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. 
Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the ilert Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Alert Source API key: The key you copied in the ilert configuration step.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-mattermost", - "meta": { - "name": "Mattermost", - "link": "https://mattermost.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "mattermost.png" - }, - "keywords": [ - "mattermost" - ], - "overview": "# Mattermost\n\nYou can configure notification delivery to Mattermost from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Mattermost to add new integrations.\n\n### Mattermost Server Configuration\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don't have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook.\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. 
You will end up with a webhook URL that looks like `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Mattermost Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Mattermost for the channel you want to receive your notifications\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-microsoftteams", - "meta": { - "name": "Microsoft Teams", - "link": "https://www.microsoft.com/en-us/microsoft-teams", - "categories": [ - "notify.cloud" - ], - "icon_filename": "teams.svg" - }, - "keywords": [ - "microsoft", - "teams" - ], - "overview": "# Microsoft Teams\n\nYou can configure notification delivery to a Microsoft Teams channel from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. Note that this is a **paid** feature\n\n### Microsoft Teams Configuration\n\n1. 
Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots icon that appears\n2. Select \"Workflows\" from the options, then choose \"Post to a channel when a webhook request is received\"\n3. **Configure Workflow Details**\n - Give your workflow a name, such as \"Netdata Alerts\"\n - Select the target team and channel where you will receive notifications\n - Click \"Add workflow\"\n4. Once the workflow is created, you will receive a unique Workflow Webhook URL, copy it, in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Microsoft Teams Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Microsoft Teams Incoming Webhook URL: The Incoming Webhook URL that you copied earlier.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-mobile-app", - "meta": { - "name": "Netdata Mobile App", - "link": "https://netdata.cloud", - "categories": [ - "notify.cloud" - ], - "icon_filename": "netdata.png" - }, - "keywords": [ - "mobile-app", - "phone", - "personal-notifications" - ], - "overview": "# Netdata Mobile App\n\nYou can configure notification delivery to the Netdata Mobile Application from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- You need to 
have the Netdata Mobile App installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration and device linking\n\nIn order to login to the Netdata Mobile App\n\n1. Download the Netdata Mobile App from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. Open the App and Choose your Sign-in option\n - Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.\n - Sign-in with QR Code: Scan the QR code from the Netdata Cloud UI under **Profile Picture** --> **Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n\n### Netdata Configuration\n\nAfter linking your device, enable the toggle for **Mobile App Notifications** under the same settings panel.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-opsgenie", - "meta": { - "name": "Opsgenie", - "link": "https://www.atlassian.com/software/opsgenie", - "categories": [ - "notify.cloud" - ], - "icon_filename": "opsgenie.png" - }, - "keywords": [ - "opsgenie", - "atlassian" - ], - "overview": "# Opsgenie\n\nYou can configure notification delivery to Opsgenie from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\n1. Go to the integrations tab of your team, click **Add integration**\n2. 
Pick **API** from the available integrations and copy the API Key in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Opsgenie Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - API Key: The key provided on Opsgenie for the channel you want to receive your notifications\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-pagerduty", - "meta": { - "name": "PagerDuty", - "link": "https://www.pagerduty.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "pagerduty.png" - }, - "keywords": [ - "pagerduty" - ], - "overview": "# PagerDuty\n\nYou can configure notification delivery to PagerDuty from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a PagerDuty service to receive events using webhooks.\n\n### PagerDuty Server Configuration\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. On the third step of the service creation, select `Events API V2` Integration\n3. 
Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** in order to add them to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the PagerDuty Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Integration Key: A 32 character key provided by PagerDuty to receive events on your service.\n - Integration URL (Alert Events): The URL provided by PagerDuty where Netdata Cloud will send notifications.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-rocketchat", - "meta": { - "name": "RocketChat", - "link": "https://www.rocket.chat/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "rocketchat.png" - }, - "keywords": [ - "rocketchat" - ], - "overview": "# RocketChat\n\nYou can configure notification delivery to RocketChat from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on RocketChat to add new integrations.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat server to receive notifications from Netdata Cloud:\n\n1. 
In RocketChat, Navigate to Administration > Workspace > Integrations\n2. Click **+New** at the top right corner\n3. For more details about each parameter, check [Create a new incoming webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook)\n4. You will end up with a webhook endpoint that looks like `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the PagerDuty Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on RocketChat for the channel you want to receive your notifications\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-slack", - "meta": { - "name": "Slack", - "link": "https://slack.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "slack.png" - }, - "keywords": [ - "slack" - ], - "overview": "# Slack\n\nYou can configure notification delivery to Slack from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- 
Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a Slack app on your workspace to receive the Webhooks.\n\n### Slack Server Configuration\n\n1. Create an app to receive webhook integrations. Check the [Slack documentation](https://api.slack.com/apps?new_app=1) for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n - Specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Slack Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Slack for the channel you want to receive your notifications\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-splunk", - "meta": { - "name": "Splunk", - "link": "https://splunk.com/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "splunk-black.svg" - }, - "keywords": [ - "Splunk" - ], - "overview": "# Splunk\n\nYou can configure notification delivery to Splunk from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions on how to set it up.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - HTTP Event Collector URI: The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token: The token that Splunk provided to you when you created the HTTP Event Collector\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-telegram", - "meta": { - "name": "Telegram", - "link": "https://telegram.org/", - "categories": [ - "notify.cloud" - ], - "icon_filename": "telegram.svg" - }, - "keywords": [ - "Telegram" - ], - "overview": "# Telegram\n\nYou can configure notification delivery to Telegram from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Telegram bot token, chat ID and optionally the topic ID\n\n### Telegram Configuration\n\n- Bot token: To create one bot, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. 
Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, eg. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n- To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it on a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Telegram Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Bot Token: The token of your bot\n - Chat ID: The chat id where your bot will deliver messages to\n - Topic ID: The identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. 
If topics are not supported, messages will be sent to the chat.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-victorops", - "meta": { - "name": "Splunk VictorOps", - "link": "https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html", - "categories": [ - "notify.cloud" - ], - "icon_filename": "victorops.svg" - }, - "keywords": [ - "VictorOps", - "Splunk", - "On-Call" - ], - "overview": "# Splunk VictorOps\n\nYou can configure notification delivery to Splunk On-Call/VictorOps from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk VictorOps Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Destination URL - The URL provided by VictorOps of your REST endpoint.\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, - { - "id": "notify-cloud-webhook", - "meta": { - "name": "Webhook", - "link": "https://en.wikipedia.org/wiki/Webhook", - "categories": [ - "notify.cloud" - ], - "icon_filename": "webhook.svg" - }, - "keywords": [ - "generic webhooks", - "webhooks" - ], - "overview": "# Webhook\n\nYou can configure notification delivery to a webhook using a predefined schema from the Netdata Cloud UI.\n", - "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Webhook integration\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The url of the service that Netdata will send notifications to. 
In order to keep the communication secured, Netdata only accepts HTTPS urls.\n - Extra headers: Optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism, Netdata webhook integration supports 3 different authentication mechanisms.\n - Mutual TLS (recommended): Default authentication mechanism used if no other method is selected\n - Basic: The client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**.\n - Bearer: The client sends a request with an Authorization header that includes a **bearer token**.\n\n### Webhook service\n\nA webhook service allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL.\n\nIn this section, we'll go over the steps to set up a generic webhook service, including adding headers, and implementing different types of authorization mechanisms.\n\n#### Netdata webhook integration\n\nNetdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.\n\nFor alert notifications, the content sent to the destination service contains a JSON object with the following properties:\n\n| field | type | description |\n|:----------------------------------|:--------------------------------|:--------------------------------------------------------------------------|\n| message | string | A summary message of the alert. |\n| alert | string | The alert the notification is related to. |\n| info | string | Additional info related with the alert. |\n| chart | string | The chart associated with the alert. |\n| context | string | The chart context. |\n| space | string | The space where the node that raised the alert is assigned. |\n| Rooms | object\\[object(string,string)\\] | Object with list of Rooms names and urls where the node belongs to. |\n| family | string | Context family. 
|\n| class | string | Classification of the alert, e.g. `Error`. |\n| severity | string | Alert severity, can be one of `warning`, `critical` or `clear`. |\n| date | string | Date of the alert in ISO8601 format. |\n| duration | string | Duration the alert has been raised. |\n| additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n| additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n| alert_url | string | Netdata Cloud URL for this alert. |\n\nFor reachability notifications, the JSON object will contain the following properties:\n\n| field | type | description |\n|:-----------------|:--------|:------------------------------------------------------------------------------------------------------------------------------|\n| message | string | A summary message of the reachability alert. |\n| url | string | Netdata Cloud URL for the host experiencing the reachability alert. |\n| host | string | The hostname experiencing the reachability alert. |\n| severity | string | Severity for this notification. If host is reachable, severity will be `info`, if host is unreachable, it will be `critical`. |\n| status | object | An object with the status information. 
|\n| status.reachable | boolean | `true` if host is reachable, `false` otherwise |\n| status.text | string | Can be `reachable` or `unreachable` |\n\n#### Extra headers\n\nWhen setting up a webhook service, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\nBy default, the following headers will be sent in the HTTP request\n\n | **Header** | **Value** |\n |:------------:|------------------|\n | Content-Type | application/json |\n\n#### Authentication mechanisms\n\nNetdata webhook integration supports 3 different authentication mechanisms:\n\n##### Mutual TLS authentication (recommended)\n\nIn mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\nThis is the default authentication mechanism used if no other method is selected.\n\nTo take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.\n\nThe steps to perform this validation are as follows:\n\n- Store Netdata CA certificate on a file in your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```text\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n 
FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n\n
\n\n- Enable client certificate validation on the web server that is doing the TLS termination. Below there are examples on how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n \n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n \n \n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n \n ```\n\n##### Basic authentication\n\nIn basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n##### Bearer token authentication\n\nIn bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. 
If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n##### Challenge secret\n\nTo validate that you have ownership of the web application that will receive the webhook events, Netdata is using a challenge response check mechanism.\n\nThis mechanism works as follows:\n\n- The challenge secret parameter that you provide is a shared secret between only you and Netdata.\n- On your request for creating a new Webhook integration, Netdata will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n- You will receive this request on your application and it must construct an encrypted response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n- Netdata will compare your application's response with the hash that it will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\nNetdata does this validation every time you update your integration configuration.\n\n- Response requirements:\n - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n - Valid response_token and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n**Example response token generation in Python:**\n\nHere you can see how to define a handler for a Flask application in python 3:\n\n```python\nimport base64\nimport hashlib\nimport hmac\nimport json\n\nkey ='YOUR_CHALLENGE_SECRET'\n\n@app.route('/webhooks/netdata')\ndef webhook_challenge():\ntoken = request.args.get('crc_token').encode('ascii')\n\n# creates HMAC SHA-256 hash from incomming token and your consumer secret\nsha256_hash_digest = hmac.new(key.encode(),\n msg=token,\n digestmod=hashlib.sha256).digest()\n\n# construct response data with 
base64 encoded hash\nresponse = {\n 'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n}\n\n# returns properly formatted json response\nreturn json.dumps(response)\n```\n\n", - "integration_type": "notification", - "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", - "troubleshooting": "" - }, { "id": "notify-custom", "meta": { @@ -21899,7 +21625,7 @@ "overview": "# Custom\n\nNetdata Agent's alert notification feature allows you to send custom notifications to any endpoint you choose.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. 
You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```\nrole_recipients_custom[sysadmin]=\"systems\"\nrole_recipients_custom[domainadmin]=\"domains\"\nrole_recipients_custom[dba]=\"databases systems\"\nrole_recipients_custom[webmaster]=\"marketing development\"\nrole_recipients_custom[proxyadmin]=\"proxy-admin\"\nrole_recipients_custom[sitemgr]=\"sites\"\n```\n\n\n##### custom_sender()\n\nThe following is a sample custom_sender() function in health_alarm_notify.conf, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:\n```\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n```\n\nThe supported variables that you can use for the function's `msg` variable are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host generated this event |\n| `${url_host}` | 
Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n#### Examples\n\n##### Basic 
Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM=\"YES\"\nDEFAULT_RECIPIENT_CUSTOM=\"\"\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/custom/metadata.yaml" }, { @@ -21918,7 +21644,7 @@ "overview": "# Discord\n\nSend notifications to Discord using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, 
and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_discord[sysadmin]=\"systems\"\nrole_recipients_discord[domainadmin]=\"domains\"\nrole_recipients_discord[dba]=\"databases systems\"\nrole_recipients_discord[webmaster]=\"marketing development\"\nrole_recipients_discord[proxyadmin]=\"proxy-admin\"\nrole_recipients_discord[sitemgr]=\"sites\"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD=\"YES\"\nDISCORD_WEBHOOK_URL=\"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_DISCORD=\"alerts\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/discord/metadata.yaml" }, { @@ -21937,7 +21663,7 @@ "overview": "# Dynatrace\n\nDynatrace allows you to receive notifications using their Events REST API. 
See the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event) about POSTing an event in the Events API for more details.\nYou can send notifications to Dynatrace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers but make sure the server is network visible from your Netdata hosts. The Dynatrace server should be with protocol prefixed (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you have access in order to generate the API Token. For example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag. 
The Netdata alarm is sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. | | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space, it is the URL part of the page you have access in order to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left to its default value Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. 
| Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE=\"YES\"\nDYNATRACE_SERVER=\"https://monitor.example.com\"\nDYNATRACE_TOKEN=\"XXXXXXX\"\nDYNATRACE_SPACE=\"2a93fe0e-4cd5-469a-9d0d-1a064235cfce\"\nDYNATRACE_TAG_VALUE=\"SERVERTAG\"\nDYNATRACE_ANNOTATION_TYPE=\"Netdata Alert\"\nDYNATRACE_EVENT=\"AVAILABILITY_EVENT\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/dynatrace/metadata.yaml" }, { @@ -21956,7 +21682,7 @@ "overview": "# Email\n\nSend notifications via Email using Netdata's Agent alert 
notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working sendmail command is required for email alerts to work. Almost all MTAs provide a sendmail interface. Netdata sends all emails as user netdata, so make sure your sendmail works for local users.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the email to be sent by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. 
| root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_EMAIL` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_email[sysadmin]=\"systems@example.com\"\nrole_recipients_email[domainadmin]=\"domains@example.com\"\nrole_recipients_email[dba]=\"databases@example.com systems@example.com\"\nrole_recipients_email[webmaster]=\"marketing@example.com development@example.com\"\nrole_recipients_email[proxyadmin]=\"proxy-admin@example.com\"\nrole_recipients_email[sitemgr]=\"sites@example.com\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER=\"example@domain.com\"\nSEND_EMAIL=\"YES\"\nDEFAULT_RECIPIENT_EMAIL=\"recipient@example.com\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/email/metadata.yaml" }, { @@ -21975,7 +21701,7 @@ "overview": "# Flock\n\nSend notifications to Flock using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. 
You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_flock[sysadmin]=\"systems\"\nrole_recipients_flock[domainadmin]=\"domains\"\nrole_recipients_flock[dba]=\"databases systems\"\nrole_recipients_flock[webmaster]=\"marketing development\"\nrole_recipients_flock[proxyadmin]=\"proxy-admin\"\nrole_recipients_flock[sitemgr]=\"sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK=\"YES\"\nFLOCK_WEBHOOK_URL=\"https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_FLOCK=\"alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/flock/metadata.yaml" }, { @@ -21994,7 +21720,7 @@ "overview": "# Gotify\n\n[Gotify](https://gotify.net/) is a self-hosted push notification service created for sending and receiving messages in real time.\nYou can send alerts to your Gotify instance using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### \n\n- An application token. You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | set `GOTIFY_APP_TOKEN` to the app token you generated. | | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/` | | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY=\"YES\"\nGOTIFY_APP_TOKEN=\"XXXXXXXXXXXXXXX\"\nGOTIFY_APP_URL=\"https://push.example.domain/\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": 
"https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml" }, { @@ -22013,7 +21739,7 @@ "overview": "# ilert\n\nilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates.\nSending notification to ilert via Netdata's Agent alert notification feature includes links, images and resolving of corresponding alerts.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Netdata alert source in ilert. You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ILERT | Set `SEND_ILERT` to YES | YES | yes |\n| ILERT_ALERT_SOURCE_URL | Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source url in ilert. 
| | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_ILERT=\"YES\"\nILERT_ALERT_SOURCE_URL=\"https://api.ilert.com/api/v1/events/netdata/{API-KEY}\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ilert/metadata.yaml" }, { @@ -22032,7 +21758,7 @@ "overview": "# IRC\n\nSend notifications to IRC using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. 
You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for nc, otherwise Netdata will search for it in your system $PATH | | yes |\n| SEND_IRC | Set `SEND_IRC` YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. | | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name as the connection's MODE is defined as a guest. | | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. 
| | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc=\"/usr/bin/nc\"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_irc[sysadmin]=\"#systems\"\nrole_recipients_irc[domainadmin]=\"#domains\"\nrole_recipients_irc[dba]=\"#databases #systems\"\nrole_recipients_irc[webmaster]=\"#marketing #development\"\nrole_recipients_irc[proxyadmin]=\"#proxy-admin\"\nrole_recipients_irc[sitemgr]=\"#sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC=\"YES\"\nDEFAULT_RECIPIENT_IRC=\"#system-alarms\"\nIRC_NETWORK=\"irc.freenode.net\"\nIRC_NICKNAME=\"netdata-alarm-user\"\nIRC_REALNAME=\"netdata-user\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/irc/metadata.yaml" }, { @@ 
-22051,7 +21777,7 @@ "overview": "# Kavenegar\n\n[Kavenegar](https://kavenegar.com/) as service for software developers, based in Iran, provides send and receive SMS, calling voice by using its APIs.\nYou can send notifications to Kavenegar using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. | | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: 09155555555 09177777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_kavenegar[sysadmin]=\"09100000000\"\nrole_recipients_kavenegar[domainadmin]=\"09111111111\"\nrole_recipients_kavenegar[dba]=\"0922222222\"\nrole_recipients_kavenegar[webmaster]=\"0933333333\"\nrole_recipients_kavenegar[proxyadmin]=\"0944444444\"\nrole_recipients_kavenegar[sitemgr]=\"0955555555\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with `ALLOWED_ENVIRONMENTS` option.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR=\"YES\"\nKAVENEGAR_API_KEY=\"XXXXXXXXXXXX\"\nKAVENEGAR_SENDER=\"YYYYYYYY\"\nDEFAULT_RECIPIENT_KAVENEGAR=\"0912345678\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/kavenegar/metadata.yaml" }, { @@ -22070,7 +21796,7 @@ "overview": "# Matrix\n\nSend notifications to Matrix network rooms using Netdata's Agent alert notification feature, which supports dozens of 
endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The url of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). These tokens usually don't expire.\n- The Room ids that you want to send the notification to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. | | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the Rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. | | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```\ncurl -XPOST -d '{\"type\":\"m.login.password\", \"user\":\"example\", \"password\":\"wordpass\"}' \"https://homeserver:8448/_matrix/client/r0/login\"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe Room ids are unique identifiers and can be obtained from the Room settings in a Matrix client (e.g. 
Riot).\n\nYou can define multiple Rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different Rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_matrix[sysadmin]=\"!roomid1:homeservername\"\nrole_recipients_matrix[domainadmin]=\"!roomid2:homeservername\"\nrole_recipients_matrix[dba]=\"!roomid3:homeservername\"\nrole_recipients_matrix[webmaster]=\"!roomid4:homeservername\"\nrole_recipients_matrix[proxyadmin]=\"!roomid5:homeservername\"\nrole_recipients_matrix[sitemgr]=\"!roomid6:homeservername\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX=\"YES\"\nMATRIX_HOMESERVER=\"https://matrix.org:8448\"\nMATRIX_ACCESSTOKEN=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MATRIX=\"!XXXXXXXXXXXX:matrix.org\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/matrix/metadata.yaml" }, { @@ -22089,7 +21815,7 @@ "overview": "# MessageBird\n\nSend notifications to MessageBird using Netdata's Agent alert notification 
feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under 'API ACCESS (REST)' (you will want a live key), you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notification to be sent as an SMS. You can define multiple recipients like this: +15555555555 +17777777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_messagebird[sysadmin]=\"+15555555555\"\nrole_recipients_messagebird[domainadmin]=\"+15555555556\"\nrole_recipients_messagebird[dba]=\"+15555555557\"\nrole_recipients_messagebird[webmaster]=\"+15555555558\"\nrole_recipients_messagebird[proxyadmin]=\"+15555555559\"\nrole_recipients_messagebird[sitemgr]=\"+15555555550\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD=\"YES\"\nMESSAGEBIRD_ACCESS_KEY=\"XXXXXXXX\"\nMESSAGEBIRD_NUMBER=\"XXXXXXX\"\nDEFAULT_RECIPIENT_MESSAGEBIRD=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/messagebird/metadata.yaml" }, { @@ -22108,7 +21834,7 @@ "overview": "# ntfy\n\n[ntfy](https://ntfy.sh/) (pronounce: notify) is a simple HTTP-based [pub-sub](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) notification service. 
It allows you to send notifications to your phone or desktop via scripts from any computer, entirely without signup, cost or setup. It's also [open source](https://github.com/binwiederhier/ntfy) if you want to run your own server.\nYou can send alerts to an ntfy server using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don't want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. | | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. 
| | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_ntfy[sysadmin]=\"https://SERVER1/TOPIC1\"\nrole_recipients_ntfy[domainadmin]=\"https://SERVER2/TOPIC2\"\nrole_recipients_ntfy[dba]=\"https://SERVER3/TOPIC3\"\nrole_recipients_ntfy[webmaster]=\"https://SERVER4/TOPIC4\"\nrole_recipients_ntfy[proxyadmin]=\"https://SERVER5/TOPIC5\"\nrole_recipients_ntfy[sitemgr]=\"https://SERVER6/TOPIC6\"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. 
See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY=\"YES\"\nDEFAULT_RECIPIENT_NTFY=\"https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ntfy/metadata.yaml" }, { @@ -22127,7 +21853,7 @@ "overview": "# OpsGenie\n\nOpsgenie is an alerting and incident response tool. It is designed to group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to incidents.\nYou can send notifications to Opsgenie using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. 
You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required, for example there are region-specific API URLs such as `https://eu.api.opsgenie.com`. 
| https://api.opsgenie.com | no |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE=\"YES\"\nOPSGENIE_API_KEY=\"11111111-2222-3333-4444-555555555555\"\nOPSGENIE_API_URL=\"\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/opsgenie/metadata.yaml" }, { @@ -22146,7 +21872,7 @@ "overview": "# PagerDuty\n\nPagerDuty is an enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.\nYou can send notifications to PagerDuty using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. 
Click Use our API directly and select either `Events API v2` or `Events API v1`. Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pd[sysadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa\"\nrole_recipients_pd[domainadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb\"\nrole_recipients_pd[dba]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc\"\nrole_recipients_pd[webmaster]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd\"\nrole_recipients_pd[proxyadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\nrole_recipients_pd[sitemgr]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD=\"YES\"\nDEFAULT_RECIPIENT_PD=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nUSE_PD_VERSION=\"2\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pagerduty/metadata.yaml" }, { @@ -22165,7 +21891,7 @@ "overview": "# Prowl\n\nSend notifications to Prowl using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- Because of how Netdata integrates with Prowl, there is a hard limit of at most 1000 notifications per hour (starting from the 
first notification sent). Any alerts beyond the first thousand in an hour will be dropped.\n- Warning messages will be sent with the 'High' priority, critical messages will be sent with the 'Emergency' priority, and all other messages will be sent with the normal priority. Opening the notification's associated URL will take you to the Netdata dashboard of the system that issued the alert, directly to the chart that it triggered on.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. You can define multiple API keys like this: `APIKEY1`, `APIKEY2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_prowl[sysadmin]=\"AAAAAAAA\"\nrole_recipients_prowl[domainadmin]=\"BBBBBBBBB\"\nrole_recipients_prowl[dba]=\"CCCCCCCCC\"\nrole_recipients_prowl[webmaster]=\"DDDDDDDDDD\"\nrole_recipients_prowl[proxyadmin]=\"EEEEEEEEEE\"\nrole_recipients_prowl[sitemgr]=\"FFFFFFFFFF\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL=\"YES\"\nDEFAULT_RECIPIENT_PROWL=\"XXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/prowl/metadata.yaml" }, { @@ -22184,7 +21910,7 @@ "overview": "# Pushbullet\n\nSend notifications to Pushbullet using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name 
for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHBULLET | Set `SEND_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: user1@email.com user2@email.com.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pushbullet[sysadmin]=\"user1@email.com\"\nrole_recipients_pushbullet[domainadmin]=\"user2@mail.com\"\nrole_recipients_pushbullet[dba]=\"#channel1\"\nrole_recipients_pushbullet[webmaster]=\"#channel2\"\nrole_recipients_pushbullet[proxyadmin]=\"user3@mail.com\"\nrole_recipients_pushbullet[sitemgr]=\"user4@mail.com\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET=\"YES\"\nPUSHBULLET_ACCESS_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHBULLET=\"admin1@example.com admin3@somemail.com #examplechanneltag 
#anotherchanneltag\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushbullet/metadata.yaml" }, { @@ -22203,7 +21929,7 @@ "overview": "# PushOver\n\nSend notification to Pushover using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n- Netdata will send warning messages with priority 0 and critical messages with priority 1.\n- Pushover allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during the do-not-disturb-hours.\n- All other notifications will be delivered silently.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. 
This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_WEBHOOK_URL | set `PUSHOVER_WEBHOOK_URL` to your Pushover Application token. | | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_pushover[sysadmin]=\"USERTOKEN1\"\nrole_recipients_pushover[domainadmin]=\"USERTOKEN2\"\nrole_recipients_pushover[dba]=\"USERTOKEN3 USERTOKEN4\"\nrole_recipients_pushover[webmaster]=\"USERTOKEN5\"\nrole_recipients_pushover[proxyadmin]=\"USERTOKEN6\"\nrole_recipients_pushover[sitemgr]=\"USERTOKEN7\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER=\"YES\"\nPUSHOVER_APP_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHOVER=\"USERTOKEN\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushover/metadata.yaml" }, { @@ -22222,7 +21948,7 @@ "overview": "# RocketChat\n\nSend notifications to Rocket.Chat using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```text\nrole_recipients_rocketchat[sysadmin]=\"systems\"\nrole_recipients_rocketchat[domainadmin]=\"domains\"\nrole_recipients_rocketchat[dba]=\"databases systems\"\nrole_recipients_rocketchat[webmaster]=\"marketing development\"\nrole_recipients_rocketchat[proxyadmin]=\"proxy_admin\"\nrole_recipients_rocketchat[sitemgr]=\"sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT=\"YES\"\nROCKETCHAT_WEBHOOK_URL=\"\"\nDEFAULT_RECIPIENT_ROCKETCHAT=\"monitoring_alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml" }, { @@ -22241,7 +21967,7 @@ "overview": "# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Slack app along with an incoming webhook, read Slack's guide on the topic 
[here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to YES | YES | yes |\n| SLACK_WEBHOOK_URL | set `SLACK_WEBHOOK_URL` to your Slack app's webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. 
| | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK=\"YES\"\nSLACK_WEBHOOK_URL=\"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\" \nDEFAULT_RECIPIENT_SLACK=\"#alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/slack/metadata.yaml" }, { @@ -22262,7 +21988,7 @@ "overview": "# SMS\n\nSend notifications to `smstools3` using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\nThe SMS Server Tools 3 is a SMS Gateway software which can send and receive short messages through GSM modems and mobile phones.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- To ensure that the user `netdata` can execute `sendsms`. 
Any user executing `sendsms` needs to:\n - Have write permissions to /tmp and /var/spool/sms/outgoing\n - Be a member of group smsd\n - To ensure that the steps above are successful, just su netdata and execute sendsms phone message.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`, otherwise Netdata will search for it in your system `$PATH:` | YES | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | | yes |\n| DEFAULT_RECIPIENT_SMS | Set DEFAULT_RECIPIENT_SMS to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: PHONE1 PHONE2. 
| | yes |\n\n##### sendsms\n\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms=\"/usr/bin/sendsms\"\n\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_sms[sysadmin]=\"PHONE1\"\nrole_recipients_sms[domainadmin]=\"PHONE2\"\nrole_recipients_sms[dba]=\"PHONE3\"\nrole_recipients_sms[webmaster]=\"PHONE4\"\nrole_recipients_sms[proxyadmin]=\"PHONE5\"\nrole_recipients_sms[sitemgr]=\"PHONE6\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS=\"YES\"\nDEFAULT_RECIPIENT_SMS=\"1234567890\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/smstools3/metadata.yaml" }, { @@ -22281,7 +22007,7 @@ "overview": "# syslog\n\nSend notifications to Syslog using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command for this to work. This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging, by default this value is set to `local6`. | | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set SEND_SYSLOG to YES, make sure you have everything else configured before turning this on. | | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```\n[[facility.level][@host[:port]]/]prefix\n```\n\nprefix defines what the log messages are prefixed with. By default, all lines are prefixed with 'netdata'.\n\nThe facility and level are the standard syslog facility and level options, for more info on them see your local logger and syslog documentation. By default, Netdata will log to the local6 facility, with a log level dependent on the type of message (crit for CRITICAL, warning for WARNING, and info for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). 
However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: daemon.notice@loghost:514/netdata daemon.notice@loghost2:514/netdata.\nAll roles will default to this variable if left unconfigured.\n\n\n##### SEND_SYSLOG \n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_SYSLOG with the recipient you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_syslog[sysadmin]=\"daemon.notice@loghost1:514/netdata\"\nrole_recipients_syslog[domainadmin]=\"daemon.notice@loghost2:514/netdata\"\nrole_recipients_syslog[dba]=\"daemon.notice@loghost3:514/netdata\"\nrole_recipients_syslog[webmaster]=\"daemon.notice@loghost4:514/netdata\"\nrole_recipients_syslog[proxyadmin]=\"daemon.notice@loghost5:514/netdata\"\nrole_recipients_syslog[sitemgr]=\"daemon.notice@loghost6:514/netdata\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG=\"YES\"\nSYSLOG_FACILITY='local6'\nDEFAULT_RECIPIENT_SYSLOG=\"daemon.notice@loghost6:514/netdata\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected 
role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/syslog/metadata.yaml" }, { @@ -22302,7 +22028,7 @@ "overview": "# Microsoft Teams\n\nYou can send Netdata alerts to Microsoft Teams using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to YES | YES | yes |\n| MSTEAMS_WEBHOOK_URL | set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. | | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. 
You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```text\nrole_recipients_msteams[sysadmin]=\"CHANNEL1\"\nrole_recipients_msteams[domainadmin]=\"CHANNEL2\"\nrole_recipients_msteams[dba]=\"databases CHANNEL3\"\nrole_recipients_msteams[webmaster]=\"CHANNEL4\"\nrole_recipients_msteams[proxyadmin]=\"CHANNEL5\"\nrole_recipients_msteams[sitemgr]=\"CHANNEL6\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS=\"YES\"\nMSTEAMS_WEBHOOK_URL=\"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MSTEAMS=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/msteams/metadata.yaml" }, { @@ -22321,7 +22047,7 @@ "overview": "# Telegram\n\nSend notifications to Telegram using Netdata's Agent alert notification feature, which 
supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen, supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to YES | YES | yes |\n| TELEGRAM_BOT_TOKEN | set `TELEGRAM_BOT_TOKEN` to your bot token. | | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set the `DEFAULT_RECIPIENT_TELEGRAM` variable in your config file to your Telegram chat ID (find it with @myidbot). Separate multiple chat IDs with spaces. To send alerts to a specific topic within a chat, use `chatID:topicID`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_telegram[sysadmin]=\"-49999333324\"\nrole_recipients_telegram[domainadmin]=\"-49999333389\"\nrole_recipients_telegram[dba]=\"-10099992222\"\nrole_recipients_telegram[webmaster]=\"-10099992222 -49999333389\"\nrole_recipients_telegram[proxyadmin]=\"-49999333344\"\nrole_recipients_telegram[sitemgr]=\"-49999333876\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM=\"YES\"\nTELEGRAM_BOT_TOKEN=\"111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\"\nDEFAULT_RECIPIENT_TELEGRAM=\"-49999333876\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/telegram/metadata.yaml" }, { @@ -22340,9 +22066,289 @@ "overview": "# Twilio\n\nSend notifications to Twilio using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Get your SID, and Token from https://www.twilio.com/console\n- Terminal access to the Agent 
you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to YES | YES | yes |\n| TWILIO_ACCOUNT_SID | set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account's number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set DEFAULT_RECIPIENT_TWILIO to the number you want the alert notifications to be sent to. You can define multiple numbers like this: +15555555555 +17777777777. 
| | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_TWILIO with the recipient's number you want, in the following entries at the bottom of the same file:\n\n```text\nrole_recipients_twilio[sysadmin]=\"+15555555555\"\nrole_recipients_twilio[domainadmin]=\"+15555555556\"\nrole_recipients_twilio[dba]=\"+15555555557\"\nrole_recipients_twilio[webmaster]=\"+15555555558\"\nrole_recipients_twilio[proxyadmin]=\"+15555555559\"\nrole_recipients_twilio[sitemgr]=\"+15555555550\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO=\"YES\"\nTWILIO_ACCOUNT_SID=\"xxxxxxxxx\"\nTWILIO_ACCOUNT_TOKEN=\"xxxxxxxxxx\"\nTWILIO_NUMBER=\"xxxxxxxxxxx\"\nDEFAULT_RECIPIENT_TWILIO=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", - "integration_type": "notification", + "integration_type": "agent_notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/twilio/metadata.yaml" }, + { + "id": "notify-cloud-awssns", + "meta": { + "name": "Amazon SNS", + "link": "https://aws.amazon.com/sns/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "awssns.png" + }, + "keywords": [ + "awssns" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the space as an **Admin**\n- The Space 
needs to be on a paid plan\n- An AWS account with AWS SNS access\n\n### AWS SNS Configuration\n\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On AWS SNS management console click on **Create topic**\n - On the **Details** section, select the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n3. Copy the **Topic ARN** in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the AWS SNS Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Topic ARN: The topic provided on AWS SNS (with region) for where to publish your notifications.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-discord", + "meta": { + "name": "Discord", + "link": "https://discord.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "discord.png" + }, + "keywords": [ + "discord", + "community" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n\n### Discord Server Configuration\n\n1. 
Go to **Server Settings** --> **Integrations**\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Keep note of the **Webhook URL** as you will need it for the configuration of the integration on the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Discord Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The URL you copied from the previous section\n - Channel Parameters: Select the channel type which the notifications will be sent to, if it is a Forum channel, you need to specify a thread name\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-ilert", + "meta": { + "name": "ilert", + "link": "https://www.ilert.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "ilert.svg" + }, + "keywords": [ + "ilert" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on ilert to add new Alert sources.\n\n### ilert Configuration\n\n1. From the navigation bar, open the Alert sources drop down and click \"Alert sources\"\n2. Click on the \"+ Create a new alert source\" button\n3. 
Configure an Alert source:\n - Select \"API integration\" and click Next\n - Provide a name that suits the source's purpose, for example \"Netdata\"\n - Select Escalation policy\n - Select Alert grouping (optional)\n4. Obtain the API Key:\n - Once the Alert source is created, you will be provided with an API key. Copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the ilert Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Alert Source API key: The key you copied in the ilert configuration step.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-mattermost", + "meta": { + "name": "Mattermost", + "link": "https://mattermost.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "mattermost.png" + }, + "keywords": [ + "mattermost" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Mattermost to add new integrations.\n\n### Mattermost Server Configuration\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don't have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. 
They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook.\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook URL that looks like `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Mattermost Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Mattermost for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-microsoftteams", + "meta": { + "name": "Microsoft Teams", + "link": "https://www.microsoft.com/en-us/microsoft-teams", + "categories": [ + "notify.cloud" + ], + "icon_filename": "teams.svg" + }, + "keywords": [ + "microsoft", + "teams" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid 
plan\n- A [Microsoft Teams Essentials subscription](https://www.microsoft.com/en-sg/microsoft-teams/essentials) or higher. Note that this is a **paid** feature\n\n### Microsoft Teams Configuration\n\n1. Navigate to the desired Microsoft Teams channel and hover over the channel name. Click the three dots icon that appears\n2. Select \"Workflows\" from the options, then choose \"Post to a channel when a webhook request is received\"\n3. **Configure Workflow Details**\n - Give your workflow a name, such as \"Netdata Alerts\"\n - Select the target team and channel where you will receive notifications\n - Click \"Add workflow\"\n4. Once the workflow is created, you will receive a unique Workflow Webhook URL, copy it, in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Microsoft Teams Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Microsoft Teams Incoming Webhook URL: The Incoming Webhook URL that you copied earlier.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-mobile-app", + "meta": { + "name": "Netdata Mobile App", + "link": "https://netdata.cloud", + "categories": [ + "notify.cloud" + ], + "icon_filename": "netdata.png" + }, + "keywords": [ + "mobile-app", + "phone", + "personal-notifications" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- You need to have the Netdata Mobile App installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration and device linking\n\nIn order to login to the Netdata Mobile App\n\n1. Download the Netdata Mobile App from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. 
Open the App and Choose your Sign-in option\n - Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.\n - Sign-in with QR Code: Scan the QR code from the Netdata Cloud UI under **Profile Picture** --> **Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n\n### Netdata Configuration\n\nAfter linking your device, enable the toggle for **Mobile App Notifications** under the same settings panel.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-opsgenie", + "meta": { + "name": "Opsgenie", + "link": "https://www.atlassian.com/software/opsgenie", + "categories": [ + "notify.cloud" + ], + "icon_filename": "opsgenie.png" + }, + "keywords": [ + "opsgenie", + "atlassian" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\n1. Go to the integrations tab of your team, click **Add integration**\n2. Pick **API** from the available integrations and copy the API Key in order to add it to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Opsgenie Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - API Key: The key provided on Opsgenie for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-pagerduty", + "meta": { + "name": "PagerDuty", + "link": "https://www.pagerduty.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "pagerduty.png" + }, + "keywords": [ + "pagerduty" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a PagerDuty service to receive events using webhooks.\n\n### PagerDuty Server Configuration\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. On the third step of the service creation, select `Events API V2` Integration\n3. Once the service is created, you will be redirected to its configuration page, where you can copy the **Integration Key** and **Integration URL (Alert Events)** in order to add them to your integration configuration in the Netdata Cloud UI\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the PagerDuty Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Integration Key: A 32 character key provided by PagerDuty to receive events on your service.\n - Integration URL (Alert Events): The URL provided by PagerDuty where Netdata Cloud will send notifications.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-rocketchat", + "meta": { + "name": "RocketChat", + "link": "https://www.rocket.chat/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "rocketchat.png" + }, + "keywords": [ + "rocketchat" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have permissions on RocketChat to add new integrations.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat server to receive notifications from Netdata Cloud:\n\n1. In RocketChat, Navigate to Administration > Workspace > Integrations\n2. Click **+New** at the top right corner\n3. For more details about each parameter, check [Create a new incoming webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook)\n4. 
You will end up with a webhook endpoint that looks like `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`, copy it in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the RocketChat Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on RocketChat for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-slack", + "meta": { + "name": "Slack", + "link": "https://slack.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "slack.png" + }, + "keywords": [ + "slack" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have a Slack app on your workspace to receive the Webhooks.\n\n### Slack Server Configuration\n\n1. Create an app to receive webhook integrations. Check the [Slack documentation](https://api.slack.com/apps?new_app=1) for further details\n2. Install the app on your workspace\n3. 
Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n - Specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL in order to add it to your integration configuration in the Netdata Cloud UI\n\nFor more details please check [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Slack Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: URL provided on Slack for the channel you want to receive your notifications\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-splunk", + "meta": { + "name": "Splunk", + "link": "https://splunk.com/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "splunk-black.svg" + }, + "keywords": [ + "Splunk" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The URI and token for your Splunk HTTP Event Collector. 
Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions on how to set it up.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - HTTP Event Collector URI: The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token: The token that Splunk provided to you when you created the HTTP Event Collector\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-telegram", + "meta": { + "name": "Telegram", + "link": "https://telegram.org/", + "categories": [ + "notify.cloud" + ], + "icon_filename": "telegram.svg" + }, + "keywords": [ + "Telegram" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Telegram bot token, chat ID and optionally the topic ID\n\n### Telegram Configuration\n\n- Bot token: To create one bot, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot` and follow the instructions. 
**Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, eg. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n- To get the topic ID, the easiest way is this: Post a message to that topic, then right-click on it and select `Copy Message Link`. Paste it on a scratchpad and notice that it has the following structure `https://t.me/c/XXXXXXXXXX/YY/ZZ`. The topic ID is `YY` (integer).\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Telegram Integration\n5. A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Bot Token: The token of your bot\n - Chat ID: The chat id where your bot will deliver messages to\n - Topic ID: The identifier of the chat topic to which your bot will send messages. If omitted or 0, messages will be sent to the General topic. 
If topics are not supported, messages will be sent to the chat.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-victorops", + "meta": { + "name": "Splunk VictorOps", + "link": "https://www.splunk.com/en_us/about-splunk/acquisitions/splunk-on-call.html", + "categories": [ + "notify.cloud" + ], + "icon_filename": "victorops.svg" + }, + "keywords": [ + "VictorOps", + "Splunk", + "On-Call" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- The Destination URL for your Splunk VictorOps REST Endpoint Integration. Refer to the [VictorOps documentation](https://help.victorops.com/knowledge-base/rest-endpoint-integration-guide) for detailed instructions.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Splunk VictorOps Integration\n5. 
A modal will be presented to you to enter the required details to enable the integration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Destination URL - The URL provided by VictorOps of your REST endpoint.\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "notify-cloud-webhook", + "meta": { + "name": "Webhook", + "link": "https://en.wikipedia.org/wiki/Webhook", + "categories": [ + "notify.cloud" + ], + "icon_filename": "webhook.svg" + }, + "keywords": [ + "generic webhooks", + "webhooks" + ], + "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Space as an **Admin**\n- The Space needs to be on a paid plan\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Alerts & Notifications** tab\n3. Click on the **+ Add configuration** button\n4. Add the Webhook integration\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings**\n - Configuration name (optional): A name for your configuration in order to easily refer to it\n - Rooms: A list of Rooms for which you want to be notified\n - Notifications: The notifications which you want to be notified\n - **Integration configuration**\n - Webhook URL: The url of the service that Netdata will send notifications to. 
In order to keep the communication secured, Netdata only accepts HTTPS urls.\n - Extra headers: Optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism, Netdata webhook integration supports 3 different authentication mechanisms.\n - Mutual TLS (recommended): Default authentication mechanism used if no other method is selected\n - Basic: The client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**.\n - Bearer: The client sends a request with an Authorization header that includes a **bearer token**.\n\n### Webhook service\n\nA webhook service allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL.\n\nIn this section, we'll go over the steps to set up a generic webhook service, including adding headers, and implementing different types of authorization mechanisms.\n\n#### Netdata webhook integration\n\nNetdata webhook integration service will send alert and reachability notifications to the destination service as soon as they are detected.\n\nFor alert notifications, the content sent to the destination service contains a JSON object with the following properties:\n\n| field | type | description |\n|:----------------------------------|:--------------------------------|:--------------------------------------------------------------------------|\n| message | string | A summary message of the alert. |\n| alert | string | The alert the notification is related to. |\n| info | string | Additional info related with the alert. |\n| chart | string | The chart associated with the alert. |\n| context | string | The chart context. |\n| space | string | The space where the node that raised the alert is assigned. |\n| Rooms | object\\[object(string,string)\\] | Object with list of Rooms names and urls where the node belongs to. |\n| family | string | Context family. 
|\n| class | string | Classification of the alert, e.g. `Error`. |\n| severity | string | Alert severity, can be one of `warning`, `critical` or `clear`. |\n| date | string | Date of the alert in ISO8601 format. |\n| duration | string | Duration the alert has been raised. |\n| additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n| additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n| alert_url | string | Netdata Cloud URL for this alert. |\n\nFor reachability notifications, the JSON object will contain the following properties:\n\n| field | type | description |\n|:-----------------|:--------|:------------------------------------------------------------------------------------------------------------------------------|\n| message | string | A summary message of the reachability alert. |\n| url | string | Netdata Cloud URL for the host experiencing the reachability alert. |\n| host | string | The hostname experiencing the reachability alert. |\n| severity | string | Severity for this notification. If host is reachable, severity will be `info`, if host is unreachable, it will be `critical`. |\n| status | object | An object with the status information. 
|\n| status.reachable | boolean | `true` if host is reachable, `false` otherwise |\n| status.text | string | Can be `reachable` or `unreachable` |\n\n#### Extra headers\n\nWhen setting up a webhook service, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\nBy default, the following headers will be sent in the HTTP request\n\n | **Header** | **Value** |\n |:------------:|------------------|\n | Content-Type | application/json |\n\n#### Authentication mechanisms\n\nNetdata webhook integration supports 3 different authentication mechanisms:\n\n##### Mutual TLS authentication (recommended)\n\nIn mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\nThis is the default authentication mechanism used if no other method is selected.\n\nTo take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.\n\nThe steps to perform this validation are as follows:\n\n- Store Netdata CA certificate on a file in your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```text\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n 
FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n\n
\n\n- Enable client certificate validation on the web server that is doing the TLS termination. Below there are examples on how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n \n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n \n \n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n \n ```\n\n##### Basic authentication\n\nIn basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n##### Bearer token authentication\n\nIn bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. 
If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n##### Challenge secret\n\nTo validate that you have ownership of the web application that will receive the webhook events, Netdata is using a challenge response check mechanism.\n\nThis mechanism works as follows:\n\n- The challenge secret parameter that you provide is a shared secret between only you and Netdata.\n- On your request for creating a new Webhook integration, Netdata will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n- You will receive this request on your application and it must construct an encrypted response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n- Netdata will compare your application's response with the hash that it will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\nNetdata does this validation every time you update your integration configuration.\n\n- Response requirements:\n - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n - Valid response_token and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n**Example response token generation in Python:**\n\nHere you can see how to define a handler for a Flask application in Python 3:\n\n```python\nimport base64\nimport hashlib\nimport hmac\nimport json\n\nkey = 'YOUR_CHALLENGE_SECRET'\n\n@app.route('/webhooks/netdata')\ndef webhook_challenge():\ntoken = request.args.get('crc_token').encode('ascii')\n\n# creates HMAC SHA-256 hash from incoming token and your consumer secret\nsha256_hash_digest = hmac.new(key.encode(),\n msg=token,\n digestmod=hashlib.sha256).digest()\n\n# construct response data with 
base64 encoded hash\nresponse = {\n 'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n}\n\n# returns properly formatted json response\nreturn json.dumps(response)\n```\n\n", + "integration_type": "cloud_notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", + "troubleshooting": "" + }, + { + "id": "logs-systemd-journal", + "meta": { + "name": "Systemd Journal Logs", + "link": "https://github.com/netdata/netdata/blob/master/src/collectors/systemd-journal.plugin/README.md", + "categories": [ + "logs" + ], + "icon_filename": "netdata.png" + }, + "keywords": [ + "systemd", + "journal", + "logs" + ], + "overview": "# Systemd Journal Logs\n\nThe `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and efficient.\n\nIt automatically discovers available journal sources, allows advanced filtering, offers interactive visual representations and supports exploring the logs of both individual servers and the logs on infrastructure wide journal centralization servers.\n\nThe plugin automatically detects the available journal sources, based on the journal files available in `/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs).\n\n\n## Visualization\n\nYou can start exploring `systemd` journal logs on the \"Logs\" tab of the Netdata UI.\n\n\n## Key features\n\n- Works on both **individual servers** and **journal centralization servers**.\n- Supports `persistent` and `volatile` journals.\n- Supports `system`, `user`, `namespaces` and `remote` journals.\n- Allows filtering on **any journal field** or **field value**, for any time-frame.\n- Allows **full text search** (`grep`) on all journal fields, for any time-frame.\n- Provides a **histogram** for log entries over time, with a break down per field-value, for any field and any time-frame.\n- Works directly on journal files, without any other third-party 
components.\n- Supports coloring log entries, the same way `journalctl` does.\n- In PLAY mode provides the same experience as `journalctl -f`, showing new log entries immediately after they are received.\n", + "setup": "## Setup\n\n## Prerequisites\n\n- A Netdata Cloud account\n\n\n## Configuration\n\nThere is no configuration needed for this integration.\n", + "integration_type": "logs", + "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/logs/metadata.yaml" + }, { "id": "oidc-authentication", "meta": { diff --git a/integrations/logs/integrations/systemd_journal_logs.md b/integrations/logs/integrations/systemd_journal_logs.md new file mode 100644 index 00000000000000..9f87ee1325356c --- /dev/null +++ b/integrations/logs/integrations/systemd_journal_logs.md @@ -0,0 +1,53 @@ + + +# Systemd Journal Logs + + + + + +The `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and efficient. + +It automatically discovers available journal sources, allows advanced filtering, offers interactive visual representations and supports exploring the logs of both individual servers and the logs on infrastructure wide journal centralization servers. + +The plugin automatically detects the available journal sources, based on the journal files available in `/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs). + + + + +## Visualization + +You can start exploring `systemd` journal logs on the "Logs" tab of the Netdata UI. + + +## Key features + +- Works on both **individual servers** and **journal centralization servers**. +- Supports `persistent` and `volatile` journals. +- Supports `system`, `user`, `namespaces` and `remote` journals. +- Allows filtering on **any journal field** or **field value**, for any time-frame. +- Allows **full text search** (`grep`) on all journal fields, for any time-frame. 
+- Provides a **histogram** for log entries over time, with a break down per field-value, for any field and any time-frame. +- Works directly on journal files, without any other third-party components. +- Supports coloring log entries, the same way `journalctl` does. +- In PLAY mode provides the same experience as `journalctl -f`, showing new log entries immediately after they are received. + + +## Setup + +## Prerequisites + +- A Netdata Cloud account + + +## Configuration + +There is no configuration needed for this integration. + diff --git a/integrations/logs/metadata.yaml b/integrations/logs/metadata.yaml new file mode 100644 index 00000000000000..633899962340a6 --- /dev/null +++ b/integrations/logs/metadata.yaml @@ -0,0 +1,38 @@ +# yamllint disable rule:line-length +--- +- id: "logs-systemd-journal" + meta: + name: "Systemd Journal Logs" + link: "https://github.com/netdata/netdata/blob/master/src/collectors/systemd-journal.plugin/README.md" + categories: + - logs + icon_filename: "netdata.png" + keywords: + - systemd + - journal + - logs + overview: + description: | + The `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and efficient. + + It automatically discovers available journal sources, allows advanced filtering, offers interactive visual representations and supports exploring the logs of both individual servers and the logs on infrastructure wide journal centralization servers. + + The plugin automatically detects the available journal sources, based on the journal files available in `/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs). + visualization: + description: | + You can start exploring `systemd` journal logs on the "Logs" tab of the Netdata UI. + key_features: + description: | + - Works on both **individual servers** and **journal centralization servers**. + - Supports `persistent` and `volatile` journals. 
+ - Supports `system`, `user`, `namespaces` and `remote` journals. + - Allows filtering on **any journal field** or **field value**, for any time-frame. + - Allows **full text search** (`grep`) on all journal fields, for any time-frame. + - Provides a **histogram** for log entries over time, with a break down per field-value, for any field and any time-frame. + - Works directly on journal files, without any other third-party components. + - Supports coloring log entries, the same way `journalctl` does. + - In PLAY mode provides the same experience as `journalctl -f`, showing new log entries immediately after they are received. + setup: + prerequisites: + description: | + - A Netdata Cloud account diff --git a/integrations/schemas/notification.json b/integrations/schemas/agent_notification.json similarity index 79% rename from integrations/schemas/notification.json rename to integrations/schemas/agent_notification.json index 2596ca441e54ba..f157a65d9a82be 100644 --- a/integrations/schemas/notification.json +++ b/integrations/schemas/agent_notification.json @@ -46,20 +46,20 @@ ] }, "global_setup": { - "type": "object", - "description": "Flags that show which global setup sections are relevant for this notification method.", - "properties": { - "severity_filtering": { - "type": "boolean" - }, - "http_proxy": { - "type": "boolean" - } + "type": "object", + "description": "Flags that show which global setup sections are relevant for this notification method.", + "properties": { + "severity_filtering": { + "type": "boolean" }, - "required": [ - "severity_filtering", - "http_proxy" - ] + "http_proxy": { + "type": "boolean" + } + }, + "required": [ + "severity_filtering", + "http_proxy" + ] }, "setup": { "oneOf": [ @@ -84,4 +84,4 @@ ] } } -} +} \ No newline at end of file diff --git a/integrations/schemas/cloud_notification.json b/integrations/schemas/cloud_notification.json new file mode 100644 index 00000000000000..60bd66e8f4bf7e --- /dev/null +++ 
b/integrations/schemas/cloud_notification.json @@ -0,0 +1,68 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Netdata notification mechanism metadata.", + "oneOf": [ + { + "$ref": "#/$defs/entry" + }, + { + "type": "array", + "minLength": 1, + "items": { + "$ref": "#/$defs/entry" + } + } + ], + "$defs": { + "entry": { + "type": "object", + "description": "Data for a single notification method.", + "properties": { + "id": { + "$ref": "./shared.json#/$defs/id" + }, + "meta": { + "$ref": "./shared.json#/$defs/instance" + }, + "keywords": { + "$ref": "./shared.json#/$defs/keywords" + }, + "global_setup": { + "type": "object", + "description": "Flags that show which global setup sections are relevant for this notification method.", + "properties": { + "severity_filtering": { + "type": "boolean" + }, + "http_proxy": { + "type": "boolean" + } + }, + "required": [ + "severity_filtering", + "http_proxy" + ] + }, + "setup": { + "oneOf": [ + { + "$ref": "./shared.json#/$defs/short_setup" + }, + { + "$ref": "./shared.json#/$defs/full_setup" + } + ] + }, + "troubleshooting": { + "$ref": "./shared.json#/$defs/troubleshooting" + } + }, + "required": [ + "id", + "meta", + "keywords", + "setup" + ] + } + } +} \ No newline at end of file diff --git a/integrations/schemas/logs.json b/integrations/schemas/logs.json new file mode 100644 index 00000000000000..8209dc9793b981 --- /dev/null +++ b/integrations/schemas/logs.json @@ -0,0 +1,97 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Netdata Logs integrations metadata.", + "oneOf": [ + { + "$ref": "#/$defs/entry" + }, + { + "type": "array", + "minLength": 1, + "items": { + "$ref": "#/$defs/entry" + } + } + ], + "$defs": { + "entry": { + "type": "object", + "description": "Data for a single logs integration.", + "properties": { + "id": { + "$ref": "./shared.json#/$defs/id" + }, + "meta": { + "$ref": "./shared.json#/$defs/instance" + }, + "keywords": { + "$ref": 
"./shared.json#/$defs/keywords" + }, + "overview": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "General description of what the integration does." + }, + "visualization": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "How the user can access the data provided by the integration" + } + }, + "required": [ + "description" + ] + }, + "key_features": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "The key features of the integration." + } + }, + "required": [ + "description" + ] + } + }, + "required": [ + "description", + "visualization", + "key_features" + ] + }, + "setup": { + "type": "object", + "properties": { + "prerequisites": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "Prerequisites of getting the integration working. For Log Functions only a Netdata account should be needed." + } + }, + "required": [ + "description" + ] + }, + "required": [ + "prerequisites" + ] + } + } + }, + "required": [ + "id", + "meta", + "keywords", + "overview" + ] + } + } +} \ No newline at end of file diff --git a/integrations/templates/overview.md b/integrations/templates/overview.md index 3063b6860672a5..d75b32d88313e4 100644 --- a/integrations/templates/overview.md +++ b/integrations/templates/overview.md @@ -2,8 +2,10 @@ [% include 'overview/collector.md' %] [% elif entry.integration_type == 'exporter' %] [% include 'overview/exporter.md' %] -[% elif entry.integration_type == 'notification' %] +[% elif entry.integration_type == 'agent_notification' %] [% include 'overview/notification.md' %] [% elif entry.integration_type == 'authentication' %] [% include 'overview/authentication.md' %] +[% elif entry.integration_type == 'logs' %] +[% include 'overview/logs.md' %] [% endif %] diff --git a/integrations/templates/overview/logs.md b/integrations/templates/overview/logs.md new file mode 
100644 index 00000000000000..2ff0f69a934eea --- /dev/null +++ b/integrations/templates/overview/logs.md @@ -0,0 +1,11 @@ +# [[ entry.meta.name ]] + +[[ entry.overview.description ]] + +## Visualization + +[[ entry.overview.visualization.description ]] + +## Key features + +[[ entry.overview.key_features.description ]] \ No newline at end of file diff --git a/integrations/templates/setup.md b/integrations/templates/setup.md index 93f76c401d6fc9..2cac9ec7fbdc94 100644 --- a/integrations/templates/setup.md +++ b/integrations/templates/setup.md @@ -1,4 +1,14 @@ ## Setup +[% if entry.integration_type == 'logs' %] + +## Prerequisites + +[[ entry.setup.prerequisites.description]] + +## Configuration + +There is no configuration needed for this integration. +[% else %] [% if entry.setup.description %] [[ entry.setup.description ]] @@ -106,3 +116,4 @@ There are no configuration examples. [% endif %] [% endif %] +[% endif %] \ No newline at end of file diff --git a/integrations/templates/troubleshooting.md b/integrations/templates/troubleshooting.md index 2176dd01014dbc..7c72bdde9bd420 100644 --- a/integrations/templates/troubleshooting.md +++ b/integrations/templates/troubleshooting.md @@ -85,13 +85,12 @@ docker logs netdata 2>&1 | grep [[ entry.meta.module_name ]] [% endif %] [% endif %] -[% elif entry.integration_type == 'notification' %] -[% if 'cloud-notifications' in entry._src_path|string %] +[% elif entry.integration_type == 'cloud_notification' %] [% if entry.troubleshooting.problems.list %] ## Troubleshooting [% endif %] -[% else %] +[% elif entry.integration_type == 'agent_notification' %] ## Troubleshooting ### Test Notification @@ -114,7 +113,6 @@ export NETDATA_ALARM_NOTIFY_DEBUG=1 Note that this will test _all_ alert mechanisms for the selected role. 
-[% endif %] [% elif entry.integration_type == 'exporter' %] [% if entry.troubleshooting.problems.list %] ## Troubleshooting diff --git a/packaging/cmake/config.cmake.h.in b/packaging/cmake/config.cmake.h.in index 603a57baba7762..d322826300941d 100644 --- a/packaging/cmake/config.cmake.h.in +++ b/packaging/cmake/config.cmake.h.in @@ -169,6 +169,8 @@ #cmakedefine HAVE_LIBYAML #cmakedefine HAVE_LIBMNL +#cmakedefine HAVE_WEL +#cmakedefine HAVE_ETW #cmakedefine RUN_UNDER_CLION // /* Enable GNU extensions on systems that have them. */ diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh index 375fbedf7661ae..6b96767886bd85 100755 --- a/packaging/installer/kickstart.sh +++ b/packaging/installer/kickstart.sh @@ -1275,7 +1275,7 @@ write_claim_config() { run_as_root touch "${claim_config}.tmp" || return 1 run_as_root chmod 0640 "${claim_config}.tmp" || return 1 run_as_root chown ":${NETDATA_CLAIM_GROUP:-netdata}" "${claim_config}.tmp" || return 1 - run_as_root sh -c "echo -e \"${config}\" > \"${claim_config}.tmp\"" || return 1 + run_as_root sh -c "printf '${config}\\n' > \"${claim_config}.tmp\"" || return 1 run_as_root mv -f "${claim_config}.tmp" "${claim_config}" || return 1 if [ -z "${NETDATA_CLAIM_NORELOAD}" ]; then diff --git a/packaging/installer/netdata-updater.sh b/packaging/installer/netdata-updater.sh index 5ebb6baa214074..51b5b14aca609d 100755 --- a/packaging/installer/netdata-updater.sh +++ b/packaging/installer/netdata-updater.sh @@ -590,7 +590,9 @@ self_update() { export ENVIRONMENT_FILE="${ENVIRONMENT_FILE}" force_update="" [ "$NETDATA_FORCE_UPDATE" = "1" ] && force_update="--force-update" - exec ./netdata-updater.sh --not-running-from-cron --no-updater-self-update "$force_update" --tmpdir-path "$(pwd)" + interactive="" + [ "$INTERACTIVE" = "0" ] && interactive="--non-interactive" + exec ./netdata-updater.sh --not-running-from-cron --no-updater-self-update "$force_update" "$interactive" --tmpdir-path "$(pwd)" else error "Failed to 
download newest version of updater script, continuing with current version." fi diff --git a/packaging/utils/compile-on-windows.sh b/packaging/utils/compile-and-run-windows.sh similarity index 94% rename from packaging/utils/compile-on-windows.sh rename to packaging/utils/compile-and-run-windows.sh index 8867d10323ee4c..2d540eee94e78e 100644 --- a/packaging/utils/compile-on-windows.sh +++ b/packaging/utils/compile-and-run-windows.sh @@ -70,15 +70,20 @@ then ${NULL} fi +ninja -v -C "${build}" || ninja -v -C "${build}" -j 1 + +echo "Stopping service Netdata" +sc stop "Netdata" || echo "Failed" + ninja -v -C "${build}" install || ninja -v -C "${build}" -j 1 +# register the event log publisher +cmd.exe //c "$(cygpath -w -a "/opt/netdata/usr/bin/wevt_netdata_install.bat")" + #echo #echo "Compile with:" #echo "ninja -v -C \"${build}\" install || ninja -v -C \"${build}\" -j 1" -echo "Stopping service Netdata" -sc stop "Netdata" || echo "Failed" - echo "starting netdata..." # enable JIT debug with gdb export MSYS="error_start:$(cygpath -w /usr/bin/gdb)" diff --git a/packaging/version b/packaging/version index 7b888c316ec99f..8ede737c8bafe4 100644 --- a/packaging/version +++ b/packaging/version @@ -1 +1 @@ -v1.99.0-325-nightly +v1.99.0-355-nightly diff --git a/packaging/windows/clion-msys-msys-environment.bat b/packaging/windows/clion-msys-msys-environment.bat index 9f0c095d37c354..16934951d51055 100644 --- a/packaging/windows/clion-msys-msys-environment.bat +++ b/packaging/windows/clion-msys-msys-environment.bat @@ -13,7 +13,9 @@ set MSYSTEM=MSYS :: go exists only mingw64 / ucrt64 / etc, not under msys profile set GOROOT=C:\msys64\mingw64 -set PATH="%PATH%;C:\msys64\usr\bin;C:\msys64\bin;C:\msys64\mingw64\bin" +set "PATH=%PATH%;C:\Program Files (x86)\Windows Kits\10\bin\10.0.26100.0\x64" +set "PATH=%PATH%;C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.39.33519\bin\Hostx64\x64" +set 
"PATH=%PATH%;C:\msys64\usr\bin;C:\msys64\bin;C:\msys64\mingw64\bin" ::set PKG_CONFIG_EXECUTABLE=C:\msys64\mingw64\bin\pkg-config.exe ::set CMAKE_C_COMPILER=C:\msys64\mingw64\bin\gcc.exe ::set CMAKE_CC_COMPILER=C:\msys64\mingw64\bin\g++.exe diff --git a/packaging/windows/find-sdk-path.sh b/packaging/windows/find-sdk-path.sh new file mode 100644 index 00000000000000..fb0eb600b3a0ff --- /dev/null +++ b/packaging/windows/find-sdk-path.sh @@ -0,0 +1,217 @@ +#!/bin/bash + +# Function to output the path in Windows format (convert from MSYS2/Unix format using cygpath) +convert_to_windows_format() { + cygpath -w -a "$1" +} + +# Function to display help message +display_help() { + echo "Usage: $0 [-s|--sdk] [-v|--visualstudio] [-w|--windows] [--help]" + echo + echo "Options:" + echo " -s, --sdk Search for tools in the Windows SDK." + echo " -v, --visualstudio Search for tools in Visual Studio." + echo " -w, --windows Output the path in Windows format (using cygpath)." + echo " --help Display this help message." + exit 0 +} + +# Function to find tools in the Windows SDK +find_sdk_tools() { + sdk_base_path="/c/Program Files (x86)/Windows Kits/10/bin" + + if [ ! -d "$sdk_base_path" ]; then + echo "ERROR: SDK base path \"$sdk_base_path\" does not exist. No SDK installations found." >&2 + echo "$system_root" + return 1 + fi + + echo "SDK base path exists: \"$sdk_base_path\"" >&2 + + # Find all SDK versions + sdk_versions=($(ls "$sdk_base_path" | tr ' ' '\n' | grep -E "^[0-9]+\..*$")) + echo "Found SDK versions: ${sdk_versions[*]}" >&2 + + if [ ${#sdk_versions[@]} -eq 0 ]; then + echo "ERROR: No valid Windows SDK versions found in \"$sdk_base_path\"." >&2 + echo "$system_root" + return 1 + fi + + # Sort versions and pick the latest + sorted_versions=$(printf '%s\n' "${sdk_versions[@]}" | sort -V) + latest_sdk_version=$(echo "$sorted_versions" | tail -n 1) + sdk_tool_path="$sdk_base_path/$latest_sdk_version/x64" + + echo "Latest SDK version: \"$latest_sdk_version\"" >&2 + + if [ ! 
-d "$sdk_tool_path" ]; then + echo "ERROR: Tool path \"$sdk_tool_path\" does not exist." >&2 + echo "$system_root" + return 1 + fi + + # Check if required tools exist + tools=("mc.exe" "rc.exe") + for tool in "${tools[@]}"; do + if [ ! -f "$sdk_tool_path/$tool" ]; then + echo "ERROR: $tool not found in \"$sdk_tool_path\"" >&2 + echo "$system_root" + return 1 + else + echo "$tool found in \"$sdk_tool_path\"" >&2 + fi + done + + echo >&2 + echo "DONE: All required tools found in \"$sdk_tool_path\"" >&2 + echo >&2 + + echo "$sdk_tool_path" +} + +# Function to find tools in Visual Studio +find_visual_studio_tools() { + studio_base_path="/c/Program Files/Microsoft Visual Studio/2022" + echo "Checking for Visual Studio installations in: \"$studio_base_path\"" >&2 + + if [ ! -d "$studio_base_path" ]; then + echo "ERROR: Visual Studio base path \"$studio_base_path\" does not exist. No Visual Studio installations found." >&2 + echo "$system_root" + return 1 + fi + + # Visual Studio editions we want to check + editions=("Enterprise" "Professional" "Community") + available_editions=() + + # Loop through each edition and check for tools + for edition in "${editions[@]}"; do + edition_path="$studio_base_path/$edition/VC/Tools/MSVC" + if [ -d "$edition_path" ]; then + available_editions+=("$edition") + echo "Checking edition: $edition in $studio_base_path" >&2 + + # Find all MSVC versions and sort them + msvc_versions=($(ls "$edition_path" | tr ' ' '\n' | grep -E "^[0-9]+\..*$")) + echo "Found MSVC versions in $edition: ${msvc_versions[*]}" >&2 + + if [ ${#msvc_versions[@]} -gt 0 ]; then + sorted_versions=$(printf '%s\n' "${msvc_versions[@]}" | sort -V) + latest_msvc_version=$(echo "${sorted_versions[@]}" | tail -n 1) + vs_tool_path="$edition_path/$latest_msvc_version/bin/Hostx64/x64" + + echo "Latest MSVC version: \"$latest_msvc_version\" in $edition" >&2 + + if [ ! -d "$vs_tool_path" ]; then + echo "WARNING: Tool path \"$vs_tool_path\" does not exist." 
>&2 + continue + fi + + # Check if required tools exist + tools=("link.exe") + missing_tool=0 + + for tool in "${tools[@]}"; do + if [ ! -f "$vs_tool_path/$tool" ]; then + echo "WARNING: $tool not found in \"$vs_tool_path\" for $edition" >&2 + missing_tool=1 + else + echo "$tool found in \"$vs_tool_path\"" >&2 + fi + done + + if [ $missing_tool -eq 0 ]; then + echo >&2 + echo "All required tools found in \"$vs_tool_path\"" >&2 + echo >&2 + + echo "$vs_tool_path" + return 0 + else + echo "WARNING: skipping edition '$edition', directory does not exist." >&2 + fi + else + echo "WARNING: skipping edition '$edition', MSVC directory does not exist." >&2 + fi + else + echo "WARNING: skipping edition '$edition', directory does not exist." >&2 + fi + done + + echo "ERROR: No valid Visual Studio editions found in \"$studio_base_path\"." >&2 + echo "$system_root" + return 1 +} + +# Parse options using getopt +TEMP=$(getopt -o svwh --long sdk,visualstudio,windows,help -- "$@") +if [ $? != 0 ]; then + echo "ERROR: Invalid options provided." >&2 + exit 1 +fi + +eval set -- "$TEMP" + +search_mode="sdk" +windows_format=0 +system_root="/usr/bin" + +# Process getopt options +while true; do + case "$1" in + -s|--sdk) + search_mode="sdk" + shift + ;; + -v|--visualstudio) + search_mode="visualstudio" + shift + ;; + -w|--windows) + system_root="%SYSTEMROOT%" + windows_format=1 + shift + ;; + --help|-h) + display_help + ;; + --) + shift + break + ;; + *) + echo "ERROR: Invalid option: $1" >&2 + exit 1 + ;; + esac +done + +# Ensure that one of --sdk or --visualstudio is selected +if [ -z "$search_mode" ]; then + echo "ERROR: You must specify either --sdk or --visualstudio." 
>&2 + display_help +fi + +# Determine which function to call based on the search mode +if [ "$search_mode" = "sdk" ]; then + tool_path=$(find_sdk_tools) +else + tool_path=$(find_visual_studio_tools) +fi + +# If a valid path is found, output it +if [ "$tool_path" != "$system_root" ]; then + if [ "$windows_format" -eq 1 ]; then + windows_tool_path=$(convert_to_windows_format "$tool_path") + echo "$windows_tool_path" + else + echo "$tool_path" + fi +else + echo "$system_root" + exit 1 +fi + +exit 0 diff --git a/packaging/windows/installer.nsi b/packaging/windows/installer.nsi index d57723ccce4dcf..b0796967a7ae17 100644 --- a/packaging/windows/installer.nsi +++ b/packaging/windows/installer.nsi @@ -257,29 +257,97 @@ Function NetdataUninstallRegistry end: FunctionEnd +Function InstallDLL + ; Check if certutil is available + nsExec::ExecToStack 'where certutil' + Pop $R0 + StrCmp $R0 "" NoCertUtil FoundCertUtil + + NoCertUtil: + DetailPrint "certutil not found, assuming files are different." + Goto CopyDLL + + FoundCertUtil: + ; Calculate hash of the existing DLL + nsExec::ExecToStack 'certutil -hashfile "$SYSDIR\wevt_netdata.dll" MD5' + Pop $R0 + + ; Calculate hash of the new DLL + nsExec::ExecToStack 'certutil -hashfile "$INSTDIR\usr\bin\wevt_netdata.dll" MD5' + Pop $R1 + + StrCmp $R0 $R1 SetPermissions + + CopyDLL: + ClearErrors + CopyFiles /SILENT "$INSTDIR\usr\bin\wevt_netdata.dll" "$SYSDIR" + IfErrors RetryPrompt SetPermissions + + RetryPrompt: + MessageBox MB_RETRYCANCEL|MB_ICONEXCLAMATION "Failed to copy wevt_netdata.dll probably because it is in use. Please close the Event Viewer (or other Event Log applications) and press Retry." 
+ StrCmp $R0 IDRETRY CopyDLL + StrCmp $R0 IDCANCEL ExitInstall + + Goto End + + SetPermissions: + nsExec::ExecToLog 'icacls "$SYSDIR\wevt_netdata.dll" /grant "NT SERVICE\EventLog":R' + Goto End + + ExitInstall: + Abort + + End: +FunctionEnd + +Function InstallManifest + IfFileExists "$INSTDIR\usr\bin\wevt_netdata_manifest.xml" CopyManifest End + + CopyManifest: + ClearErrors + CopyFiles /SILENT "$INSTDIR\usr\bin\wevt_netdata_manifest.xml" "$SYSDIR" + IfErrors RetryPrompt InstallManifest + + RetryPrompt: + MessageBox MB_RETRYCANCEL|MB_ICONEXCLAMATION "Failed to copy wevt_netdata_manifest.xml." + StrCmp $R0 IDRETRY CopyManifest + StrCmp $R0 IDCANCEL ExitInstall + + InstallManifest: + nsExec::ExecToLog 'wevtutil im "$SYSDIR\wevt_netdata_manifest.xml" "/mf:$SYSDIR\wevt_netdata.dll" "/rf:$SYSDIR\wevt_netdata.dll"' + Goto End + + ExitInstall: + Abort + + End: +FunctionEnd + Section "Install Netdata" - SetOutPath $INSTDIR - SetCompress off + SetOutPath $INSTDIR + SetCompress off - File /r "C:\msys64\opt\netdata\*.*" + File /r "C:\msys64\opt\netdata\*.*" - ClearErrors + ClearErrors nsExec::ExecToLog '$SYSDIR\sc.exe create Netdata binPath= "$INSTDIR\usr\bin\netdata.exe" start= delayed-auto' pop $0 ${If} $0 != 0 - DetailPrint "Warning: Failed to create Netdata service." + DetailPrint "Warning: Failed to create Netdata service." ${EndIf} - ClearErrors + ClearErrors nsExec::ExecToLog '$SYSDIR\sc.exe description Netdata "Real-time system monitoring service"' pop $0 ${If} $0 != 0 - DetailPrint "Warning: Failed to add Netdata service description." + DetailPrint "Warning: Failed to add Netdata service description." ${EndIf} WriteUninstaller "$INSTDIR\Uninstall.exe" Call NetdataUninstallRegistry + Call InstallDLL + Call InstallManifest StrLen $0 $cloudToken StrLen $1 $cloudRooms @@ -325,9 +393,24 @@ Section "Uninstall" DetailPrint "Warning: Failed to delete Netdata service." 
${EndIf} - # https://nsis.sourceforge.io/Reference/RMDir - RMDir /r /REBOOTOK "$INSTDIR" + ; Check if the manifest exists before uninstalling it + IfFileExists "$SYSDIR\wevt_netdata_manifest.xml" ManifestExistsForUninstall ManifestNotExistsForUninstall + +ManifestExistsForUninstall: + nsExec::ExecToLog 'wevtutil um "$SYSDIR\wevt_netdata_manifest.xml"' + pop $0 + ${If} $0 != 0 + DetailPrint "Warning: Failed to uninstall the event manifest." + ${EndIf} + Goto DoneUninstall + +ManifestNotExistsForUninstall: + DetailPrint "Manifest not found, skipping manifest uninstall." + +DoneUninstall: + + ; Remove files + RMDir /r /REBOOTOK "$INSTDIR" DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Netdata" SectionEnd - diff --git a/src/collectors/apps.plugin/apps_os_windows.c b/src/collectors/apps.plugin/apps_os_windows.c index f9098a4dd05116..d2c30de76cfa8a 100644 --- a/src/collectors/apps.plugin/apps_os_windows.c +++ b/src/collectors/apps.plugin/apps_os_windows.c @@ -523,14 +523,10 @@ static char *wchar_to_utf8(WCHAR *s) { static char *ansi_to_utf8(LPCSTR str) { static __thread WCHAR unicode[PATH_MAX]; - static __thread int unicode_size = sizeof(unicode) / sizeof(*unicode); // Step 1: Convert ANSI string (LPSTR) to wide string (UTF-16) - int wideLength = MultiByteToWideChar(CP_ACP, 0, str, -1, NULL, 0); - if (wideLength == 0 || wideLength > unicode_size) - return NULL; - - MultiByteToWideChar(CP_ACP, 0, str, -1, unicode, wideLength); + size_t count = any_to_utf16(CP_ACP, unicode, _countof(unicode), str, -1); + if (!count) return NULL; return wchar_to_utf8(unicode); } diff --git a/src/collectors/apps.plugin/apps_pid.c b/src/collectors/apps.plugin/apps_pid.c index 8d5666275c42d2..949de92860f312 100644 --- a/src/collectors/apps.plugin/apps_pid.c +++ b/src/collectors/apps.plugin/apps_pid.c @@ -327,15 +327,11 @@ static bool is_filename(const char *s) { (*s == '/' && s[1] == '/' && isalpha((uint8_t)s[2]) && s[3] == '/')) { // windows native "//x/" WCHAR 
ws[FILENAME_MAX]; - int wlen = MultiByteToWideChar(CP_UTF8, 0, s, -1, NULL, 0); - if (wlen <= 0 || (size_t)wlen > sizeof(ws) / sizeof(*ws)) { - return false; // Failed to convert UTF-8 to UTF-16 + if(utf8_to_utf16(ws, _countof(ws), s, -1) > 0) { + DWORD attributes = GetFileAttributesW(ws); + if (attributes != INVALID_FILE_ATTRIBUTES) + return true; } - - MultiByteToWideChar(CP_UTF8, 0, s, -1, ws, wlen); - DWORD attributes = GetFileAttributesW(ws); - if (attributes != INVALID_FILE_ATTRIBUTES) - return true; } #endif diff --git a/src/collectors/ebpf.plugin/ebpf.c b/src/collectors/ebpf.plugin/ebpf.c index c3ab357978770e..0e162c685fd37c 100644 --- a/src/collectors/ebpf.plugin/ebpf.c +++ b/src/collectors/ebpf.plugin/ebpf.c @@ -1411,7 +1411,7 @@ void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em) char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY }; char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL }; - struct aral_statistics *stats = aral_statistics(memory); + struct aral_statistics *stats = aral_get_statistics(memory); ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_usage, ""); write_chart_dimension(mem, (long long)stats->structures.allocated_bytes); diff --git a/src/collectors/proc.plugin/proc_net_dev.c b/src/collectors/proc.plugin/proc_net_dev.c index 213c9e45857ecf..e9aedf3df989bc 100644 --- a/src/collectors/proc.plugin/proc_net_dev.c +++ b/src/collectors/proc.plugin/proc_net_dev.c @@ -252,6 +252,8 @@ static struct netdev { // ---------------------------------------------------------------------------- static void netdev_charts_release(struct netdev *d) { + rrdvar_chart_variable_release(d->st_bandwidth, d->chart_var_speed); + if(d->st_bandwidth) rrdset_is_obsolete___safe_from_collector_thread(d->st_bandwidth); if(d->st_packets) rrdset_is_obsolete___safe_from_collector_thread(d->st_packets); if(d->st_errors) rrdset_is_obsolete___safe_from_collector_thread(d->st_errors); diff --git a/src/collectors/systemd-journal.plugin/systemd-internals.h 
b/src/collectors/systemd-journal.plugin/systemd-internals.h index 31acb2f200a2e8..a2d13de6496279 100644 --- a/src/collectors/systemd-journal.plugin/systemd-internals.h +++ b/src/collectors/systemd-journal.plugin/systemd-internals.h @@ -153,4 +153,6 @@ static inline bool parse_journal_field(const char *data, size_t data_length, con void systemd_journal_dyncfg_init(struct functions_evloop_globals *wg); +bool is_journal_file(const char *filename, ssize_t len, const char **start_of_extension); + #endif //NETDATA_COLLECTORS_SYSTEMD_INTERNALS_H diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-files.c b/src/collectors/systemd-journal.plugin/systemd-journal-files.c index 9216dd30e2b148..ea0511f7a42e2a 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal-files.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal-files.c @@ -285,8 +285,8 @@ void journal_file_update_header(const char *filename, struct journal_file *jf) { if(dash_seqnum) { const char *dash_first_msg_ut = strchr(dash_seqnum + 1, '-'); if(dash_first_msg_ut) { - const char *dot_journal = strstr(dash_first_msg_ut + 1, ".journal"); - if(dot_journal) { + const char *dot_journal = NULL; + if(is_journal_file(filename, -1, &dot_journal) && dot_journal && dot_journal > dash_first_msg_ut) { if(dash_seqnum - at - 1 == 32 && dash_first_msg_ut - dash_seqnum - 1 == 16 && dot_journal - dash_first_msg_ut - 1 == 16) { @@ -392,7 +392,7 @@ static void files_registry_insert_cb(const DICTIONARY_ITEM *item, void *value, v char *e = strchr(s, '@'); if(!e) - e = strstr(s, ".journal"); + is_journal_file(s, -1, (const char **)&e); if(e) { const char *d = s; @@ -572,10 +572,39 @@ static void files_registry_delete_cb(const DICTIONARY_ITEM *item, void *value, v string_freez(jf->source); } -void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, const char *dirname, int depth) { - static const char *ext = ".journal"; - static const ssize_t ext_len = sizeof(".journal") - 1; 
+#define EXT_DOT_JOURNAL ".journal" +#define EXT_DOT_JOURNAL_TILDA ".journal~" + +static struct { + const char *ext; + ssize_t len; +} valid_journal_extension[] = { + { .ext = EXT_DOT_JOURNAL, .len = sizeof(EXT_DOT_JOURNAL) - 1 }, + { .ext = EXT_DOT_JOURNAL_TILDA, .len = sizeof(EXT_DOT_JOURNAL_TILDA) - 1 }, +}; + +bool is_journal_file(const char *filename, ssize_t len, const char **start_of_extension) { + if(len < 0) + len = (ssize_t)strlen(filename); + + for(size_t i = 0; i < _countof(valid_journal_extension) ;i++) { + const char *ext = valid_journal_extension[i].ext; + ssize_t elen = valid_journal_extension[i].len; + + if(len > elen && strcmp(filename + len - elen, ext) == 0) { + if(start_of_extension) + *start_of_extension = filename + len - elen; + return true; + } + } + if(start_of_extension) + *start_of_extension = NULL; + + return false; +} + +void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, const char *dirname, int depth) { if (depth > VAR_LOG_JOURNAL_MAX_DEPTH) return; @@ -605,7 +634,7 @@ void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, con if (entry->d_type == DT_DIR) { journal_directory_scan_recursively(files, dirs, full_path, depth++); } - else if (entry->d_type == DT_REG && len > ext_len && strcmp(full_path + len - ext_len, ext) == 0) { + else if (entry->d_type == DT_REG && is_journal_file(full_path, len, NULL)) { if(files) dictionary_set(files, full_path, NULL, 0); @@ -623,7 +652,7 @@ void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, con journal_directory_scan_recursively(files, dirs, resolved_path, depth++); } } - else if(S_ISREG(info.st_mode) && len > ext_len && strcmp(full_path + len - ext_len, ext) == 0) { + else if(S_ISREG(info.st_mode) && is_journal_file(full_path, len, NULL)) { if(files) dictionary_set(files, full_path, NULL, 0); diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c 
b/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c index 6f12f154e2c238..dd48ccc355514a 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c @@ -245,7 +245,7 @@ void process_event(Watcher *watcher, int inotifyFd, struct inotify_event *event) "JOURNAL WATCHER: Received unhandled event with mask %u for directory '%s'", event->mask, fullPath); } - else if(len > sizeof(".journal") - 1 && strcmp(&event->name[len - (sizeof(".journal") - 1)], ".journal") == 0) { + else if(is_journal_file(event->name, (ssize_t)len, NULL)) { // It is a file that ends in .journal // add it to our pending list dictionary_set(watcher->pending, fullPath, NULL, 0); diff --git a/src/collectors/systemd-journal.plugin/systemd-journal.c b/src/collectors/systemd-journal.plugin/systemd-journal.c index 56d37b807fa293..8d8055a8ff9382 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal.c @@ -85,6 +85,7 @@ struct lqs_extension { #define LQS_SOURCE_TYPE SD_JOURNAL_FILE_SOURCE_TYPE #define LQS_SOURCE_TYPE_ALL SDJF_ALL #define LQS_SOURCE_TYPE_NONE SDJF_NONE +#define LQS_PARAMETER_SOURCE_NAME "Journal Sources" // this is how it is shown to users #define LQS_FUNCTION_GET_INTERNAL_SOURCE_TYPE(value) get_internal_source_type(value) #define LQS_FUNCTION_SOURCE_TO_JSON_ARRAY(wb) available_journal_file_sources_to_json_array(wb) #include "libnetdata/facets/logs_query_status.h" diff --git a/src/collectors/windows-events.plugin/README.md b/src/collectors/windows-events.plugin/README.md new file mode 100644 index 00000000000000..8e05a79b6beeb9 --- /dev/null +++ b/src/collectors/windows-events.plugin/README.md @@ -0,0 +1,289 @@ +# Windows Events plugin + +[KEY FEATURES](#key-features) | [EVENTS SOURCES](#events-sources) | [EVENT FIELDS](#event-fields) | +[PLAY MODE](#play-mode) | [FULL TEXT SEARCH](#full-text-search) | 
[PERFORMANCE](#query-performance) | +[CONFIGURATION](#configuration-and-maintenance) | [FAQ](#faq) + +The Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and +efficient. + +![image](https://github.com/user-attachments/assets/71a1ab1d-5b7b-477e-a4e6-a30275a5710b) + +## Key features + +- Supports **Windows Event Logs (WEL)**. +- Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to Event Log. +- Allows filtering on all System Events fields. +- Allows **full text search** (`grep`) on all System and User fields. +- Provides a **histogram** for log entries over time, with a break down per field-value, for any System Event field and any + time-frame. +- Supports coloring log entries based on severity. +- In PLAY mode it "tails" all the Events, showing new log entries immediately after they are received. + +### Prerequisites + +`windows-events.plugin` is a Netdata Function Plugin. + +To protect your privacy, as with all Netdata Functions, a free Netdata Cloud user account is required to access it. +For more information check [this discussion](https://github.com/netdata/netdata/discussions/16136). + +## Events Sources + +The plugin automatically detects all the available channels and offers a list of "Event Channels". + +By default, it aggregates events from all event channels, providing a unified systems view of all events. + +> To improve query performance, we recommend selecting the relevant event channels, before doing more +> analysis on the events. + +In the list of events channels, several shortcuts are added, aggregating events according to various attributes: + +- `All`, aggregates events from all available channels. This provides a holistic view of all events in the system. +- `All-Admin`, `All-Operational`, `All-Analytic` and `All-Debug` aggregates events from channels marked `Admin`, `Operational`, `Analytic` and `Debug`, respectively. 
+- `All-Windows`, aggregates events from `Application`, `Security`, `System` and `Setup`. +- `All-Enabled` and `All-Disabled` aggregates events from channels depending on their status. +- `All-Forwarded` aggregates events from channels owned by `Microsoft-Windows-EventCollector`. +- `All-Classic` aggregates events from channels using the Classic Event Log API. +- `All-Of-X`, where `X` is a provider name, is offered for all providers having more than a channel. +- `All-In-X`, where `X` is `Backup-Mode`, `Overwrite-Mode`, `StopWhenFull-Mode` and `RetainAndBackup-Mode`, aggregate events based on their channel retention policy. + +Channels that are configured but are not queryable, and channels that do not have any events in them, are automatically excluded from the channels list. + +## Event Fields + +Windows Events are structured with both system-defined fields and user-defined fields. +The Windows Events plugin primarily works with the system-defined fields, which are consistently available +across all event types. + +### System-defined fields + +The system-defined fields are: + +1. **EventRecordID** + A unique, sequential identifier for the event within the channel. This ID increases as new events are logged. + +2. **Version** + The version of the event, indicating possible structural changes or updates to the event definition. + + Netdata adds this field automatically when it is not zero. + +3. **Level** + The severity or importance of the event. Levels can include: + - 0: LogAlways (reserved) + - 1: Critical + - 2: Error + - 3: Warning + - 4: Informational + - 5: Verbose + + Additionally, applications may define their own levels. + + Netdata provides 2 fields: `Level` and `LevelID` for the text and numeric representation of it. + +4. **Opcode** + The action or state within a provider when the event was logged. + + Netdata provides 2 fields: `Opcode` and `OpcodeID` for the text and numeric representation of it. + +5. 
**EventID** + This identifies the event template, linking it to a specific message or event type. Event IDs are provider-specific. + +6. **Task** + Defines a higher-level categorization or logical grouping for the event, often related to a specific function within the application or provider. + + Netdata provides 2 fields: `Task` and `TaskID` for the text and numeric representation of it. + +7. **Qualifiers** + Provides additional detail for interpreting the event and is often specific to the event source. + + Netdata adds this field automatically when it is not zero. + +8. **ProcessID** + The ID of the process that generated the event, useful for pinpointing the source of the event within the system. + +9. **ThreadID** + The ID of the thread within the process that generated the event, which helps in more detailed debugging scenarios. + +10. **Keywords** + A categorization field that can be used for event filtering. Keywords are bit flags that represent categories or purposes of the event, providing additional context. + + Netdata provides 2 fields: `Keywords` and `keywordsID` for the text and numeric representation of it. + +11. **Provider** + The unique identifier (GUID) of the event provider. This is essential for knowing which application or system component generated the event. + + Netdata provides 2 fields: `Provider` and `ProviderGUID` for its name and GUID of it. + +12. **ActivityID** + A GUID that correlates events generated as part of the same operation or transaction, helping to track activities across different components or stages. + + Netdata adds this field automatically when it is not zero. + +13. **RelatedActivityID** + A GUID that links related operations or transactions, allowing for tracing complex workflows where one event triggers or relates to another. + + Netdata adds this field automatically when it is not zero. + +14. **Timestamp** + The timestamp when the event was created. 
This provides precise timing information about when the event occurred. + +15. **User** + The system user who logged this event. + + Netdata provides 3 fields: `UserAccount`, `UserDomain` and `UserSID`. + +### User-defined fields +Each event log entry can include up to 100 user-defined fields (per event-id). + +Unfortunately, accessing these fields is significantly slower, to a level that is not practical to do so +when there are more than a few thousand log entries to explore. So, Netdata presents them +with lazy loading. + +This prevents Netdata from offering filtering for user-defined fields, although Netdata does support +full text search on user-defined field values. + +### Event fields as columns in the table + +The system fields mentioned above are offered as columns on the UI. Use the gear button above the table to +select visible columns. + +### Event fields as filters + +The plugin presents the system fields as filters for the query, with counters for each of the possible values +for the field. This list can be used to quickly check which fields and values are available for the entire +time-frame of the query, across multiple providers and channels. + +### Event fields as histogram sources + +The histogram can be based on any of the system fields that are available as filters. This allows you to +visualize the distribution of events over time based on different criteria such as Level, Provider, or EventID. + +## PLAY mode + +The PLAY mode in this plugin allows real-time monitoring of new events as they are added to the Windows Event +Log. This feature works by continuously querying for new events and updating the display. + +## Full-text search + +The plugin supports searching for text within all system and user fields of the events. This means that while +user-defined fields are not directly filterable, they are searchable through the full-text search feature. 
+ +Keep in mind that query performance is slower while doing full text search, mainly because the plugin +needs to ask from the system to provide all the user fields values. + +## Query performance + +The plugin is optimized to work efficiently with Event Logs. It uses several layers of caching and +similar techniques to offload as much work as possible from the system, offering quick responses even when +hundreds of thousands of events are within the visible timeframe. + +To achieve this level of efficiency, the plugin: + +- pre-loads ETW providers' manifests for resolving numeric Levels, Opcodes, Tasks and Keywords to text. +- caches number to text maps for Levels, Opcodes, Tasks and Keywords per provider for WEL providers. +- caches user SID to account and domain maps. +- lazy loads the "expensive" event Message and XML, so that the system is queried only for the visible events. + +For Full Text Search: + +- requests only the Message and the values of the user-fields from the system, avoiding the "expensive" XML call (which is still lazy-loaded). + +The result is a system that is highly efficient for working with moderate volumes (hundreds of thousands) of events. + +## Configuration and maintenance + +This Netdata plugin does not require any specific configuration. It automatically detects available event logs +on the system. + +## FAQ + +### Can I use this plugin on event centralization servers? + +Yes. You can centralize your Windows Events using Windows Event Forwarding (WEF) or other event collection +mechanisms, and then install Netdata on this events centralization server to explore the events of all your +infrastructure. + +This plugin will automatically provide multi-node views of your events and also give you the ability to +combine the events of multiple servers, as you see fit. + +### Can I use this plugin from a parent Netdata? + +Yes. When your nodes are connected to a Netdata parent, all their functions are available via the parent's UI. 
+So, from the parent UI, you can access the functions of all your nodes. + +Keep in mind that to protect your privacy, in order to access Netdata functions, you need a free Netdata Cloud +account. + +### Is any of my data exposed to Netdata Cloud from this plugin? + +No. When you access the agent directly, none of your data passes through Netdata Cloud. You need a free Netdata +Cloud account only to verify your identity and enable the use of Netdata Functions. Once this is done, all the +data flow directly from your Netdata agent to your web browser. + +When you access Netdata via https://app.netdata.cloud, your data travel via Netdata Cloud, but they are not stored +in Netdata Cloud. This is to allow you to access your Netdata agents from anywhere. All communication from/to +Netdata Cloud is encrypted. + +### What are the different types of event logs supported by this plugin? + +The plugin supports all the kinds of event logs currently supported by the Windows Event Viewer: + +- Windows Event Logs (WEL): The traditional event logging system in Windows. +- Event Tracing for Windows (ETW): A more detailed and efficient event tracing system. +- TraceLogging (TL): An extension of ETW that simplifies the process of adding events to your code. + +The plugin can access all of these when they are routed to the Windows Event Log. + +### How does this plugin handle user-defined fields in Windows Events? + +User-defined fields are not directly exposed as table columns or filters in the plugin interface. However, +they are included in the XML representation of each event, which can be viewed in the info sidebar when +clicking on an event entry. Additionally, the full-text search feature does search through these +user-defined fields, allowing you to find specific information even if it's not in the main system fields. + +### Can I use this plugin to monitor real-time events? + +Yes, the plugin supports a PLAY mode that allows you to monitor events in real-time. 
When activated, it +continuously updates to show new events as they are logged, similar to the "tail" functionality in +Unix-like systems. + +### How does the plugin handle large volumes of events? + +The plugin is designed to handle moderate volumes of events (hundreds of thousands of events) efficiently. + +It is in our roadmap to port the `systemd-journal` sampling techniques to it, for working with very large +datasets to provide quick responses while still giving accurate representations of the data. However, for +the best performance, we recommend querying smaller time frames or using more specific filters when dealing +with extremely large event volumes. + +### Can I use this plugin to analyze events from multiple servers? + +Yes, if you have set up Windows Event Forwarding (WEF) or another method of centralizing your Windows Events, +you can use this plugin on the central server to analyze events from multiple sources. The plugin will +automatically detect the available event sources. + +### How does the histogram feature work in this plugin? + +The histogram feature provides a visual representation of event frequency over time. You can base the +histogram on any of the system fields available as filters (such as Level, Provider, or EventID). This +allows you to quickly identify patterns or anomalies in your event logs. + +### Is it possible to export or share the results from this plugin? + +While the plugin doesn't have a direct export feature, you can use browser-based methods to save or share +the results. This could include taking screenshots, using browser print/save as PDF functionality, or +copying data from the table view. For more advanced data export needs, you might need to use the Windows +Event Log API directly or other Windows administrative tools. + +### How often does the plugin update its data? + +The plugin updates its data in real-time when in PLAY mode. In normal mode, it refreshes data based on the +query you've submitted. 
The plugin is designed to provide the most up-to-date information available in the +Windows Event Logs at the time of the query. + +## TODO + +1. Support Sampling, so that the plugin can respond faster even on very busy systems (millions of events visible). +2. Support exploring events from live Tracing sessions. +3. Support exploring events in saved Event Trace Log files (`.etl` files). +4. Support exploring events in saved Event Logs files (`.evtx` files). diff --git a/src/collectors/windows-events.plugin/windows-events-fields-cache.c b/src/collectors/windows-events.plugin/windows-events-fields-cache.c index f5f1f1fe4c1f73..1e1e2a4e96bd1e 100644 --- a/src/collectors/windows-events.plugin/windows-events-fields-cache.c +++ b/src/collectors/windows-events.plugin/windows-events-fields-cache.c @@ -53,16 +53,16 @@ void field_cache_init(void) { static inline bool should_zero_provider(WEVT_FIELD_TYPE type, uint64_t value) { switch(type) { case WEVT_FIELD_TYPE_LEVEL: - return !is_valid_publisher_level(value, true); + return !is_valid_provider_level(value, true); - case WEVT_FIELD_TYPE_KEYWORDS: - return !is_valid_publisher_keywords(value, true); + case WEVT_FIELD_TYPE_KEYWORD: + return !is_valid_provider_keyword(value, true); case WEVT_FIELD_TYPE_OPCODE: - return !is_valid_publisher_opcode(value, true); + return !is_valid_provider_opcode(value, true); case WEVT_FIELD_TYPE_TASK: - return !is_valid_publisher_task(value, true); + return !is_valid_provider_task(value, true); default: return false; diff --git a/src/collectors/windows-events.plugin/windows-events-fields-cache.h b/src/collectors/windows-events.plugin/windows-events-fields-cache.h index 9b0866ab67ef48..a76170d68a3eae 100644 --- a/src/collectors/windows-events.plugin/windows-events-fields-cache.h +++ b/src/collectors/windows-events.plugin/windows-events-fields-cache.h @@ -8,7 +8,7 @@ typedef enum __attribute__((packed)) { WEVT_FIELD_TYPE_LEVEL = 0, WEVT_FIELD_TYPE_OPCODE, - WEVT_FIELD_TYPE_KEYWORDS, + 
WEVT_FIELD_TYPE_KEYWORD, WEVT_FIELD_TYPE_TASK, // terminator diff --git a/src/collectors/windows-events.plugin/windows-events-publishers.c b/src/collectors/windows-events.plugin/windows-events-providers.c similarity index 61% rename from src/collectors/windows-events.plugin/windows-events-publishers.c rename to src/collectors/windows-events.plugin/windows-events-providers.c index 5928cb8df4dd1e..a0400cab937959 100644 --- a/src/collectors/windows-events.plugin/windows-events-publishers.c +++ b/src/collectors/windows-events.plugin/windows-events-providers.c @@ -1,17 +1,19 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "windows-events-publishers.h" +#include "windows-events-providers.h" -#define MAX_OPEN_HANDLES_PER_PUBLISHER 5 +#define MAX_OPEN_HANDLES_PER_PROVIDER 5 -struct publisher; +struct provider; // typedef as PROVIDER_META_HANDLE in include file struct provider_meta_handle { pid_t owner; // the owner of the handle, or zero uint32_t locks; // the number of locks the owner has on this handle EVT_HANDLE hMetadata; // the handle - struct publisher *publisher; // a pointer back to the publisher + struct provider *provider; // a pointer back to the provider + + usec_t created_monotonic_ut; // the monotonic timestamp this handle was created // double linked list PROVIDER_META_HANDLE *prev; @@ -32,78 +34,156 @@ struct provider_list { struct provider_data *array; // the array of entries, sorted (for binary search) }; -typedef struct publisher { +typedef struct provider_key { ND_UUID uuid; // the Provider GUID - const char *name; // the Provider name (UTF-8) + DWORD len; // the length of the Provider Name + const wchar_t *wname; // the Provider wide-string Name (UTF-16) +} PROVIDER_KEY; + +typedef struct provider { + PROVIDER_KEY key; + const char *name; // the Provider Name (UTF-8) uint32_t total_handles; // the number of handles allocated uint32_t available_handles; // the number of available handles uint32_t deleted_handles; // the number of deleted 
handles PROVIDER_META_HANDLE *handles; // a double linked list of all the handles - struct provider_list keywords; + WEVT_PROVIDER_PLATFORM platform; + + struct provider_list keyword; struct provider_list tasks; struct provider_list opcodes; struct provider_list levels; -} PUBLISHER; - -// A hashtable implementation for publishers -// using the provider GUID as key and PUBLISHER as value -#define SIMPLE_HASHTABLE_NAME _PROVIDER_GUID -#define SIMPLE_HASHTABLE_VALUE_TYPE PUBLISHER -#define SIMPLE_HASHTABLE_KEY_TYPE ND_UUID -#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION publisher_value_to_key -#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION publisher_cache_compar +} PROVIDER; + +// A hashtable implementation for Providers +// using the Provider GUID as key and PROVIDER as value +#define SIMPLE_HASHTABLE_NAME _PROVIDER +#define SIMPLE_HASHTABLE_VALUE_TYPE PROVIDER +#define SIMPLE_HASHTABLE_KEY_TYPE PROVIDER_KEY +#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION provider_value_to_key +#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION provider_cache_compar #define SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION 1 #include "libnetdata/simple_hashtable.h" static struct { SPINLOCK spinlock; - uint32_t total_publishers; + uint32_t total_providers; uint32_t total_handles; uint32_t deleted_handles; - struct simple_hashtable_PROVIDER_GUID hashtable; - ARAL *aral_publishers; + struct simple_hashtable_PROVIDER hashtable; + ARAL *aral_providers; ARAL *aral_handles; } pbc = { .spinlock = NETDATA_SPINLOCK_INITIALIZER, }; -static void publisher_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, WEVT_VARIANT *property, TXT_UNICODE *dst, struct provider_list *l, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id); +static void provider_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, WEVT_VARIANT *property, TXT_UNICODE *dst, struct provider_list *l, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id); + +const char *provider_get_name(PROVIDER_META_HANDLE *p) { + return (p && p->provider && 
p->provider->name) ? p->provider->name : "__UNKNOWN PROVIDER__"; +} + +ND_UUID provider_get_uuid(PROVIDER_META_HANDLE *p) { + return (p && p->provider) ? p->provider->key.uuid : UUID_ZERO; +} -static inline ND_UUID *publisher_value_to_key(PUBLISHER *p) { - return &p->uuid; +static inline PROVIDER_KEY *provider_value_to_key(PROVIDER *p) { + return &p->key; } -static inline bool publisher_cache_compar(ND_UUID *a, ND_UUID *b) { - return UUIDeq(*a, *b); +static inline bool provider_cache_compar(PROVIDER_KEY *a, PROVIDER_KEY *b) { + return a->len == b->len && UUIDeq(a->uuid, b->uuid) && memcmp(a->wname, b->wname, a->len) == 0; } -void publisher_cache_init(void) { - simple_hashtable_init_PROVIDER_GUID(&pbc.hashtable, 100000); - pbc.aral_publishers = aral_create("wevt_publishers", sizeof(PUBLISHER), 0, 4096, NULL, NULL, NULL, false, true); +void provider_cache_init(void) { + simple_hashtable_init_PROVIDER(&pbc.hashtable, 100000); + pbc.aral_providers = aral_create("wevt_providers", sizeof(PROVIDER), 0, 4096, NULL, NULL, NULL, false, true); pbc.aral_handles = aral_create("wevt_handles", sizeof(PROVIDER_META_HANDLE), 0, 4096, NULL, NULL, NULL, false, true); } -PROVIDER_META_HANDLE *publisher_get(ND_UUID uuid, LPCWSTR providerName) { - if(!providerName || !providerName[0] || UUIDiszero(uuid)) +static bool provider_property_get(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id) { + DWORD bufferUsed = 0; + + if(!EvtGetPublisherMetadataProperty(h->hMetadata, property_id, 0, 0, NULL, &bufferUsed)) { + DWORD status = GetLastError(); + if (status != ERROR_INSUFFICIENT_BUFFER) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetPublisherMetadataProperty() failed"); + goto cleanup; + } + } + + wevt_variant_resize(content, bufferUsed); + if (!EvtGetPublisherMetadataProperty(h->hMetadata, property_id, 0, content->size, content->data, &bufferUsed)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetPublisherMetadataProperty() failed after resize"); + 
goto cleanup; + } + + return true; + +cleanup: + return false; +} + +static bool provider_string_property_exists(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id) { + if(!provider_property_get(h, content, property_id)) + return false; + + if(content->data->Type != EvtVarTypeString) + return false; + + if(!content->data->StringVal[0]) + return false; + + return true; +} + +static void provider_detect_platform(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content) { + if(UUIDiszero(h->provider->key.uuid)) + h->provider->platform = WEVT_PLATFORM_WEL; + else if(h->hMetadata) { + if (provider_string_property_exists(h, content, EvtPublisherMetadataMessageFilePath) || + provider_string_property_exists(h, content, EvtPublisherMetadataResourceFilePath) || + provider_string_property_exists(h, content, EvtPublisherMetadataParameterFilePath)) + h->provider->platform = WEVT_PLATFORM_ETW; + else + // The provider cannot be opened, does not have any resource files (message, resource, parameter) + h->provider->platform = WEVT_PLATFORM_TL; + } + else h->provider->platform = WEVT_PLATFORM_ETW; +} + +WEVT_PROVIDER_PLATFORM provider_get_platform(PROVIDER_META_HANDLE *p) { + return p->provider->platform; +} + +PROVIDER_META_HANDLE *provider_get(ND_UUID uuid, LPCWSTR providerName) { + if(!providerName || !providerName[0]) return NULL; - // XXH64_hash_t hash = XXH3_64bits(&uuid, sizeof(uuid)); - uint64_t hash = uuid.parts.low64 + uuid.parts.hig64; + PROVIDER_KEY key = { + .uuid = uuid, + .len = wcslen(providerName), + .wname = providerName, + }; + XXH64_hash_t hash = XXH3_64bits(providerName, wcslen(key.wname) * sizeof(*key.wname)); spinlock_lock(&pbc.spinlock); - SIMPLE_HASHTABLE_SLOT_PROVIDER_GUID *slot = - simple_hashtable_get_slot_PROVIDER_GUID(&pbc.hashtable, hash, &uuid, true); + SIMPLE_HASHTABLE_SLOT_PROVIDER *slot = + simple_hashtable_get_slot_PROVIDER(&pbc.hashtable, hash, &key, true); bool load_it = false; - PUBLISHER *p = 
SIMPLE_HASHTABLE_SLOT_DATA(slot); + PROVIDER *p = SIMPLE_HASHTABLE_SLOT_DATA(slot); if(!p) { - p = aral_callocz(pbc.aral_publishers); - p->uuid = uuid; - simple_hashtable_set_slot_PROVIDER_GUID(&pbc.hashtable, slot, hash, p); + p = aral_callocz(pbc.aral_providers); + p->key.uuid = key.uuid; + p->key.len = key.len; + p->key.wname = wcsdup(key.wname); + p->name = strdupz(provider2utf8(key.wname)); + simple_hashtable_set_slot_PROVIDER(&pbc.hashtable, slot, hash, p); load_it = true; - pbc.total_publishers++; + pbc.total_providers++; } pid_t me = gettid_cached(); @@ -117,7 +197,8 @@ PROVIDER_META_HANDLE *publisher_get(ND_UUID uuid, LPCWSTR providerName) { if(!h) { h = aral_callocz(pbc.aral_handles); - h->publisher = p; + h->provider = p; + h->created_monotonic_ut = now_monotonic_usec(); h->hMetadata = EvtOpenPublisherMetadata( NULL, // Local machine providerName, // Provider name @@ -147,10 +228,11 @@ PROVIDER_META_HANDLE *publisher_get(ND_UUID uuid, LPCWSTR providerName) { WEVT_VARIANT property = { 0 }; TXT_UNICODE unicode = { 0 }; - publisher_load_list(h, &content, &property, &unicode, &p->keywords, EvtPublisherMetadataKeywords); - publisher_load_list(h, &content, &property, &unicode, &p->levels, EvtPublisherMetadataLevels); - publisher_load_list(h, &content, &property, &unicode, &p->opcodes, EvtPublisherMetadataOpcodes); - publisher_load_list(h, &content, &property, &unicode, &p->tasks, EvtPublisherMetadataTasks); + provider_detect_platform(h, &content); + provider_load_list(h, &content, &property, &unicode, &p->keyword, EvtPublisherMetadataKeywords); + provider_load_list(h, &content, &property, &unicode, &p->levels, EvtPublisherMetadataLevels); + provider_load_list(h, &content, &property, &unicode, &p->opcodes, EvtPublisherMetadataOpcodes); + provider_load_list(h, &content, &property, &unicode, &p->tasks, EvtPublisherMetadataTasks); txt_unicode_cleanup(&unicode); wevt_variant_cleanup(&content); @@ -162,42 +244,72 @@ PROVIDER_META_HANDLE *publisher_get(ND_UUID uuid, 
LPCWSTR providerName) { return h; } -EVT_HANDLE publisher_handle(PROVIDER_META_HANDLE *h) { +EVT_HANDLE provider_handle(PROVIDER_META_HANDLE *h) { return h ? h->hMetadata : NULL; } -PROVIDER_META_HANDLE *publisher_dup(PROVIDER_META_HANDLE *h) { +PROVIDER_META_HANDLE *provider_dup(PROVIDER_META_HANDLE *h) { if(h) h->locks++; return h; } -void publisher_release(PROVIDER_META_HANDLE *h) { +static void provider_meta_handle_delete(PROVIDER_META_HANDLE *h) { + PROVIDER *p = h->provider; + + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(p->handles, h, prev, next); + + if(h->hMetadata) + EvtClose(h->hMetadata); + + aral_freez(pbc.aral_handles, h); + + fatal_assert(pbc.total_handles && p->total_handles && p->available_handles); + + pbc.total_handles--; + p->total_handles--; + + pbc.deleted_handles++; + p->deleted_handles++; + + p->available_handles--; +} + +void providers_release_unused_handles(void) { + usec_t now_ut = now_monotonic_usec(); + + spinlock_lock(&pbc.spinlock); + for(size_t i = 0; i < pbc.hashtable.size ; i++) { + SIMPLE_HASHTABLE_SLOT_PROVIDER *slot = &pbc.hashtable.hashtable[i]; + PROVIDER *p = SIMPLE_HASHTABLE_SLOT_DATA(slot); + if(!p) continue; + + PROVIDER_META_HANDLE *h = p->handles; + while(h) { + PROVIDER_META_HANDLE *next = h->next; + + if(!h->locks && (now_ut - h->created_monotonic_ut) >= WINDOWS_EVENTS_RELEASE_IDLE_PROVIDER_HANDLES_TIME_UT) + provider_meta_handle_delete(h); + + h = next; + } + } + spinlock_unlock(&pbc.spinlock); +} + +void provider_release(PROVIDER_META_HANDLE *h) { if(!h) return; pid_t me = gettid_cached(); fatal_assert(h->owner == me); fatal_assert(h->locks > 0); if(--h->locks == 0) { - PUBLISHER *p = h->publisher; + PROVIDER *p = h->provider; spinlock_lock(&pbc.spinlock); h->owner = 0; - if(++p->available_handles > MAX_OPEN_HANDLES_PER_PUBLISHER) { - // there are multiple handles on this publisher - DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(p->handles, h, prev, next); - - if(h->hMetadata) - EvtClose(h->hMetadata); - - 
aral_freez(pbc.aral_handles, h); - - pbc.total_handles--; - p->total_handles--; - - pbc.deleted_handles++; - p->deleted_handles++; - - p->available_handles--; + if(++p->available_handles > MAX_OPEN_HANDLES_PER_PROVIDER) { + // there are too many idle handles on this provider + provider_meta_handle_delete(h); } else if(h->next) { // it is not the last, put it at the end @@ -210,7 +322,7 @@ void publisher_release(PROVIDER_META_HANDLE *h) { } // -------------------------------------------------------------------------------------------------------------------- -// load publisher lists +// load provider lists static bool wevt_get_property_from_array(WEVT_VARIANT *property, EVT_HANDLE handle, DWORD dwIndex, EVT_PUBLISHER_METADATA_PROPERTY_ID PropertyId) { DWORD used = 0; @@ -253,7 +365,7 @@ static int compare_ascending(const void *a, const void *b) { // return 0; //} -static void publisher_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, WEVT_VARIANT *property, TXT_UNICODE *dst, struct provider_list *l, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id) { +static void provider_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, WEVT_VARIANT *property, TXT_UNICODE *dst, struct provider_list *l, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id) { if(!h || !h->hMetadata) return; EVT_PUBLISHER_METADATA_PROPERTY_ID name_id, message_id, value_id; @@ -268,7 +380,7 @@ static void publisher_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, value_id = EvtPublisherMetadataLevelValue; value_bits = 32; compare_func = compare_ascending; - is_valid = is_valid_publisher_level; + is_valid = is_valid_provider_level; break; case EvtPublisherMetadataOpcodes: @@ -276,7 +388,7 @@ static void publisher_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, message_id = EvtPublisherMetadataOpcodeMessageID; value_id = EvtPublisherMetadataOpcodeValue; value_bits = 32; - is_valid = is_valid_publisher_opcode; + is_valid = is_valid_provider_opcode; compare_func = 
compare_ascending; break; @@ -285,7 +397,7 @@ static void publisher_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, message_id = EvtPublisherMetadataTaskMessageID; value_id = EvtPublisherMetadataTaskValue; value_bits = 32; - is_valid = is_valid_publisher_task; + is_valid = is_valid_provider_task; compare_func = compare_ascending; break; @@ -294,7 +406,7 @@ static void publisher_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, message_id = EvtPublisherMetadataKeywordMessageID; value_id = EvtPublisherMetadataKeywordValue; value_bits = 64; - is_valid = is_valid_publisher_keywords; + is_valid = is_valid_provider_keyword; compare_func = NULL; break; @@ -305,23 +417,11 @@ static void publisher_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, EVT_HANDLE hMetadata = h->hMetadata; EVT_HANDLE hArray = NULL; - DWORD bufferUsed = 0; DWORD itemCount = 0; // Get the metadata array for the list (e.g., opcodes, tasks, or levels) - if (!EvtGetPublisherMetadataProperty(hMetadata, property_id, 0, 0, NULL, &bufferUsed)) { - DWORD status = GetLastError(); - if (status != ERROR_INSUFFICIENT_BUFFER) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetPublisherMetadataProperty() failed"); - goto cleanup; - } - } - - wevt_variant_resize(content, bufferUsed); - if (!EvtGetPublisherMetadataProperty(hMetadata, property_id, 0, content->size, content->data, &bufferUsed)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetPublisherMetadataProperty() failed after resize"); + if(!provider_property_get(h, content, property_id)) goto cleanup; - } // Get the number of items (e.g., levels, tasks, or opcodes) hArray = content->data->EvtHandleVal; @@ -375,7 +475,7 @@ static void publisher_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, uint32_t messageID = wevt_field_get_uint32(property->data); if (messageID != (uint32_t)-1) { - if (wevt_get_message_unicode(dst, hMetadata, NULL, messageID, EvtFormatMessageId)) { + if (EvtFormatMessage_utf16(dst, hMetadata, NULL, messageID, 
EvtFormatMessageId)) { size_t len; d->name = unicode2utf8_strdupz(dst->data, &len); d->len = len; @@ -414,7 +514,7 @@ static void publisher_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, // lookup functions // lookup bitmap metdata (returns a comma separated list of strings) -static bool publisher_bitmap_metadata(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) { +static bool provider_bitmap_metadata(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) { if(!(value & l->mask) || !l->total || !l->array || l->exceeds_data_type) return false; @@ -445,7 +545,7 @@ static bool publisher_bitmap_metadata(TXT_UTF8 *dst, struct provider_list *l, ui memcpy(&dst->data[dst->used], s, slen); dst->used += slen; - dst->src = TXT_SOURCE_PUBLISHER; + dst->src = TXT_SOURCE_PROVIDER; added++; } } @@ -460,7 +560,7 @@ static bool publisher_bitmap_metadata(TXT_UTF8 *dst, struct provider_list *l, ui } //// lookup a single value (returns its string) -//static bool publisher_value_metadata_linear(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) { +//static bool provider_value_metadata_linear(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) { // if(value < l->min || value > l->max || !l->total || !l->array || l->exceeds_data_type) // return false; // @@ -477,7 +577,7 @@ static bool publisher_bitmap_metadata(TXT_UTF8 *dst, struct provider_list *l, ui // // memcpy(dst->data, s, slen); // dst->used = slen; -// dst->src = TXT_SOURCE_PUBLISHER; +// dst->src = TXT_SOURCE_PROVIDER; // // break; // } @@ -493,11 +593,11 @@ static bool publisher_bitmap_metadata(TXT_UTF8 *dst, struct provider_list *l, ui // return (dst->used > 0); //} -static bool publisher_value_metadata(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) { +static bool provider_value_metadata(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) { if(value < l->min || value > l->max || !l->total || !l->array || l->exceeds_data_type) return false; - // if(l->total < 3) return 
publisher_value_metadata_linear(dst, l, value); + // if(l->total < 3) return provider_value_metadata_linear(dst, l, value); dst->used = 0; @@ -519,7 +619,7 @@ static bool publisher_value_metadata(TXT_UTF8 *dst, struct provider_list *l, uin memcpy(dst->data, s, slen); dst->used = slen; dst->data[dst->used++] = 0; - dst->src = TXT_SOURCE_PUBLISHER; + dst->src = TXT_SOURCE_PROVIDER; } break; } @@ -539,38 +639,38 @@ static bool publisher_value_metadata(TXT_UTF8 *dst, struct provider_list *l, uin // -------------------------------------------------------------------------------------------------------------------- // public API to lookup metadata -bool publisher_keywords_cacheable(PROVIDER_META_HANDLE *h) { - return h && !h->publisher->keywords.exceeds_data_type; +bool provider_keyword_cacheable(PROVIDER_META_HANDLE *h) { + return h && !h->provider->keyword.exceeds_data_type; } -bool publisher_tasks_cacheable(PROVIDER_META_HANDLE *h) { - return h && !h->publisher->tasks.exceeds_data_type; +bool provider_tasks_cacheable(PROVIDER_META_HANDLE *h) { + return h && !h->provider->tasks.exceeds_data_type; } -bool is_useful_publisher_for_levels(PROVIDER_META_HANDLE *h) { - return h && !h->publisher->levels.exceeds_data_type; +bool is_useful_provider_for_levels(PROVIDER_META_HANDLE *h) { + return h && !h->provider->levels.exceeds_data_type; } -bool publisher_opcodes_cacheable(PROVIDER_META_HANDLE *h) { - return h && !h->publisher->opcodes.exceeds_data_type; +bool provider_opcodes_cacheable(PROVIDER_META_HANDLE *h) { + return h && !h->provider->opcodes.exceeds_data_type; } -bool publisher_get_keywords(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { +bool provider_get_keywords(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { if(!h) return false; - return publisher_bitmap_metadata(dst, &h->publisher->keywords, value); + return provider_bitmap_metadata(dst, &h->provider->keyword, value); } -bool publisher_get_level(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t 
value) { +bool provider_get_level(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { if(!h) return false; - return publisher_value_metadata(dst, &h->publisher->levels, value); + return provider_value_metadata(dst, &h->provider->levels, value); } -bool publisher_get_task(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { +bool provider_get_task(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { if(!h) return false; - return publisher_value_metadata(dst, &h->publisher->tasks, value); + return provider_value_metadata(dst, &h->provider->tasks, value); } -bool publisher_get_opcode(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { +bool provider_get_opcode(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) { if(!h) return false; - return publisher_value_metadata(dst, &h->publisher->opcodes, value); + return provider_value_metadata(dst, &h->provider->opcodes, value); } diff --git a/src/collectors/windows-events.plugin/windows-events-providers.h b/src/collectors/windows-events.plugin/windows-events-providers.h new file mode 100644 index 00000000000000..b6d476c5c34164 --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-providers.h @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_WINDOWS_EVENTS_PROVIDERS_H +#define NETDATA_WINDOWS_EVENTS_PROVIDERS_H + +typedef enum __attribute__((packed)) { + WEVT_PLATFORM_UNKNOWN = 0, + WEVT_PLATFORM_WEL, + WEVT_PLATFORM_ETW, + WEVT_PLATFORM_TL, +} WEVT_PROVIDER_PLATFORM; + +#include "windows-events.h" + +struct provider_meta_handle; +typedef struct provider_meta_handle PROVIDER_META_HANDLE; + +PROVIDER_META_HANDLE *provider_get(ND_UUID uuid, LPCWSTR providerName); +void provider_release(PROVIDER_META_HANDLE *h); +EVT_HANDLE provider_handle(PROVIDER_META_HANDLE *h); +PROVIDER_META_HANDLE *provider_dup(PROVIDER_META_HANDLE *h); + +void providers_release_unused_handles(void); + +const char *provider_get_name(PROVIDER_META_HANDLE *p); +ND_UUID 
provider_get_uuid(PROVIDER_META_HANDLE *p); + +void provider_cache_init(void); + +bool provider_keyword_cacheable(PROVIDER_META_HANDLE *h); +bool provider_tasks_cacheable(PROVIDER_META_HANDLE *h); +bool is_useful_provider_for_levels(PROVIDER_META_HANDLE *h); +bool provider_opcodes_cacheable(PROVIDER_META_HANDLE *h); + +bool provider_get_keywords(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); +bool provider_get_level(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); +bool provider_get_task(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); +bool provider_get_opcode(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); +WEVT_PROVIDER_PLATFORM provider_get_platform(PROVIDER_META_HANDLE *p); + +#endif //NETDATA_WINDOWS_EVENTS_PROVIDERS_H diff --git a/src/collectors/windows-events.plugin/windows-events-publishers.h b/src/collectors/windows-events.plugin/windows-events-publishers.h deleted file mode 100644 index b9269dcadb4423..00000000000000 --- a/src/collectors/windows-events.plugin/windows-events-publishers.h +++ /dev/null @@ -1,28 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -#ifndef NETDATA_WINDOWS_EVENTS_PUBLISHERS_H -#define NETDATA_WINDOWS_EVENTS_PUBLISHERS_H - -#include "windows-events.h" - -struct provider_meta_handle; -typedef struct provider_meta_handle PROVIDER_META_HANDLE; - -PROVIDER_META_HANDLE *publisher_get(ND_UUID uuid, LPCWSTR providerName); -void publisher_release(PROVIDER_META_HANDLE *h); -EVT_HANDLE publisher_handle(PROVIDER_META_HANDLE *h); -PROVIDER_META_HANDLE *publisher_dup(PROVIDER_META_HANDLE *h); - -void publisher_cache_init(void); - -bool publisher_keywords_cacheable(PROVIDER_META_HANDLE *h); -bool publisher_tasks_cacheable(PROVIDER_META_HANDLE *h); -bool is_useful_publisher_for_levels(PROVIDER_META_HANDLE *h); -bool publisher_opcodes_cacheable(PROVIDER_META_HANDLE *h); - -bool publisher_get_keywords(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); -bool publisher_get_level(TXT_UTF8 *dst, 
PROVIDER_META_HANDLE *h, uint64_t value); -bool publisher_get_task(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); -bool publisher_get_opcode(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value); - -#endif //NETDATA_WINDOWS_EVENTS_PUBLISHERS_H diff --git a/src/collectors/windows-events.plugin/windows-events-query-evt-variant.c b/src/collectors/windows-events.plugin/windows-events-query-evt-variant.c new file mode 100644 index 00000000000000..9d18f81fcf9a75 --- /dev/null +++ b/src/collectors/windows-events.plugin/windows-events-query-evt-variant.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows-events.h" +#include // For SID string conversion + +// Function to append the separator if the buffer is not empty +static inline void append_separator_if_needed(BUFFER *b, const char *separator) { + if (buffer_strlen(b) > 0 && separator != NULL) + buffer_strcat(b, separator); +} + +// Helper function to convert UTF16 strings to UTF8 and append to the buffer +static inline void append_utf16(BUFFER *b, LPCWSTR utf16Str, const char *separator) { + if (!utf16Str || !*utf16Str) return; + + append_separator_if_needed(b, separator); + + size_t remaining = b->size - b->len; + if(remaining < 128) { + buffer_need_bytes(b, 128); + remaining = b->size - b->len; + } + + size_t used = utf16_to_utf8(&b->buffer[b->len], remaining, utf16Str, -1); + if(used >= remaining) { + // oops, we need to resize + size_t needed = utf16_to_utf8(NULL, 0, utf16Str, -1); // find the size needed + buffer_need_bytes(b, needed); + remaining = b->size - b->len; + used = utf16_to_utf8(&b->buffer[b->len], remaining, utf16Str, -1); + } + + if(used) { + b->len += used - 1; + + internal_fatal(buffer_strlen(b) != strlen(buffer_tostring(b)), + "Buffer length mismatch."); + } +} + +// Function to append binary data to the buffer +static inline void append_binary(BUFFER *b, PBYTE data, DWORD size, const char *separator) { + if (data == NULL || size == 0) return; + + 
append_separator_if_needed(b, separator); + + buffer_need_bytes(b, size * 4); + for (DWORD i = 0; i < size; i++) { + uint8_t value = data[i]; + b->buffer[b->len++] = hex_digits[(value & 0xf0) >> 4]; + b->buffer[b->len++] = hex_digits[(value & 0x0f)]; + } +} + +// Function to append size_t to the buffer +static inline void append_size_t(BUFFER *b, size_t size, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64(b, size); +} + +// Function to append HexInt32 in hexadecimal format +static inline void append_uint32_hex(BUFFER *b, UINT32 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64_hex(b, n); +} + +// Function to append HexInt64 in hexadecimal format +static inline void append_uint64_hex(BUFFER *b, UINT64 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64_hex(b, n); +} + +// Function to append various data types to the buffer +static inline void append_uint64(BUFFER *b, UINT64 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64(b, n); +} + +static inline void append_int64(BUFFER *b, INT64 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_int64(b, n); +} + +static inline void append_double(BUFFER *b, double n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_netdata_double(b, n); +} + +static inline void append_guid(BUFFER *b, GUID *guid, const char *separator) { + fatal_assert(sizeof(GUID) == sizeof(nd_uuid_t)); + + append_separator_if_needed(b, separator); + + ND_UUID *uuid = (ND_UUID *)guid; + buffer_need_bytes(b, UUID_STR_LEN); + uuid_unparse_lower(uuid->uuid, &b->buffer[b->len]); + b->len += UUID_STR_LEN - 1; + + internal_fatal(buffer_strlen(b) != strlen(buffer_tostring(b)), + "Buffer length mismatch."); +} + +static inline void append_systime(BUFFER *b, SYSTEMTIME *st, const char *separator) { + append_separator_if_needed(b, 
separator); + buffer_sprintf(b, "%04d-%02d-%02d %02d:%02d:%02d", + st->wYear, st->wMonth, st->wDay, st->wHour, st->wMinute, st->wSecond); +} + +static inline void append_filetime(BUFFER *b, FILETIME *ft, const char *separator) { + SYSTEMTIME st; + if (FileTimeToSystemTime(ft, &st)) + append_systime(b, &st, separator); +} + +static inline void append_sid(BUFFER *b, PSID sid, const char *separator) { + buffer_sid_to_sid_str_and_name(sid, b, separator); +} + +static inline void append_sbyte(BUFFER *b, INT8 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_int64(b, n); +} + +static inline void append_byte(BUFFER *b, UINT8 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64(b, n); +} + +static inline void append_int16(BUFFER *b, INT16 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_int64(b, n); +} + +static inline void append_uint16(BUFFER *b, UINT16 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64(b, n); +} + +static inline void append_int32(BUFFER *b, INT32 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_int64(b, n); +} + +static inline void append_uint32(BUFFER *b, UINT32 n, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64(b, n); +} + +// Function to append EVT_HANDLE to the buffer +static inline void append_evt_handle(BUFFER *b, EVT_HANDLE h, const char *separator) { + append_separator_if_needed(b, separator); + buffer_print_uint64_hex(b, (uintptr_t)h); +} + +// Function to append XML data (UTF-16) to the buffer +static inline void append_evt_xml(BUFFER *b, LPCWSTR xmlData, const char *separator) { + append_utf16(b, xmlData, separator); // XML data is essentially UTF-16 string +} + +void evt_variant_to_buffer(BUFFER *b, EVT_VARIANT *ev, const char *separator) { + if(ev->Type == EvtVarTypeNull) return; + + if (ev->Type & 
EVT_VARIANT_TYPE_ARRAY) { + for (DWORD i = 0; i < ev->Count; i++) { + switch (ev->Type & EVT_VARIANT_TYPE_MASK) { + case EvtVarTypeString: + append_utf16(b, ev->StringArr[i], separator); + break; + + case EvtVarTypeAnsiString: + if (ev->AnsiStringArr[i] != NULL) { + append_utf16(b, (LPCWSTR)ev->AnsiStringArr[i], separator); + } + break; + + case EvtVarTypeSByte: + append_sbyte(b, ev->SByteArr[i], separator); + break; + + case EvtVarTypeByte: + append_byte(b, ev->ByteArr[i], separator); + break; + + case EvtVarTypeInt16: + append_int16(b, ev->Int16Arr[i], separator); + break; + + case EvtVarTypeUInt16: + append_uint16(b, ev->UInt16Arr[i], separator); + break; + + case EvtVarTypeInt32: + append_int32(b, ev->Int32Arr[i], separator); + break; + + case EvtVarTypeUInt32: + append_uint32(b, ev->UInt32Arr[i], separator); + break; + + case EvtVarTypeInt64: + append_int64(b, ev->Int64Arr[i], separator); + break; + + case EvtVarTypeUInt64: + append_uint64(b, ev->UInt64Arr[i], separator); + break; + + case EvtVarTypeSingle: + append_double(b, ev->SingleArr[i], separator); + break; + + case EvtVarTypeDouble: + append_double(b, ev->DoubleArr[i], separator); + break; + + case EvtVarTypeGuid: + append_guid(b, &ev->GuidArr[i], separator); + break; + + case EvtVarTypeFileTime: + append_filetime(b, &ev->FileTimeArr[i], separator); + break; + + case EvtVarTypeSysTime: + append_systime(b, &ev->SysTimeArr[i], separator); + break; + + case EvtVarTypeSid: + append_sid(b, ev->SidArr[i], separator); + break; + + case EvtVarTypeBinary: + append_binary(b, ev->BinaryVal, ev->Count, separator); + break; + + case EvtVarTypeSizeT: + append_size_t(b, ev->SizeTArr[i], separator); + break; + + case EvtVarTypeHexInt32: + append_uint32_hex(b, ev->UInt32Arr[i], separator); + break; + + case EvtVarTypeHexInt64: + append_uint64_hex(b, ev->UInt64Arr[i], separator); + break; + + case EvtVarTypeEvtHandle: + append_evt_handle(b, ev->EvtHandleVal, separator); + break; + + case EvtVarTypeEvtXml: + 
append_evt_xml(b, ev->XmlValArr[i], separator); + break; + + default: + // Skip unknown array types + break; + } + } + } else { + switch (ev->Type & EVT_VARIANT_TYPE_MASK) { + case EvtVarTypeNull: + // Do nothing for null types + break; + + case EvtVarTypeString: + append_utf16(b, ev->StringVal, separator); + break; + + case EvtVarTypeAnsiString: + append_utf16(b, (LPCWSTR)ev->AnsiStringVal, separator); + break; + + case EvtVarTypeSByte: + append_sbyte(b, ev->SByteVal, separator); + break; + + case EvtVarTypeByte: + append_byte(b, ev->ByteVal, separator); + break; + + case EvtVarTypeInt16: + append_int16(b, ev->Int16Val, separator); + break; + + case EvtVarTypeUInt16: + append_uint16(b, ev->UInt16Val, separator); + break; + + case EvtVarTypeInt32: + append_int32(b, ev->Int32Val, separator); + break; + + case EvtVarTypeUInt32: + append_uint32(b, ev->UInt32Val, separator); + break; + + case EvtVarTypeInt64: + append_int64(b, ev->Int64Val, separator); + break; + + case EvtVarTypeUInt64: + append_uint64(b, ev->UInt64Val, separator); + break; + + case EvtVarTypeSingle: + append_double(b, ev->SingleVal, separator); + break; + + case EvtVarTypeDouble: + append_double(b, ev->DoubleVal, separator); + break; + + case EvtVarTypeBoolean: + append_separator_if_needed(b, separator); + buffer_strcat(b, ev->BooleanVal ? 
"true" : "false"); + break; + + case EvtVarTypeGuid: + append_guid(b, ev->GuidVal, separator); + break; + + case EvtVarTypeBinary: + append_binary(b, ev->BinaryVal, ev->Count, separator); + break; + + case EvtVarTypeSizeT: + append_size_t(b, ev->SizeTVal, separator); + break; + + case EvtVarTypeHexInt32: + append_uint32_hex(b, ev->UInt32Val, separator); + break; + + case EvtVarTypeHexInt64: + append_uint64_hex(b, ev->UInt64Val, separator); + break; + + case EvtVarTypeEvtHandle: + append_evt_handle(b, ev->EvtHandleVal, separator); + break; + + case EvtVarTypeEvtXml: + append_evt_xml(b, ev->XmlVal, separator); + break; + + default: + // Skip unknown types + break; + } + } +} diff --git a/src/collectors/windows-events.plugin/windows-events-query.c b/src/collectors/windows-events.plugin/windows-events-query.c index d4c660c14a23c9..12734fe9d538ad 100644 --- a/src/collectors/windows-events.plugin/windows-events-query.c +++ b/src/collectors/windows-events.plugin/windows-events-query.c @@ -2,48 +2,13 @@ #include "windows-events.h" +static void wevt_event_done(WEVT_LOG *log); + static uint64_t wevt_log_file_size(const wchar_t *channel); -#define FIELD_RECORD_NUMBER (0) -#define FIELD_EVENT_ID (1) -#define FIELD_LEVEL (2) -#define FIELD_OPCODE (3) -#define FIELD_KEYWORDS (4) -#define FIELD_VERSION (5) -#define FIELD_TASK (6) -#define FIELD_PROCESS_ID (7) -#define FIELD_THREAD_ID (8) -#define FIELD_TIME_CREATED (9) -#define FIELD_CHANNEL (10) -#define FIELD_COMPUTER_NAME (11) -#define FIELD_PROVIDER_NAME (12) -#define FIELD_EVENT_SOURCE_NAME (13) -#define FIELD_PROVIDER_GUID (14) -#define FIELD_CORRELATION_ACTIVITY_ID (15) -#define FIELD_USER_ID (16) - -// These are the fields we extract from the logs -static const wchar_t *RENDER_ITEMS[] = { - L"/Event/System/EventRecordID", - L"/Event/System/EventID", - L"/Event/System/Level", - L"/Event/System/Opcode", - L"/Event/System/Keywords", - L"/Event/System/Version", - L"/Event/System/Task", - L"/Event/System/Execution/@ProcessID", 
- L"/Event/System/Execution/@ThreadID", - L"/Event/System/TimeCreated/@SystemTime", - L"/Event/System/Channel", - L"/Event/System/Computer", - L"/Event/System/Provider/@Name", - L"/Event/System/Provider/@EventSourceName", - L"/Event/System/Provider/@Guid", - L"/Event/System/Correlation/@ActivityID", - L"/Event/System/Security/@UserID", -}; - -static const char *wevt_extended_status(void) { +// -------------------------------------------------------------------------------------------------------------------- + +static const char *EvtGetExtendedStatus_utf8(void) { static __thread wchar_t wbuf[4096]; static __thread char buf[4096]; DWORD wbuf_used = 0; @@ -62,7 +27,9 @@ static const char *wevt_extended_status(void) { return buf; } -bool wevt_get_message_unicode(TXT_UNICODE *dst, EVT_HANDLE hMetadata, EVT_HANDLE hEvent, DWORD dwMessageId, EVT_FORMAT_MESSAGE_FLAGS flags) { +// -------------------------------------------------------------------------------------------------------------------- + +bool EvtFormatMessage_utf16(TXT_UNICODE *dst, EVT_HANDLE hMetadata, EVT_HANDLE hEvent, DWORD dwMessageId, EVT_FORMAT_MESSAGE_FLAGS flags) { dst->used = 0; DWORD size = 0; @@ -107,56 +74,44 @@ bool wevt_get_message_unicode(TXT_UNICODE *dst, EVT_HANDLE hMetadata, EVT_HANDLE return false; } -static bool wevt_get_field_from_events_log( - WEVT_LOG *log, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, - TXT_UTF8 *dst, EVT_FORMAT_MESSAGE_FLAGS flags) { +static bool EvtFormatMessage_utf8( + TXT_UNICODE *tmp, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, + TXT_UTF8 *dst, EVT_FORMAT_MESSAGE_FLAGS flags) { dst->src = TXT_SOURCE_EVENT_LOG; - if(wevt_get_message_unicode(&log->ops.unicode, publisher_handle(p), hEvent, 0, flags)) - return wevt_str_unicode_to_utf8(dst, &log->ops.unicode); + if(EvtFormatMessage_utf16(tmp, provider_handle(p), hEvent, 0, flags)) + return wevt_str_unicode_to_utf8(dst, tmp); wevt_utf8_empty(dst); return false; } -bool wevt_get_event_utf8(WEVT_LOG *log, 
PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, TXT_UTF8 *dst) { - return wevt_get_field_from_events_log(log, p, hEvent, dst, EvtFormatMessageEvent); +bool EvtFormatMessage_Event_utf8(TXT_UNICODE *tmp, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, TXT_UTF8 *dst) { + return EvtFormatMessage_utf8(tmp, p, hEvent, dst, EvtFormatMessageEvent); } -bool wevt_get_xml_utf8(WEVT_LOG *log, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, TXT_UTF8 *dst) { - return wevt_get_field_from_events_log(log, p, hEvent, dst, EvtFormatMessageXml); +bool EvtFormatMessage_Xml_utf8(TXT_UNICODE *tmp, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, TXT_UTF8 *dst) { + return EvtFormatMessage_utf8(tmp, p, hEvent, dst, EvtFormatMessageXml); } -static inline void wevt_event_done(WEVT_LOG *log) { - if (log->publisher) { - publisher_release(log->publisher); - log->publisher = NULL; - } - - if (log->hEvent) { - EvtClose(log->hEvent); - log->hEvent = NULL; - } - - log->ops.level.src = TXT_SOURCE_UNKNOWN; - log->ops.keywords.src = TXT_SOURCE_UNKNOWN; - log->ops.opcode.src = TXT_SOURCE_UNKNOWN; - log->ops.task.src = TXT_SOURCE_UNKNOWN; -} +// -------------------------------------------------------------------------------------------------------------------- static void wevt_get_field_from_cache( - WEVT_LOG *log, uint64_t value, PROVIDER_META_HANDLE *h, - TXT_UTF8 *dst, const ND_UUID *provider, - WEVT_FIELD_TYPE cache_type, EVT_FORMAT_MESSAGE_FLAGS flags) { + WEVT_LOG *log, uint64_t value, PROVIDER_META_HANDLE *h, + TXT_UTF8 *dst, const ND_UUID *provider, + WEVT_FIELD_TYPE cache_type, EVT_FORMAT_MESSAGE_FLAGS flags) { if (field_cache_get(cache_type, provider, value, dst)) return; - wevt_get_field_from_events_log(log, h, log->hEvent, dst, flags); + EvtFormatMessage_utf8(&log->ops.unicode, h, log->hEvent, dst, flags); field_cache_set(cache_type, provider, value, dst); } +// -------------------------------------------------------------------------------------------------------------------- +// Level + #define 
SET_LEN_AND_RETURN(constant) *len = sizeof(constant) - 1; return constant static inline const char *wevt_level_hardcoded(uint64_t level, size_t *len) { @@ -179,9 +134,9 @@ static void wevt_get_level(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE * EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageLevel; WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_LEVEL; - bool is_publisher = is_valid_publisher_level(value, true); + bool is_provider = is_valid_provider_level(value, true); - if(!is_publisher) { + if(!is_provider) { size_t len; const char *hardcoded = wevt_level_hardcoded(value, &len); if(hardcoded) { @@ -189,12 +144,12 @@ static void wevt_get_level(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE * dst->src = TXT_SOURCE_HARDCODED; } else { - // since this is not a publisher value + // since this is not a provider value // we expect to get the system description of it wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); } } - else if (!publisher_get_level(dst, h, value)) { + else if (!provider_get_level(dst, h, value)) { // not found in the manifest, get it from the cache wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); } @@ -203,6 +158,9 @@ static void wevt_get_level(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE * dst, WEVT_PREFIX_LEVEL, sizeof(WEVT_PREFIX_LEVEL) - 1, ev->level); } +// -------------------------------------------------------------------------------------------------------------------- +// Opcode + static inline const char *wevt_opcode_hardcoded(uint64_t opcode, size_t *len) { switch(opcode) { case WEVT_OPCODE_INFO: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_INFO); @@ -228,9 +186,9 @@ static void wevt_get_opcode(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageOpcode; WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_OPCODE; - bool is_publisher = is_valid_publisher_opcode(value, true); + bool is_provider = is_valid_provider_opcode(value, 
true); - if(!is_publisher) { + if(!is_provider) { size_t len; const char *hardcoded = wevt_opcode_hardcoded(value, &len); if(hardcoded) { @@ -238,12 +196,12 @@ static void wevt_get_opcode(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE dst->src = TXT_SOURCE_HARDCODED; } else { - // since this is not a publisher value + // since this is not a provider value // we expect to get the system description of it wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); } } - else if (!publisher_get_opcode(dst, h, value)) { + else if (!provider_get_opcode(dst, h, value)) { // not found in the manifest, get it from the cache wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); } @@ -252,6 +210,9 @@ static void wevt_get_opcode(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE dst, WEVT_PREFIX_OPCODE, sizeof(WEVT_PREFIX_OPCODE) - 1, ev->opcode); } +// -------------------------------------------------------------------------------------------------------------------- +// Task + static const char *wevt_task_hardcoded(uint64_t task, size_t *len) { switch(task) { case WEVT_TASK_NONE: SET_LEN_AND_RETURN(WEVT_TASK_NAME_NONE); @@ -267,9 +228,9 @@ static void wevt_get_task(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageTask; WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_TASK; - bool is_publisher = is_valid_publisher_task(value, true); + bool is_provider = is_valid_provider_task(value, true); - if(!is_publisher) { + if(!is_provider) { size_t len; const char *hardcoded = wevt_task_hardcoded(value, &len); if(hardcoded) { @@ -277,12 +238,12 @@ static void wevt_get_task(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h dst->src = TXT_SOURCE_HARDCODED; } else { - // since this is not a publisher value + // since this is not a provider value // we expect to get the system description of it wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); } } - 
else if (!publisher_get_task(dst, h, value)) { + else if (!provider_get_task(dst, h, value)) { // not found in the manifest, get it from the cache wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags); } @@ -291,9 +252,12 @@ static void wevt_get_task(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h dst, WEVT_PREFIX_TASK, sizeof(WEVT_PREFIX_TASK) - 1, ev->task); } +// -------------------------------------------------------------------------------------------------------------------- +// Keyword + #define SET_BITS(msk, txt) { .mask = msk, .name = txt, .len = sizeof(txt) - 1, } -static uint64_t wevt_keywords_handle_reserved(uint64_t value, TXT_UTF8 *dst) { +static uint64_t wevt_keyword_handle_reserved(uint64_t value, TXT_UTF8 *dst) { struct { uint64_t mask; const char *name; @@ -324,7 +288,7 @@ static uint64_t wevt_keywords_handle_reserved(uint64_t value, TXT_UTF8 *dst) { return value & 0x0000FFFFFFFFFFFF; } -static void wevt_get_keywords(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h) { +static void wevt_get_keyword(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h) { TXT_UTF8 *dst = &log->ops.keywords; if(ev->keywords == WEVT_KEYWORD_NONE) { @@ -332,18 +296,18 @@ static void wevt_get_keywords(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDL dst->src = TXT_SOURCE_HARDCODED; } - uint64_t value = wevt_keywords_handle_reserved(ev->keywords, dst); + uint64_t value = wevt_keyword_handle_reserved(ev->keywords, dst); EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageKeyword; - WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_KEYWORDS; + WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_KEYWORD; if(!value && dst->used <= 1) { // no hardcoded info in the buffer, make it None txt_utf8_set(dst, WEVT_KEYWORD_NAME_NONE, sizeof(WEVT_KEYWORD_NAME_NONE) - 1); dst->src = TXT_SOURCE_HARDCODED; } - else if (value && !publisher_get_keywords(dst, h, value) && dst->used <= 1) { - // the publisher did not provide any info and the description is still 
empty. + else if (value && !provider_get_keywords(dst, h, value) && dst->used <= 1) { + // the provider did not provide any info and the description is still empty. // the system returns 1 keyword, the highest bit, not a list // so, when we call the system, we pass the original value (ev->keywords) wevt_get_field_from_cache(log, ev->keywords, h, dst, &ev->provider, cache_type, flags); @@ -353,57 +317,86 @@ static void wevt_get_keywords(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDL dst, WEVT_PREFIX_KEYWORDS, sizeof(WEVT_PREFIX_KEYWORDS) - 1, ev->keywords); } -bool wevt_get_next_event_one(WEVT_LOG *log, WEVT_EVENT *ev, bool full) { - bool ret = false; +// -------------------------------------------------------------------------------------------------------------------- +// Fetching Events - // obtain the information from selected events +static inline bool wEvtRender(WEVT_LOG *log, EVT_HANDLE context, WEVT_VARIANT *raw) { DWORD bytes_used = 0, property_count = 0; - if (!EvtRender(log->hRenderContext, log->hEvent, EvtRenderEventValues, log->ops.content.size, log->ops.content.data, &bytes_used, &property_count)) { + if (!EvtRender(context, log->hEvent, EvtRenderEventValues, raw->size, raw->data, &bytes_used, &property_count)) { // information exceeds the allocated space if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtRender() failed, hRenderContext: 0x%lx, hEvent: 0x%lx, content: 0x%lx, size: %zu, extended info: %s", - (uintptr_t)log->hRenderContext, (uintptr_t)log->hEvent, (uintptr_t)log->ops.content.data, log->ops.content.size, wevt_extended_status()); - goto cleanup; + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "EvtRender() failed, hRenderSystemContext: 0x%lx, hEvent: 0x%lx, content: 0x%lx, size: %u, extended info: %s", + (uintptr_t)context, (uintptr_t)log->hEvent, (uintptr_t)raw->data, raw->size, + EvtGetExtendedStatus_utf8()); + return false; } - wevt_variant_resize(&log->ops.content, bytes_used); - if 
(!EvtRender(log->hRenderContext, log->hEvent, EvtRenderEventValues, log->ops.content.size, log->ops.content.data, &bytes_used, &property_count)) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtRender() failed, after bytes_used increase, extended info: %s", - wevt_extended_status()); - goto cleanup; + wevt_variant_resize(raw, bytes_used); + if (!EvtRender(context, log->hEvent, EvtRenderEventValues, raw->size, raw->data, &bytes_used, &property_count)) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "EvtRender() failed, after bytes_used increase, extended info: %s", + EvtGetExtendedStatus_utf8()); + return false; } } - log->ops.content.used = bytes_used; - - EVT_VARIANT *content = log->ops.content.data; + raw->used = bytes_used; + raw->count = property_count; - ev->id = wevt_field_get_uint64(&content[FIELD_RECORD_NUMBER]); - ev->event_id = wevt_field_get_uint16(&content[FIELD_EVENT_ID]); - ev->level = wevt_field_get_uint8(&content[FIELD_LEVEL]); - ev->opcode = wevt_field_get_uint8(&content[FIELD_OPCODE]); - ev->keywords = wevt_field_get_uint64_hex(&content[FIELD_KEYWORDS]); - ev->version = wevt_field_get_uint8(&content[FIELD_VERSION]); - ev->task = wevt_field_get_uint16(&content[FIELD_TASK]); - ev->process_id = wevt_field_get_uint32(&content[FIELD_PROCESS_ID]); - ev->thread_id = wevt_field_get_uint32(&content[FIELD_THREAD_ID]); - ev->created_ns = wevt_field_get_filetime_to_ns(&content[FIELD_TIME_CREATED]); + return true; +} - if(full) { - wevt_field_get_string_utf8(&content[FIELD_CHANNEL], &log->ops.channel); - wevt_field_get_string_utf8(&content[FIELD_COMPUTER_NAME], &log->ops.computer); - wevt_field_get_string_utf8(&content[FIELD_PROVIDER_NAME], &log->ops.provider); - wevt_field_get_string_utf8(&content[FIELD_EVENT_SOURCE_NAME], &log->ops.source); - wevt_get_uuid_by_type(&content[FIELD_PROVIDER_GUID], &ev->provider); - wevt_get_uuid_by_type(&content[FIELD_CORRELATION_ACTIVITY_ID], &ev->correlation_activity_id); - wevt_field_get_sid(&content[FIELD_USER_ID], &log->ops.user); 
+static bool wevt_get_next_event_one(WEVT_LOG *log, WEVT_EVENT *ev) { + bool ret = false; - PROVIDER_META_HANDLE *h = log->publisher = - publisher_get(ev->provider, content[FIELD_PROVIDER_NAME].StringVal); + if(!wEvtRender(log, log->hRenderSystemContext, &log->ops.raw.system)) + goto cleanup; - wevt_get_level(log, ev, h); - wevt_get_task(log, ev, h); - wevt_get_opcode(log, ev, h); - wevt_get_keywords(log, ev, h); + EVT_VARIANT *content = log->ops.raw.system.data; + + ev->id = wevt_field_get_uint64(&content[EvtSystemEventRecordId]); + ev->event_id = wevt_field_get_uint16(&content[EvtSystemEventID]); + ev->level = wevt_field_get_uint8(&content[EvtSystemLevel]); + ev->opcode = wevt_field_get_uint8(&content[EvtSystemOpcode]); + ev->keywords = wevt_field_get_uint64_hex(&content[EvtSystemKeywords]); + ev->version = wevt_field_get_uint8(&content[EvtSystemVersion]); + ev->task = wevt_field_get_uint16(&content[EvtSystemTask]); + ev->qualifiers = wevt_field_get_uint16(&content[EvtSystemQualifiers]); + ev->process_id = wevt_field_get_uint32(&content[EvtSystemProcessID]); + ev->thread_id = wevt_field_get_uint32(&content[EvtSystemThreadID]); + ev->created_ns = wevt_field_get_filetime_to_ns(&content[EvtSystemTimeCreated]); + + if(log->type & WEVT_QUERY_EXTENDED) { + wevt_field_get_string_utf8(&content[EvtSystemChannel], &log->ops.channel); + wevt_field_get_string_utf8(&content[EvtSystemComputer], &log->ops.computer); + wevt_field_get_string_utf8(&content[EvtSystemProviderName], &log->ops.provider); + wevt_get_uuid_by_type(&content[EvtSystemProviderGuid], &ev->provider); + wevt_get_uuid_by_type(&content[EvtSystemActivityID], &ev->activity_id); + wevt_get_uuid_by_type(&content[EvtSystemRelatedActivityID], &ev->related_activity_id); + wevt_field_get_sid(&content[EvtSystemUserID], &log->ops.account, &log->ops.domain, &log->ops.sid); + + PROVIDER_META_HANDLE *p = log->provider = + provider_get(ev->provider, content[EvtSystemProviderName].StringVal); + + ev->platform = 
provider_get_platform(p); + + wevt_get_level(log, ev, p); + wevt_get_task(log, ev, p); + wevt_get_opcode(log, ev, p); + wevt_get_keyword(log, ev, p); + + if(log->type & WEVT_QUERY_EVENT_DATA && wEvtRender(log, log->hRenderUserContext, &log->ops.raw.user)) { +#if (ON_FTS_PRELOAD_MESSAGE == 1) + EvtFormatMessage_Event_utf8(&log->ops.unicode, log->provider, log->hEvent, &log->ops.event); +#endif +#if (ON_FTS_PRELOAD_XML == 1) + EvtFormatMessage_Xml_utf8(&log->ops.unicode, log->provider, log->hEvent, &log->ops.xml); +#endif +#if (ON_FTS_PRELOAD_EVENT_DATA == 1) + for(size_t i = 0; i < log->ops.raw.user.count ;i++) + evt_variant_to_buffer(log->ops.event_data, &log->ops.raw.user.data[i], " ||| "); +#endif + } } ret = true; @@ -412,11 +405,11 @@ bool wevt_get_next_event_one(WEVT_LOG *log, WEVT_EVENT *ev, bool full) { return ret; } -bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev, bool full) { - DWORD size = full ? BATCH_NEXT_EVENT : 1; +bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev) { + DWORD size = (log->type & WEVT_QUERY_EXTENDED) ? 
BATCH_NEXT_EVENT : 1; DWORD max_failures = 10; - fatal_assert(log && log->hQuery && log->hRenderContext); + fatal_assert(log && log->hQuery && log->hRenderSystemContext); while(max_failures > 0) { if (log->batch.used >= log->batch.size) { @@ -433,7 +426,7 @@ bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev, bool full) { if(size == 1) { nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtNext() failed, hQuery: 0x%lx, size: %zu, extended info: %s", - (uintptr_t)log->hQuery, (size_t)size, wevt_extended_status()); + (uintptr_t)log->hQuery, (size_t)size, EvtGetExtendedStatus_utf8()); return false; } @@ -455,7 +448,7 @@ bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev, bool full) { log->batch.hEvents[log->batch.used] = NULL; log->batch.used++; - if(wevt_get_next_event_one(log, ev, full)) + if(wevt_get_next_event_one(log, ev)) return true; else { log->query_stats.failed_count++; @@ -467,6 +460,69 @@ bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev, bool full) { return false; } +static void wevt_event_done(WEVT_LOG *log) { + if (log->provider) { + provider_release(log->provider); + log->provider = NULL; + } + + if (log->hEvent) { + EvtClose(log->hEvent); + log->hEvent = NULL; + } + + log->ops.channel.src = TXT_SOURCE_UNKNOWN; + log->ops.provider.src = TXT_SOURCE_UNKNOWN; + log->ops.computer.src = TXT_SOURCE_UNKNOWN; + log->ops.account.src = TXT_SOURCE_UNKNOWN; + log->ops.domain.src = TXT_SOURCE_UNKNOWN; + log->ops.sid.src = TXT_SOURCE_UNKNOWN; + + log->ops.event.src = TXT_SOURCE_UNKNOWN; + log->ops.level.src = TXT_SOURCE_UNKNOWN; + log->ops.keywords.src = TXT_SOURCE_UNKNOWN; + log->ops.opcode.src = TXT_SOURCE_UNKNOWN; + log->ops.task.src = TXT_SOURCE_UNKNOWN; + log->ops.xml.src = TXT_SOURCE_UNKNOWN; + + log->ops.channel.used = 0; + log->ops.provider.used = 0; + log->ops.computer.used = 0; + log->ops.account.used = 0; + log->ops.domain.used = 0; + log->ops.sid.used = 0; + + log->ops.event.used = 0; + log->ops.level.used = 0; + log->ops.keywords.used = 0; + 
log->ops.opcode.used = 0; + log->ops.task.used = 0; + log->ops.xml.used = 0; + + if(log->ops.event_data) + log->ops.event_data->len = 0; +} + +// -------------------------------------------------------------------------------------------------------------------- +// Query management + +bool wevt_query(WEVT_LOG *log, LPCWSTR channel, LPCWSTR query, EVT_QUERY_FLAGS direction) { + wevt_query_done(log); + log->log_stats.queries_count++; + + EVT_HANDLE hQuery = EvtQuery(NULL, channel, query, EvtQueryChannelPath | (direction & (EvtQueryReverseDirection | EvtQueryForwardDirection)) | EvtQueryTolerateQueryErrors); + if (!hQuery) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() failed, query: %s | extended info: %s", + query2utf8(query), EvtGetExtendedStatus_utf8()); + + log->log_stats.queries_failed++; + return false; + } + + log->hQuery = hQuery; + return true; +} + void wevt_query_done(WEVT_LOG *log) { // close the last working hEvent wevt_event_done(log); @@ -490,19 +546,59 @@ void wevt_query_done(WEVT_LOG *log) { log->query_stats.failed_count = 0; } +// -------------------------------------------------------------------------------------------------------------------- +// Log management + +WEVT_LOG *wevt_openlog6(WEVT_QUERY_TYPE type) { + WEVT_LOG *log = callocz(1, sizeof(*log)); + log->type = type; + + // create the system render + log->hRenderSystemContext = EvtCreateRenderContext(0, NULL, EvtRenderContextSystem); + if (!log->hRenderSystemContext) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "EvtCreateRenderContext() on system context failed, extended info: %s", + EvtGetExtendedStatus_utf8()); + goto cleanup; + } + + if(type & WEVT_QUERY_EVENT_DATA) { + log->hRenderUserContext = EvtCreateRenderContext(0, NULL, EvtRenderContextUser); + if (!log->hRenderUserContext) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "EvtCreateRenderContext failed, on user context failed, extended info: %s", + EvtGetExtendedStatus_utf8()); + goto cleanup; + } + + log->ops.event_data = 
buffer_create(4096, NULL); + } + + return log; + +cleanup: + wevt_closelog6(log); + return NULL; +} + void wevt_closelog6(WEVT_LOG *log) { wevt_query_done(log); - if (log->hRenderContext) - EvtClose(log->hRenderContext); + if (log->hRenderSystemContext) + EvtClose(log->hRenderSystemContext); + + if (log->hRenderUserContext) + EvtClose(log->hRenderUserContext); - wevt_variant_cleanup(&log->ops.content); + wevt_variant_cleanup(&log->ops.raw.system); + wevt_variant_cleanup(&log->ops.raw.user); txt_unicode_cleanup(&log->ops.unicode); txt_utf8_cleanup(&log->ops.channel); txt_utf8_cleanup(&log->ops.provider); - txt_utf8_cleanup(&log->ops.source); txt_utf8_cleanup(&log->ops.computer); - txt_utf8_cleanup(&log->ops.user); + txt_utf8_cleanup(&log->ops.account); + txt_utf8_cleanup(&log->ops.domain); + txt_utf8_cleanup(&log->ops.sid); txt_utf8_cleanup(&log->ops.event); txt_utf8_cleanup(&log->ops.level); @@ -510,9 +606,15 @@ void wevt_closelog6(WEVT_LOG *log) { txt_utf8_cleanup(&log->ops.opcode); txt_utf8_cleanup(&log->ops.task); txt_utf8_cleanup(&log->ops.xml); + + buffer_free(log->ops.event_data); + freez(log); } +// -------------------------------------------------------------------------------------------------------------------- +// Retention + bool wevt_channel_retention(WEVT_LOG *log, const wchar_t *channel, const wchar_t *query, EVT_RETENTION *retention) { bool ret = false; @@ -525,15 +627,15 @@ bool wevt_channel_retention(WEVT_LOG *log, const wchar_t *channel, const wchar_t if (!log->hQuery) { if (GetLastError() == ERROR_EVT_CHANNEL_NOT_FOUND) nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() for retention failed, channel '%s' not found, cannot get retention, extended info: %s", - channel2utf8(channel), wevt_extended_status()); + channel2utf8(channel), EvtGetExtendedStatus_utf8()); else nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() for retention on channel '%s' failed, cannot get retention, extended info: %s", - channel2utf8(channel), wevt_extended_status()); + 
channel2utf8(channel), EvtGetExtendedStatus_utf8()); goto cleanup; } - if (!wevt_get_next_event(log, &retention->first_event, false)) + if (!wevt_get_next_event(log, &retention->first_event)) goto cleanup; if (!retention->first_event.id) { @@ -548,15 +650,15 @@ bool wevt_channel_retention(WEVT_LOG *log, const wchar_t *channel, const wchar_t if (!log->hQuery) { if (GetLastError() == ERROR_EVT_CHANNEL_NOT_FOUND) nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() for retention failed, channel '%s' not found, extended info: %s", - channel2utf8(channel), wevt_extended_status()); + channel2utf8(channel), EvtGetExtendedStatus_utf8()); else nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() for retention on channel '%s' failed, extended info: %s", - channel2utf8(channel), wevt_extended_status()); + channel2utf8(channel), EvtGetExtendedStatus_utf8()); goto cleanup; } - if (!wevt_get_next_event(log, &retention->last_event, false) || retention->last_event.id == 0) { + if (!wevt_get_next_event(log, &retention->last_event) || retention->last_event.id == 0) { // no data in eventlog retention->last_event = retention->first_event; } @@ -582,24 +684,6 @@ bool wevt_channel_retention(WEVT_LOG *log, const wchar_t *channel, const wchar_t return ret; } -WEVT_LOG *wevt_openlog6(void) { - size_t RENDER_ITEMS_count = (sizeof(RENDER_ITEMS) / sizeof(const wchar_t *)); - - WEVT_LOG *log = callocz(1, sizeof(*log)); - - // create the system render - log->hRenderContext = EvtCreateRenderContext(RENDER_ITEMS_count, RENDER_ITEMS, EvtRenderContextValues); - if (!log->hRenderContext) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtCreateRenderContext failed, extended info: %s", wevt_extended_status()); - freez(log); - log = NULL; - goto cleanup; - } - -cleanup: - return log; -} - static uint64_t wevt_log_file_size(const wchar_t *channel) { EVT_HANDLE hLog = NULL; EVT_VARIANT evtVariant; @@ -610,14 +694,14 @@ static uint64_t wevt_log_file_size(const wchar_t *channel) { hLog = EvtOpenLog(NULL, channel, 
EvtOpenChannelPath); if (!hLog) { nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtOpenLog() on channel '%s' failed, extended info: %s", - channel2utf8(channel), wevt_extended_status()); + channel2utf8(channel), EvtGetExtendedStatus_utf8()); goto cleanup; } // Get the file size of the log if (!EvtGetLogInfo(hLog, EvtLogFileSize, sizeof(evtVariant), &evtVariant, &bufferUsed)) { nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetLogInfo() on channel '%s' failed, extended info: %s", - channel2utf8(channel), wevt_extended_status()); + channel2utf8(channel), EvtGetExtendedStatus_utf8()); goto cleanup; } @@ -630,20 +714,3 @@ static uint64_t wevt_log_file_size(const wchar_t *channel) { return file_size; } - -bool wevt_query(WEVT_LOG *log, LPCWSTR channel, LPCWSTR query, EVT_QUERY_FLAGS direction) { - wevt_query_done(log); - log->log_stats.queries_count++; - - EVT_HANDLE hQuery = EvtQuery(NULL, channel, query, EvtQueryChannelPath | (direction & (EvtQueryReverseDirection | EvtQueryForwardDirection)) | EvtQueryTolerateQueryErrors); - if (!hQuery) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() failed, query: %s | extended info: %s", - query2utf8(query), wevt_extended_status()); - - log->log_stats.queries_failed++; - return false; - } - - log->hQuery = hQuery; - return true; -} diff --git a/src/collectors/windows-events.plugin/windows-events-query.h b/src/collectors/windows-events.plugin/windows-events-query.h index c822da74b85ca9..c43c3c9b5044a1 100644 --- a/src/collectors/windows-events.plugin/windows-events-query.h +++ b/src/collectors/windows-events.plugin/windows-events-query.h @@ -4,6 +4,7 @@ #define NETDATA_WINDOWS_EVENTS_QUERY_H #include "libnetdata/libnetdata.h" +#include "windows-events.h" #define BATCH_NEXT_EVENT 500 @@ -11,23 +12,27 @@ typedef struct wevt_event { uint64_t id; // EventRecordId (unique and sequential per channel) uint8_t version; uint8_t level; // The severity of event - uint8_t opcode; // we receive this as 8bit, but publishers use 32bit + uint8_t opcode; // we 
receive this as 8bit, but providers use 32bit uint16_t event_id; // This is the template that defines the message to be shown uint16_t task; + uint16_t qualifiers; uint32_t process_id; uint32_t thread_id; uint64_t keywords; // Categorization of the event ND_UUID provider; - ND_UUID correlation_activity_id; + ND_UUID activity_id; + ND_UUID related_activity_id; nsec_t created_ns; + WEVT_PROVIDER_PLATFORM platform; } WEVT_EVENT; #define WEVT_EVENT_EMPTY (WEVT_EVENT){ .id = 0, .created_ns = 0, } typedef struct { EVT_VARIANT *data; - size_t size; - size_t used; + DWORD size; + DWORD used; + DWORD count; } WEVT_VARIANT; typedef struct { @@ -41,6 +46,16 @@ typedef struct { struct provider_meta_handle; +typedef enum __attribute__((packed)) { + WEVT_QUERY_BASIC = (1 << 0), + WEVT_QUERY_EXTENDED = (1 << 1), + WEVT_QUERY_EVENT_DATA = (1 << 2), +} WEVT_QUERY_TYPE; + +#define WEVT_QUERY_RETENTION WEVT_QUERY_BASIC +#define WEVT_QUERY_NORMAL (WEVT_QUERY_BASIC | WEVT_QUERY_EXTENDED) +#define WEVT_QUERY_FTS (WEVT_QUERY_BASIC | WEVT_QUERY_EXTENDED | WEVT_QUERY_EVENT_DATA) + typedef struct wevt_log { struct { DWORD size; @@ -50,13 +65,19 @@ typedef struct wevt_log { EVT_HANDLE hEvent; EVT_HANDLE hQuery; - EVT_HANDLE hRenderContext; - struct provider_meta_handle *publisher; + EVT_HANDLE hRenderSystemContext; + EVT_HANDLE hRenderUserContext; + struct provider_meta_handle *provider; + + WEVT_QUERY_TYPE type; struct { - // temp buffer used for rendering event log messages - // never use directly - WEVT_VARIANT content; + struct { + // temp buffer used for rendering event log messages + // never use directly + WEVT_VARIANT system; + WEVT_VARIANT user; + } raw; // temp buffer used for fetching and converting UNICODE and UTF-8 // every string operation overwrites it, multiple times per event log entry @@ -75,16 +96,19 @@ typedef struct wevt_log { TXT_UTF8 channel; TXT_UTF8 provider; - TXT_UTF8 source; TXT_UTF8 computer; - TXT_UTF8 user; + TXT_UTF8 account; + TXT_UTF8 domain; + TXT_UTF8 sid; 
- TXT_UTF8 event; + TXT_UTF8 event; // the message to be shown to the user TXT_UTF8 level; TXT_UTF8 keywords; TXT_UTF8 opcode; TXT_UTF8 task; TXT_UTF8 xml; + + BUFFER *event_data; } ops; struct { @@ -102,7 +126,7 @@ typedef struct wevt_log { } WEVT_LOG; -WEVT_LOG *wevt_openlog6(void); +WEVT_LOG *wevt_openlog6(WEVT_QUERY_TYPE type); void wevt_closelog6(WEVT_LOG *log); bool wevt_channel_retention(WEVT_LOG *log, const wchar_t *channel, const wchar_t *query, EVT_RETENTION *retention); @@ -110,12 +134,14 @@ bool wevt_channel_retention(WEVT_LOG *log, const wchar_t *channel, const wchar_t bool wevt_query(WEVT_LOG *log, LPCWSTR channel, LPCWSTR query, EVT_QUERY_FLAGS direction); void wevt_query_done(WEVT_LOG *log); -bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev, bool full); +bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev); + +bool EvtFormatMessage_utf16(TXT_UNICODE *dst, EVT_HANDLE hMetadata, EVT_HANDLE hEvent, DWORD dwMessageId, EVT_FORMAT_MESSAGE_FLAGS flags); -bool wevt_get_message_unicode(TXT_UNICODE *dst, EVT_HANDLE hMetadata, EVT_HANDLE hEvent, DWORD dwMessageId, EVT_FORMAT_MESSAGE_FLAGS flags); +bool EvtFormatMessage_Event_utf8(TXT_UNICODE *tmp, struct provider_meta_handle *p, EVT_HANDLE hEvent, TXT_UTF8 *dst); +bool EvtFormatMessage_Xml_utf8(TXT_UNICODE *tmp, struct provider_meta_handle *p, EVT_HANDLE hEvent, TXT_UTF8 *dst); -bool wevt_get_event_utf8(WEVT_LOG *log, struct provider_meta_handle *p, EVT_HANDLE hEvent, TXT_UTF8 *dst); -bool wevt_get_xml_utf8(WEVT_LOG *log, struct provider_meta_handle *p, EVT_HANDLE hEvent, TXT_UTF8 *dst); +void evt_variant_to_buffer(BUFFER *b, EVT_VARIANT *ev, const char *separator); static inline void wevt_variant_cleanup(WEVT_VARIANT *v) { freez(v->data); @@ -130,6 +156,10 @@ static inline void wevt_variant_resize(WEVT_VARIANT *v, size_t required_size) { v->data = mallocz(v->size); } +static inline void wevt_variant_count_from_used(WEVT_VARIANT *v) { + v->count = v->used / sizeof(*v->data); +} + static inline uint8_t 
wevt_field_get_uint8(EVT_VARIANT *ev) { if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) return 0; @@ -180,16 +210,17 @@ static inline bool wevt_field_get_string_utf8(EVT_VARIANT *ev, TXT_UTF8 *dst) { return wevt_str_wchar_to_utf8(dst, ev->StringVal, -1); } -bool wevt_convert_user_id_to_name(PSID sid, TXT_UTF8 *dst); - -static inline bool wevt_field_get_sid(EVT_VARIANT *ev, TXT_UTF8 *dst) { +bool wevt_convert_user_id_to_name(PSID sid, TXT_UTF8 *dst_account, TXT_UTF8 *dst_domain, TXT_UTF8 *dst_sid_str); +static inline bool wevt_field_get_sid(EVT_VARIANT *ev, TXT_UTF8 *dst_account, TXT_UTF8 *dst_domain, TXT_UTF8 *dst_sid_str) { if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) { - wevt_utf8_empty(dst); + wevt_utf8_empty(dst_account); + wevt_utf8_empty(dst_domain); + wevt_utf8_empty(dst_sid_str); return false; } fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeSid); - return wevt_convert_user_id_to_name(ev->SidVal, dst); + return wevt_convert_user_id_to_name(ev->SidVal, dst_account, dst_domain, dst_sid_str); } static inline uint64_t wevt_field_get_filetime_to_ns(EVT_VARIANT *ev) { @@ -222,42 +253,42 @@ static inline bool wevt_get_uuid_by_type(EVT_VARIANT *ev, ND_UUID *dst) { } // https://learn.microsoft.com/en-us/windows/win32/wes/defining-severity-levels -static inline bool is_valid_publisher_level(uint64_t level, bool strict) { +static inline bool is_valid_provider_level(uint64_t level, bool strict) { if(strict) - // when checking if the name is publisher independent + // when checking if the name is provider independent return level >= 16 && level <= 255; else - // when checking acceptable values in publisher manifests + // when checking acceptable values in provider manifests return level <= 255; } // https://learn.microsoft.com/en-us/windows/win32/wes/defining-tasks-and-opcodes -static inline bool is_valid_publisher_opcode(uint64_t opcode, bool strict) { +static inline bool is_valid_provider_opcode(uint64_t opcode, bool strict) { 
if(strict) - // when checking if the name is publisher independent + // when checking if the name is provider independent return opcode >= 10 && opcode <= 239; else - // when checking acceptable values in publisher manifests + // when checking acceptable values in provider manifests return opcode <= 255; } // https://learn.microsoft.com/en-us/windows/win32/wes/defining-tasks-and-opcodes -static inline bool is_valid_publisher_task(uint64_t task, bool strict) { +static inline bool is_valid_provider_task(uint64_t task, bool strict) { if(strict) - // when checking if the name is publisher independent + // when checking if the name is provider independent return task > 0 && task <= 0xFFFF; else - // when checking acceptable values in publisher manifests + // when checking acceptable values in provider manifests return task <= 0xFFFF; } // https://learn.microsoft.com/en-us/windows/win32/wes/defining-keywords-used-to-classify-types-of-events -static inline bool is_valid_publisher_keywords(uint64_t keyword, bool strict) { +static inline bool is_valid_provider_keyword(uint64_t keyword, bool strict) { if(strict) - // when checking if the name is publisher independent + // when checking if the name is provider independent return keyword > 0 && keyword <= 0x0000FFFFFFFFFFFF; else - // when checking acceptable values in publisher manifests + // when checking acceptable values in provider manifests return true; } diff --git a/src/collectors/windows-events.plugin/windows-events-sid.c b/src/collectors/windows-events.plugin/windows-events-sid.c index 6aefd22698b50f..8f92fb495d169c 100644 --- a/src/collectors/windows-events.plugin/windows-events-sid.c +++ b/src/collectors/windows-events.plugin/windows-events-sid.c @@ -9,8 +9,18 @@ typedef struct { } SID_KEY; typedef struct { - const char *user; - size_t user_len; + // IMPORTANT: + // This is malloc'd ! You have to manually set fields to zero. 
+ + const char *account; + const char *domain; + const char *full; + const char *sid_str; + + uint32_t account_len; + uint32_t domain_len; + uint32_t full_len; + uint32_t sid_str_len; // this needs to be last, because of its variable size SID_KEY key; @@ -43,48 +53,46 @@ void sid_cache_init(void) { simple_hashtable_init_SID(&sid_globals.hashtable, 100); } -static bool update_user(SID_VALUE *found, TXT_UTF8 *dst) { - if(found && found->user) { - txt_utf8_resize(dst, found->user_len + 1, false); - memcpy(dst->data, found->user, found->user_len + 1); - dst->used = found->user_len + 1; - return true; - } - - txt_utf8_resize(dst, 1, false); - dst->data[0] = '\0'; - dst->used = 1; - return false; -} - -static void lookup_user(PSID *sid, TXT_UTF8 *dst) { +static void lookup_user(SID_VALUE *sv) { static __thread wchar_t account_unicode[256]; static __thread wchar_t domain_unicode[256]; + static __thread char tmp[512 + 2]; + DWORD account_name_size = sizeof(account_unicode) / sizeof(account_unicode[0]); DWORD domain_name_size = sizeof(domain_unicode) / sizeof(domain_unicode[0]); SID_NAME_USE sid_type; - txt_utf8_resize(dst, 1024, false); - - if (LookupAccountSidW(NULL, sid, account_unicode, &account_name_size, domain_unicode, &domain_name_size, &sid_type)) { - const char *user = account2utf8(account_unicode); + if (LookupAccountSidW(NULL, sv->key.sid, account_unicode, &account_name_size, domain_unicode, &domain_name_size, &sid_type)) { + const char *account = account2utf8(account_unicode); const char *domain = domain2utf8(domain_unicode); - dst->used = snprintfz(dst->data, dst->size, "%s\\%s", domain, user) + 1; + snprintfz(tmp, sizeof(tmp), "%s\\%s", domain, account); + sv->domain = strdupz(domain); sv->domain_len = strlen(sv->domain); + sv->account = strdupz(account); sv->account_len = strlen(sv->account); + sv->full = strdupz(tmp); sv->full_len = strlen(sv->full); } else { - wchar_t *sid_string = NULL; - if (ConvertSidToStringSidW(sid, &sid_string)) { - const char *user 
= account2utf8(sid_string); - dst->used = snprintfz(dst->data, dst->size, "%s", user) + 1; - } - else - dst->used = snprintfz(dst->data, dst->size, "[invalid]") + 1; + sv->domain = NULL; + sv->account = NULL; + sv->full = NULL; + sv->domain_len = 0; + sv->account_len = 0; + sv->full_len = 0; + } + + wchar_t *sid_string = NULL; + if (ConvertSidToStringSidW(sv->key.sid, &sid_string)) { + sv->sid_str = strdupz(account2utf8(sid_string)); + sv->sid_str_len = strlen(sv->sid_str); + } + else { + sv->sid_str = NULL; + sv->sid_str_len = 0; } } -bool wevt_convert_user_id_to_name(PSID sid, TXT_UTF8 *dst) { +static SID_VALUE *lookup_or_convert_user_id_to_name_lookup(PSID sid) { if(!sid || !IsValidSid(sid)) - return update_user(NULL, dst); + return NULL; size_t size = GetLengthSid(sid); @@ -98,21 +106,76 @@ bool wevt_convert_user_id_to_name(PSID sid, TXT_UTF8 *dst) { spinlock_lock(&sid_globals.spinlock); SID_VALUE *found = simple_hashtable_get_SID(&sid_globals.hashtable, &tmp->key, tmp_key_size); spinlock_unlock(&sid_globals.spinlock); - if(found) return update_user(found, dst); + if(found) return found; // allocate the SID_VALUE found = mallocz(tmp_size); memcpy(found, buf, tmp_size); - // lookup the user - lookup_user(sid, dst); - found->user = strdupz(dst->data); - found->user_len = dst->used - 1; + lookup_user(found); // add it to the cache spinlock_lock(&sid_globals.spinlock); simple_hashtable_set_SID(&sid_globals.hashtable, &found->key, tmp_key_size, found); spinlock_unlock(&sid_globals.spinlock); - return update_user(found, dst); + return found; +} + +bool wevt_convert_user_id_to_name(PSID sid, TXT_UTF8 *dst_account, TXT_UTF8 *dst_domain, TXT_UTF8 *dst_sid_str) { + SID_VALUE *found = lookup_or_convert_user_id_to_name_lookup(sid); + + if(found) { + if (found->account) { + txt_utf8_resize(dst_account, found->account_len + 1, false); + memcpy(dst_account->data, found->account, found->account_len + 1); + dst_account->used = found->account_len + 1; + } + else 
wevt_utf8_empty(dst_account); + + if (found->domain) { + txt_utf8_resize(dst_domain, found->domain_len + 1, false); + memcpy(dst_domain->data, found->domain, found->domain_len + 1); + dst_domain->used = found->domain_len + 1; + } + else wevt_utf8_empty(dst_domain); + + if (found->sid_str) { + txt_utf8_resize(dst_sid_str, found->sid_str_len + 1, false); + memcpy(dst_sid_str->data, found->sid_str, found->sid_str_len + 1); + dst_sid_str->used = found->sid_str_len + 1; + } + else wevt_utf8_empty(dst_sid_str); + + return true; + } + + wevt_utf8_empty(dst_account); + wevt_utf8_empty(dst_domain); + wevt_utf8_empty(dst_sid_str); + return false; +} + +bool buffer_sid_to_sid_str_and_name(PSID sid, BUFFER *dst, const char *prefix) { + SID_VALUE *found = lookup_or_convert_user_id_to_name_lookup(sid); + size_t added = 0; + + if(found) { + if (found->full) { + if (prefix && *prefix) + buffer_strcat(dst, prefix); + + buffer_fast_strcat(dst, found->full, found->full_len); + added++; + } + if (found->sid_str) { + if (prefix && *prefix) + buffer_strcat(dst, prefix); + + buffer_fast_strcat(dst, found->sid_str, found->sid_str_len); + added++; + } + } + + return added > 0; } diff --git a/src/collectors/windows-events.plugin/windows-events-sid.h b/src/collectors/windows-events.plugin/windows-events-sid.h index 8723ade58c817c..aca8e77e34c1fe 100644 --- a/src/collectors/windows-events.plugin/windows-events-sid.h +++ b/src/collectors/windows-events.plugin/windows-events-sid.h @@ -6,7 +6,8 @@ #include "windows-events.h" struct wevt_log; -bool wevt_convert_user_id_to_name(PSID sid, TXT_UTF8 *dst); +bool wevt_convert_user_id_to_name(PSID sid, TXT_UTF8 *dst_account, TXT_UTF8 *dst_domain, TXT_UTF8 *dst_sid_str); +bool buffer_sid_to_sid_str_and_name(PSID sid, BUFFER *dst, const char *prefix); void sid_cache_init(void); #endif //NETDATA_WINDOWS_EVENTS_SID_H diff --git a/src/collectors/windows-events.plugin/windows-events-sources.c b/src/collectors/windows-events.plugin/windows-events-sources.c 
index 204e01ca9a3fd7..b931ed059f451b 100644 --- a/src/collectors/windows-events.plugin/windows-events-sources.c +++ b/src/collectors/windows-events.plugin/windows-events-sources.c @@ -137,41 +137,32 @@ // } //}; +ENUM_STR_MAP_DEFINE(WEVT_SOURCE_TYPE) = { + { .id = WEVTS_ALL, .name = WEVT_SOURCE_ALL_NAME }, + { .id = WEVTS_ADMIN, .name = WEVT_SOURCE_ALL_ADMIN_NAME }, + { .id = WEVTS_OPERATIONAL, .name = WEVT_SOURCE_ALL_OPERATIONAL_NAME }, + { .id = WEVTS_ANALYTIC, .name = WEVT_SOURCE_ALL_ANALYTIC_NAME }, + { .id = WEVTS_DEBUG, .name = WEVT_SOURCE_ALL_DEBUG_NAME }, + { .id = WEVTS_WINDOWS, .name = WEVT_SOURCE_ALL_WINDOWS_NAME }, + { .id = WEVTS_ENABLED, .name = WEVT_SOURCE_ALL_ENABLED_NAME }, + { .id = WEVTS_DISABLED, .name = WEVT_SOURCE_ALL_DISABLED_NAME }, + { .id = WEVTS_FORWARDED, .name = WEVT_SOURCE_ALL_FORWARDED_NAME }, + { .id = WEVTS_CLASSIC, .name = WEVT_SOURCE_ALL_CLASSIC_NAME }, + { .id = WEVTS_BACKUP_MODE, .name = WEVT_SOURCE_ALL_BACKUP_MODE_NAME }, + { .id = WEVTS_OVERWRITE_MODE, .name = WEVT_SOURCE_ALL_OVERWRITE_MODE_NAME }, + { .id = WEVTS_STOP_WHEN_FULL_MODE, .name = WEVT_SOURCE_ALL_STOP_WHEN_FULL_MODE_NAME }, + { .id = WEVTS_RETAIN_AND_BACKUP_MODE, .name = WEVT_SOURCE_ALL_RETAIN_AND_BACKUP_MODE_NAME }, + + // terminator + { . 
id = 0, .name = NULL } +}; + +BITMAP_STR_DEFINE_FUNCTIONS(WEVT_SOURCE_TYPE, WEVTS_NONE, ""); + DICTIONARY *wevt_sources = NULL; DICTIONARY *used_hashes_registry = NULL; static usec_t wevt_session = 0; -WEVT_SOURCE_TYPE wevt_internal_source_type(const char *value) { - if(strcmp(value, WEVT_SOURCE_ALL_NAME) == 0) - return WEVTS_ALL; - - if(strcmp(value, WEVT_SOURCE_ALL_ADMIN_NAME) == 0) - return WEVTS_ADMIN; - - if(strcmp(value, WEVT_SOURCE_ALL_OPERATIONAL_NAME) == 0) - return WEVTS_OPERATIONAL; - - if(strcmp(value, WEVT_SOURCE_ALL_ANALYTIC_NAME) == 0) - return WEVTS_ANALYTIC; - - if(strcmp(value, WEVT_SOURCE_ALL_DEBUG_NAME) == 0) - return WEVTS_DEBUG; - - if(strcmp(value, WEVT_SOURCE_ALL_DIAGNOSTIC_NAME) == 0) - return WEVTS_DIAGNOSTIC; - - if(strcmp(value, WEVT_SOURCE_ALL_TRACING_NAME) == 0) - return WEVTS_TRACING; - - if(strcmp(value, WEVT_SOURCE_ALL_PERFORMANCE_NAME) == 0) - return WEVTS_PERFORMANCE; - - if(strcmp(value, WEVT_SOURCE_ALL_WINDOWS_NAME) == 0) - return WEVTS_WINDOWS; - - return WEVTS_NONE; -} - void wevt_sources_del_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { LOGS_QUERY_SOURCE *src = value; freez((void *)src->fullname); @@ -265,7 +256,14 @@ int wevt_sources_dict_items_forward_compar(const void *a, const void *b) { // -------------------------------------------------------------------------------------------------------------------- +typedef enum { + wevt_source_type_internal, + wevt_source_type_provider, + wevt_source_type_channel, +} wevt_source_type; + struct wevt_source { + wevt_source_type type; usec_t first_ut; usec_t last_ut; size_t count; @@ -279,6 +277,15 @@ static int wevt_source_to_json_array_cb(const DICTIONARY_ITEM *item, void *entry const char *name = dictionary_acquired_item_name(item); + if(s->count == 1 && strncmp(name, WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX, sizeof(WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX) - 1) == 0) + // do not include "All-Of-X" when there is only 1 channel + return 0; + + bool 
default_selected = (s->type == wevt_source_type_channel); + if(default_selected && (strcmp(name, "NetdataWEL") == 0 || strcmp(name, "Netdata/Access") == 0)) + // do not select Netdata Access logs by default + default_selected = false; + buffer_json_add_array_item_object(wb); { char size_for_humans[128]; @@ -300,6 +307,7 @@ static int wevt_source_to_json_array_cb(const DICTIONARY_ITEM *item, void *entry buffer_json_member_add_string(wb, "name", name); buffer_json_member_add_string(wb, "pill", size_for_humans); buffer_json_member_add_string(wb, "info", info); + buffer_json_member_add_boolean(wb, "default_selected", default_selected); } buffer_json_object_close(wb); // options object @@ -337,61 +345,142 @@ void wevt_sources_to_json_array(BUFFER *wb) { t.size = src->size; t.entries = src->entries; - dictionary_set(dict, WEVT_SOURCE_ALL_NAME, &t, sizeof(t)); - - if(src->source_type & WEVTS_ADMIN) - dictionary_set(dict, WEVT_SOURCE_ALL_ADMIN_NAME, &t, sizeof(t)); - - if(src->source_type & WEVTS_OPERATIONAL) - dictionary_set(dict, WEVT_SOURCE_ALL_OPERATIONAL_NAME, &t, sizeof(t)); - - if(src->source_type & WEVTS_ANALYTIC) - dictionary_set(dict, WEVT_SOURCE_ALL_ANALYTIC_NAME, &t, sizeof(t)); - - if(src->source_type & WEVTS_DEBUG) - dictionary_set(dict, WEVT_SOURCE_ALL_DEBUG_NAME, &t, sizeof(t)); - - if(src->source_type & WEVTS_DIAGNOSTIC) - dictionary_set(dict, WEVT_SOURCE_ALL_DIAGNOSTIC_NAME, &t, sizeof(t)); - - if(src->source_type & WEVTS_TRACING) - dictionary_set(dict, WEVT_SOURCE_ALL_TRACING_NAME, &t, sizeof(t)); - - if(src->source_type & WEVTS_PERFORMANCE) - dictionary_set(dict, WEVT_SOURCE_ALL_PERFORMANCE_NAME, &t, sizeof(t)); + src->source_type |= WEVTS_ALL; + t.type = wevt_source_type_internal; + for(size_t i = 0; WEVT_SOURCE_TYPE_names[i].name ;i++) { + if(src->source_type & WEVT_SOURCE_TYPE_names[i].id) + dictionary_set(dict, WEVT_SOURCE_TYPE_names[i].name, &t, sizeof(t)); + } - if(src->source_type & WEVTS_WINDOWS) - dictionary_set(dict, 
WEVT_SOURCE_ALL_WINDOWS_NAME, &t, sizeof(t)); + if(src->provider) { + t.type = wevt_source_type_provider; + dictionary_set(dict, string2str(src->provider), &t, sizeof(t)); + } - if(src->source) + if(src->source) { + t.type = wevt_source_type_channel; dictionary_set(dict, string2str(src->source), &t, sizeof(t)); + } } dfe_done(jf); dictionary_sorted_walkthrough_read(dict, wevt_source_to_json_array_cb, wb); } -static bool check_and_remove_suffix(char *name, size_t len, const char *suffix) { - char s[strlen(suffix) + 2]; - s[0] = '/'; - memcpy(&s[1], suffix, sizeof(s) - 1); - size_t slen = sizeof(s) - 1; +static bool ndEvtGetChannelConfigProperty(EVT_HANDLE hChannelConfig, WEVT_VARIANT *pr, EVT_CHANNEL_CONFIG_PROPERTY_ID id) { + if (!EvtGetChannelConfigProperty(hChannelConfig, id, 0, pr->size, pr->data, &pr->used)) { + DWORD status = GetLastError(); + if (ERROR_INSUFFICIENT_BUFFER == status) { + wevt_variant_resize(pr, pr->used); + if(!EvtGetChannelConfigProperty(hChannelConfig, id, 0, pr->size, pr->data, &pr->used)) { + pr->used = 0; + pr->count = 0; + return false; + } + } + } - if(slen + 1 >= len) return false; + wevt_variant_count_from_used(pr); + return true; +} - char *match = &name[len - slen]; - if(strcasecmp(match, s) == 0) { - *match = '\0'; - return true; +WEVT_SOURCE_TYPE categorize_channel(const wchar_t *channel_path, const char **provider, WEVT_VARIANT *property) { + EVT_HANDLE hChannelConfig = NULL; + WEVT_SOURCE_TYPE result = WEVTS_ALL; + + // Open the channel configuration + hChannelConfig = EvtOpenChannelConfig(NULL, channel_path, 0); + if (!hChannelConfig) + goto cleanup; + + if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigType) & + property->count && + property->data[0].Type == EvtVarTypeUInt32) { + switch (property->data[0].UInt32Val) { + case EvtChannelTypeAdmin: + result |= WEVTS_ADMIN; + break; + + case EvtChannelTypeOperational: + result |= WEVTS_OPERATIONAL; + break; + + case EvtChannelTypeAnalytic: + result |= 
WEVTS_ANALYTIC; + break; + + case EvtChannelTypeDebug: + result |= WEVTS_DEBUG; + break; + + default: + break; + } } - s[0] = '-'; - if(strcasecmp(match, s) == 0) { - *match = '\0'; - return true; + if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigClassicEventlog) && + property->count && + property->data[0].Type == EvtVarTypeBoolean && + property->data[0].BooleanVal) + result |= WEVTS_CLASSIC; + + if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigOwningPublisher) && + property->count && + property->data[0].Type == EvtVarTypeString) { + *provider = provider2utf8(property->data[0].StringVal); + if(wcscasecmp(property->data[0].StringVal, L"Microsoft-Windows-EventCollector") == 0) + result |= WEVTS_FORWARDED; + } + else + *provider = NULL; + + if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigEnabled) && + property->count && + property->data[0].Type == EvtVarTypeBoolean) { + if(property->data[0].BooleanVal) + result |= WEVTS_ENABLED; + else + result |= WEVTS_DISABLED; } - return false; + bool got_retention = false; + bool retained = false; + if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelLoggingConfigRetention) && + property->count && + property->data[0].Type == EvtVarTypeBoolean) { + got_retention = true; + retained = property->data[0].BooleanVal; + } + + bool got_auto_backup = false; + bool auto_backup = false; + if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelLoggingConfigAutoBackup) && + property->count && + property->data[0].Type == EvtVarTypeBoolean) { + got_auto_backup = true; + auto_backup = property->data[0].BooleanVal; + } + + if(got_retention && got_auto_backup) { + if(!retained) { + if(auto_backup) + result |= WEVTS_BACKUP_MODE; + else + result |= WEVTS_OVERWRITE_MODE; + } + else { + if(auto_backup) + result |= WEVTS_STOP_WHEN_FULL_MODE; + else + result |= WEVTS_RETAIN_AND_BACKUP_MODE; + } + } + +cleanup: + if (hChannelConfig) + 
EvtClose(hChannelConfig); + + return result; } void wevt_sources_scan(void) { @@ -400,8 +489,9 @@ void wevt_sources_scan(void) { EVT_HANDLE hChannelEnum = NULL; if(spinlock_trylock(&spinlock)) { - const usec_t now_monotonic_ut = now_monotonic_usec(); + const usec_t started_ut = now_monotonic_usec(); + WEVT_VARIANT property = { 0 }; DWORD dwChannelBufferSize = 0; DWORD dwChannelBufferUsed = 0; DWORD status = ERROR_SUCCESS; @@ -414,7 +504,7 @@ void wevt_sources_scan(void) { goto cleanup; } - WEVT_LOG *log = wevt_openlog6(); + WEVT_LOG *log = wevt_openlog6(WEVT_QUERY_RETENTION); if(!log) goto cleanup; while (true) { @@ -438,32 +528,26 @@ void wevt_sources_scan(void) { if(!wevt_channel_retention(log, channel, NULL, &retention)) continue; + LOGS_QUERY_SOURCE *found = dictionary_get(wevt_sources, channel2utf8(channel)); + if(found) { + // we just need to update its retention + + found->last_scan_monotonic_ut = now_monotonic_usec(); + found->msg_first_id = retention.first_event.id; + found->msg_last_id = retention.last_event.id; + found->msg_first_ut = retention.first_event.created_ns / NSEC_PER_USEC; + found->msg_last_ut = retention.last_event.created_ns / NSEC_PER_USEC; + found->size = retention.size_bytes; + continue; + } + const char *name = channel2utf8(channel); const char *fullname = strdupz(name); + const char *provider; - WEVT_SOURCE_TYPE sources = WEVTS_ALL; - size_t len = strlen(fullname); - if(check_and_remove_suffix((char *)name, len, "Admin")) - sources |= WEVTS_ADMIN; - else if(check_and_remove_suffix((char *)name, len, "Operational")) - sources |= WEVTS_OPERATIONAL; - else if(check_and_remove_suffix((char *)name, len, "Analytic")) - sources |= WEVTS_ANALYTIC; - else if(check_and_remove_suffix((char *)name, len, "Debug") || - check_and_remove_suffix((char *)name, len, "Verbose")) - sources |= WEVTS_DEBUG; - else if(check_and_remove_suffix((char *)name, len, "Diagnostic")) - sources |= WEVTS_DIAGNOSTIC; - else if(check_and_remove_suffix((char *)name, len, 
"Trace") || - check_and_remove_suffix((char *)name, len, "Tracing")) - sources |= WEVTS_TRACING; - else if(check_and_remove_suffix((char *)name, len, "Performance") || - check_and_remove_suffix((char *)name, len, "Perf")) - sources |= WEVTS_PERFORMANCE; - + WEVT_SOURCE_TYPE sources = categorize_channel(channel, &provider, &property); char *slash = strchr(name, '/'); - if(slash) - *slash = '\0'; + if(slash) *slash = '\0'; if(strcasecmp(name, "Application") == 0) sources |= WEVTS_WINDOWS; @@ -485,9 +569,27 @@ void wevt_sources_scan(void) { .msg_last_ut = retention.last_event.created_ns / NSEC_PER_USEC, .size = retention.size_bytes, .source_type = sources, - .source = string_strdupz(name), + .source = string_strdupz(fullname), }; + if(strncmp(fullname, "Netdata", 7) == 0) + // WEL based providers of Netdata are named NetdataX + provider = "Netdata"; + + if(provider && *provider) { + char buf[sizeof(WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX) + strlen(provider)]; // sizeof() includes terminator + snprintf(buf, sizeof(buf), WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX "%s", provider); + + if(trim_all(buf) != NULL) { + for (size_t i = 0; i < sizeof(buf) - 1; i++) { + // remove character that may interfere with our parsing + if (isspace((uint8_t) buf[i]) || buf[i] == '%' || buf[i] == '+' || buf[i] == '|' || buf[i] == ':') + buf[i] = '_'; + } + src.provider = string_strdupz(buf); + } + } + dictionary_set(wevt_sources, src.fullname, &src, sizeof(src)); } @@ -519,13 +621,21 @@ void wevt_sources_scan(void) { LOGS_QUERY_SOURCE *src; dfe_start_write(wevt_sources, src) { - if(src->last_scan_monotonic_ut < now_monotonic_ut) + if(src->last_scan_monotonic_ut < started_ut) { + src->msg_first_id = 0; + src->msg_last_id = 0; + src->msg_first_ut = 0; + src->msg_last_ut = 0; + src->size = 0; dictionary_del(wevt_sources, src->fullname); + } } dfe_done(src); dictionary_garbage_collect(wevt_sources); spinlock_unlock(&spinlock); + + wevt_variant_cleanup(&property); } cleanup: diff --git 
a/src/collectors/windows-events.plugin/windows-events-sources.h b/src/collectors/windows-events.plugin/windows-events-sources.h index 6ee6ee4c03d9d9..4ad4880d7df7f1 100644 --- a/src/collectors/windows-events.plugin/windows-events-sources.h +++ b/src/collectors/windows-events.plugin/windows-events-sources.h @@ -6,18 +6,42 @@ #include "libnetdata/libnetdata.h" typedef enum { - WEVTS_NONE = 0, - WEVTS_ALL = (1 << 0), - WEVTS_ADMIN = (1 << 1), - WEVTS_OPERATIONAL = (1 << 2), - WEVTS_ANALYTIC = (1 << 3), - WEVTS_DEBUG = (1 << 4), - WEVTS_DIAGNOSTIC = (1 << 5), - WEVTS_TRACING = (1 << 6), - WEVTS_PERFORMANCE = (1 << 7), - WEVTS_WINDOWS = (1 << 8), + WEVTS_NONE = 0, + WEVTS_ALL = (1 << 0), + WEVTS_ADMIN = (1 << 1), + WEVTS_OPERATIONAL = (1 << 2), + WEVTS_ANALYTIC = (1 << 3), + WEVTS_DEBUG = (1 << 4), + WEVTS_WINDOWS = (1 << 5), + WEVTS_ENABLED = (1 << 6), + WEVTS_DISABLED = (1 << 7), + WEVTS_FORWARDED = (1 << 8), + WEVTS_CLASSIC = (1 << 9), + WEVTS_BACKUP_MODE = (1 << 10), + WEVTS_OVERWRITE_MODE = (1 << 11), + WEVTS_STOP_WHEN_FULL_MODE = (1 << 12), + WEVTS_RETAIN_AND_BACKUP_MODE = (1 << 13), } WEVT_SOURCE_TYPE; +BITMAP_STR_DEFINE_FUNCTIONS_EXTERN(WEVT_SOURCE_TYPE) + +#define WEVT_SOURCE_ALL_NAME "All" +#define WEVT_SOURCE_ALL_ADMIN_NAME "All-Admin" +#define WEVT_SOURCE_ALL_OPERATIONAL_NAME "All-Operational" +#define WEVT_SOURCE_ALL_ANALYTIC_NAME "All-Analytic" +#define WEVT_SOURCE_ALL_DEBUG_NAME "All-Debug" +#define WEVT_SOURCE_ALL_WINDOWS_NAME "All-Windows" +#define WEVT_SOURCE_ALL_ENABLED_NAME "All-Enabled" +#define WEVT_SOURCE_ALL_DISABLED_NAME "All-Disabled" +#define WEVT_SOURCE_ALL_FORWARDED_NAME "All-Forwarded" +#define WEVT_SOURCE_ALL_CLASSIC_NAME "All-Classic" +#define WEVT_SOURCE_ALL_BACKUP_MODE_NAME "All-In-Backup-Mode" +#define WEVT_SOURCE_ALL_OVERWRITE_MODE_NAME "All-In-Overwrite-Mode" +#define WEVT_SOURCE_ALL_STOP_WHEN_FULL_MODE_NAME "All-In-StopWhenFull-Mode" +#define WEVT_SOURCE_ALL_RETAIN_AND_BACKUP_MODE_NAME "All-In-RetainAndBackup-Mode" + +#define 
WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX "All-Of-" + typedef struct { const char *fullname; size_t fullname_len; @@ -25,6 +49,7 @@ typedef struct { const wchar_t *custom_query; STRING *source; + STRING *provider; WEVT_SOURCE_TYPE source_type; usec_t msg_first_ut; usec_t msg_last_ut; @@ -40,16 +65,6 @@ typedef struct { extern DICTIONARY *wevt_sources; extern DICTIONARY *used_hashes_registry; -#define WEVT_SOURCE_ALL_NAME "All" -#define WEVT_SOURCE_ALL_ADMIN_NAME "All-Admin" -#define WEVT_SOURCE_ALL_OPERATIONAL_NAME "All-Operational" -#define WEVT_SOURCE_ALL_ANALYTIC_NAME "All-Analytic" -#define WEVT_SOURCE_ALL_DEBUG_NAME "All-Debug" -#define WEVT_SOURCE_ALL_DIAGNOSTIC_NAME "All-Diagnostic" -#define WEVT_SOURCE_ALL_TRACING_NAME "All-Tracing" -#define WEVT_SOURCE_ALL_PERFORMANCE_NAME "All-Performance" -#define WEVT_SOURCE_ALL_WINDOWS_NAME "All-Windows" - void wevt_sources_init(void); void wevt_sources_scan(void); void buffer_json_wevt_versions(BUFFER *wb); diff --git a/src/collectors/windows-events.plugin/windows-events-unicode.c b/src/collectors/windows-events.plugin/windows-events-unicode.c index 3208ee774fd55b..f84418dc76ee9c 100644 --- a/src/collectors/windows-events.plugin/windows-events-unicode.c +++ b/src/collectors/windows-events.plugin/windows-events-unicode.c @@ -5,7 +5,7 @@ inline void utf82unicode(wchar_t *dst, size_t dst_size, const char *src) { if (src) { // Convert from UTF-8 to wide char (UTF-16) - if (MultiByteToWideChar(CP_UTF8, 0, src, -1, dst, (int)dst_size) == 0) + if (utf8_to_utf16(dst, dst_size, src, -1) == 0) wcsncpy(dst, L"[failed conv.]", dst_size - 1); } else @@ -41,7 +41,7 @@ char *unicode2utf8_strdupz(const wchar_t *src, size_t *utf8_len) { wchar_t *channel2unicode(const char *utf8str) { static __thread wchar_t buffer[1024]; - utf82unicode(buffer, sizeof(buffer) / sizeof(wchar_t), utf8str); + utf82unicode(buffer, _countof(buffer), utf8str); return buffer; } @@ -69,6 +69,12 @@ char *query2utf8(const wchar_t *query) { return buffer; } +char 
*provider2utf8(const wchar_t *provider) { + static __thread char buffer[256]; + unicode2utf8(buffer, sizeof(buffer), provider); + return buffer; +} + bool wevt_str_wchar_to_utf8(TXT_UTF8 *dst, const wchar_t *src, int src_len_with_null) { if(!src || !src_len_with_null) goto cleanup; diff --git a/src/collectors/windows-events.plugin/windows-events-unicode.h b/src/collectors/windows-events.plugin/windows-events-unicode.h index fe11d04a15f909..fa2f4a49ebb375 100644 --- a/src/collectors/windows-events.plugin/windows-events-unicode.h +++ b/src/collectors/windows-events.plugin/windows-events-unicode.h @@ -9,7 +9,7 @@ typedef enum __attribute__((packed)) { TXT_SOURCE_UNKNOWN = 0, - TXT_SOURCE_PUBLISHER, + TXT_SOURCE_PROVIDER, TXT_SOURCE_FIELD_CACHE, TXT_SOURCE_EVENT_LOG, TXT_SOURCE_HARDCODED, @@ -177,6 +177,7 @@ char *channel2utf8(const wchar_t *channel); wchar_t *channel2unicode(const char *utf8str); char *query2utf8(const wchar_t *query); +char *provider2utf8(const wchar_t *provider); char *unicode2utf8_strdupz(const wchar_t *src, size_t *utf8_len); diff --git a/src/collectors/windows-events.plugin/windows-events.c b/src/collectors/windows-events.plugin/windows-events.c index 24f8f59a597a97..14ca9d6ceab408 100644 --- a/src/collectors/windows-events.plugin/windows-events.c +++ b/src/collectors/windows-events.plugin/windows-events.c @@ -18,14 +18,17 @@ static bool plugin_should_exit = false; #define WEVT_KEYS_INCLUDED_IN_FACETS \ "|" WEVT_FIELD_COMPUTER \ "|" WEVT_FIELD_PROVIDER \ - "|" WEVT_FIELD_SOURCE \ "|" WEVT_FIELD_LEVEL \ "|" WEVT_FIELD_KEYWORDS \ "|" WEVT_FIELD_OPCODE \ "|" WEVT_FIELD_TASK \ - "|" WEVT_FIELD_USER \ + "|" WEVT_FIELD_ACCOUNT \ + "|" WEVT_FIELD_DOMAIN \ + "|" WEVT_FIELD_SID \ "" +#define query_has_fts(lqs) ((lqs)->rq.query != NULL) + static inline WEVT_QUERY_STATUS check_stop(const bool *cancelled, const usec_t *stop_monotonic_ut) { if(cancelled && __atomic_load_n(cancelled, __ATOMIC_RELAXED)) { nd_log(NDLS_COLLECTORS, NDLP_INFO, "Function has been 
cancelled"); @@ -66,9 +69,10 @@ FACET_ROW_SEVERITY wevt_levelid_to_facet_severity(FACETS *facets __maybe_unused, struct wevt_bin_data { bool rendered; + WEVT_EVENT ev; WEVT_LOG *log; EVT_HANDLE hEvent; - PROVIDER_META_HANDLE *publisher; + PROVIDER_META_HANDLE *provider; }; static void wevt_cleanup_bin_data(void *data) { @@ -77,21 +81,30 @@ static void wevt_cleanup_bin_data(void *data) { if(d->hEvent) EvtClose(d->hEvent); - publisher_release(d->publisher); + provider_release(d->provider); freez(d); } -static inline void wevt_facets_register_bin_data(WEVT_LOG *log, FACETS *facets, WEVT_EVENT *ev __maybe_unused) { +static inline void wevt_facets_register_bin_data(WEVT_LOG *log, FACETS *facets, WEVT_EVENT *ev) { struct wevt_bin_data *d = mallocz(sizeof(struct wevt_bin_data)); +#ifdef NETDATA_INTERNAL_CHECKS + internal_fatal(strcmp(log->ops.provider.data, provider_get_name(log->provider)) != 0, + "Provider name mismatch in data!"); + + internal_fatal(!UUIDeq(ev->provider, provider_get_uuid(log->provider)), + "Provider UUID mismatch in data!"); +#endif + + d->ev = *ev; d->log = log; d->rendered = false; // take the bookmark d->hEvent = log->hEvent; log->hEvent = NULL; - // dup the publisher - d->publisher = publisher_dup(log->publisher); + // dup the provider + d->provider = provider_dup(log->provider); facets_row_bin_data_set(facets, wevt_cleanup_bin_data, d); } @@ -99,12 +112,26 @@ static inline void wevt_facets_register_bin_data(WEVT_LOG *log, FACETS *facets, static void wevt_lazy_loading_event_and_xml(struct wevt_bin_data *d, FACET_ROW *row __maybe_unused) { if(d->rendered) return; - wevt_get_xml_utf8(d->log, d->publisher, d->hEvent, &d->log->ops.xml); - wevt_get_event_utf8(d->log, d->publisher, d->hEvent, &d->log->ops.event); +#ifdef NETDATA_INTERNAL_CHECKS + const FACET_ROW_KEY_VALUE *provider_rkv = dictionary_get(row->dict, WEVT_FIELD_PROVIDER); + internal_fatal(!provider_rkv || strcmp(buffer_tostring(provider_rkv->wb), provider_get_name(d->provider)) != 0, + 
"Provider of row does not match the bin data associated with it"); + + uint64_t event_record_id = UINT64_MAX; + const FACET_ROW_KEY_VALUE *event_record_id_rkv = dictionary_get(row->dict, WEVT_FIELD_EVENTRECORDID); + if(event_record_id_rkv) + event_record_id = str2uint64_t(buffer_tostring(event_record_id_rkv->wb), NULL); + internal_fatal(event_record_id != d->ev.id, + "Event Record ID of row does not match the bin data associated with it"); +#endif + + // the message needs the xml + EvtFormatMessage_Xml_utf8(&d->log->ops.unicode, d->provider, d->hEvent, &d->log->ops.xml); + EvtFormatMessage_Event_utf8(&d->log->ops.unicode, d->provider, d->hEvent, &d->log->ops.event); d->rendered = true; } -static void wevt_render_xml( +static void wevt_lazy_load_xml( FACETS *facets, BUFFER *json_array, FACET_ROW_KEY_VALUE *rkv __maybe_unused, @@ -121,7 +148,7 @@ static void wevt_render_xml( buffer_json_add_array_item_string(json_array, d->log->ops.xml.data); } -static void wevt_render_message( +static void wevt_lazy_load_message( FACETS *facets, BUFFER *json_array, FACET_ROW_KEY_VALUE *rkv __maybe_unused, @@ -217,18 +244,22 @@ static void wevt_register_fields(LOGS_QUERY_STATUS *lqs) { facets_register_key_name( facets, WEVT_FIELD_CHANNEL, - FACET_KEY_OPTION_FTS); + rq->default_facet | FACET_KEY_OPTION_FTS); facets_register_key_name( facets, WEVT_FIELD_PROVIDER, rq->default_facet | FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS); facets_register_key_name( - facets, WEVT_FIELD_SOURCE, + facets, WEVT_FIELD_ACCOUNT, rq->default_facet | FACET_KEY_OPTION_FTS); facets_register_key_name( - facets, WEVT_FIELD_USER, + facets, WEVT_FIELD_DOMAIN, + rq->default_facet | FACET_KEY_OPTION_FTS); + + facets_register_key_name( + facets, WEVT_FIELD_SID, rq->default_facet | FACET_KEY_OPTION_FTS); facets_register_key_name( @@ -236,6 +267,11 @@ static void wevt_register_fields(LOGS_QUERY_STATUS *lqs) { rq->default_facet | FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS); + facets_register_key_name( + 
facets, WEVT_FIELD_EVENTS_API, + rq->default_facet | + FACET_KEY_OPTION_FTS); + facets_register_key_name( facets, WEVT_FIELD_LEVEL, rq->default_facet | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_EXPANDED_FILTER); @@ -277,14 +313,32 @@ static void wevt_register_fields(LOGS_QUERY_STATUS *lqs) { FACET_KEY_OPTION_NONE); facets_register_dynamic_key_name( - facets, WEVT_FIELD_MESSAGE, + facets, + WEVT_FIELD_MESSAGE, FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_VISIBLE, - wevt_render_message, NULL); + wevt_lazy_load_message, + NULL); facets_register_dynamic_key_name( - facets, WEVT_FIELD_XML, - FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_PRETTY_XML, - wevt_render_xml, NULL); + facets, + WEVT_FIELD_XML, + FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_PRETTY_XML, + wevt_lazy_load_xml, + NULL); + + if(query_has_fts(lqs)) { + facets_register_key_name( + facets, WEVT_FIELD_EVENT_MESSAGE_HIDDEN, + FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_HIDDEN | FACET_KEY_OPTION_NEVER_FACET); + + facets_register_key_name( + facets, WEVT_FIELD_EVENT_XML_HIDDEN, + FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_HIDDEN | FACET_KEY_OPTION_NEVER_FACET); + + facets_register_key_name( + facets, WEVT_FIELD_EVENT_DATA_HIDDEN, + FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_HIDDEN | FACET_KEY_OPTION_NEVER_FACET); + } #ifdef NETDATA_INTERNAL_CHECKS facets_register_key_name( @@ -315,8 +369,8 @@ static const char *source_to_str(TXT_UTF8 *txt) { case TXT_SOURCE_EVENT_LOG: return "event-log"; - case TXT_SOURCE_PUBLISHER: - return "publisher"; + case TXT_SOURCE_PROVIDER: + return "provider"; case TXT_SOURCE_FIELD_CACHE: return "fields-cache"; @@ -327,34 +381,55 @@ static const char *source_to_str(TXT_UTF8 *txt) { } #endif +static const char *events_api_to_str(WEVT_PROVIDER_PLATFORM platform) { + switch(platform) { + case WEVT_PLATFORM_WEL: + return "Windows Event Log"; + + case WEVT_PLATFORM_ETW: + return "Event Tracing for Windows"; + + case WEVT_PLATFORM_TL: + return "TraceLogging"; + + 
default: + return "Unknown"; + } +} + static inline size_t wevt_process_event(WEVT_LOG *log, FACETS *facets, LOGS_QUERY_SOURCE *src, usec_t *msg_ut __maybe_unused, WEVT_EVENT *ev) { - size_t len, bytes = log->ops.content.used; + static __thread char uuid_str[UUID_STR_LEN]; - if(log->ops.provider.used > 1) { - bytes += log->ops.provider.used * 2; // unicode is double + size_t len, bytes = log->ops.raw.system.used + log->ops.raw.user.used; + + if(!UUIDiszero(ev->provider)) { + uuid_unparse_lower(ev->provider.uuid, uuid_str); facets_add_key_value_length( - facets, WEVT_FIELD_PROVIDER, sizeof(WEVT_FIELD_PROVIDER) - 1, - log->ops.provider.data, log->ops.provider.used - 1); + facets, WEVT_FIELD_PROVIDER_GUID, sizeof(WEVT_FIELD_PROVIDER_GUID) - 1, + uuid_str, sizeof(uuid_str) - 1); } - if(log->ops.source.used > 1) { - bytes += log->ops.source.used * 2; + if(!UUIDiszero(ev->activity_id)) { + uuid_unparse_lower(ev->activity_id.uuid, uuid_str); facets_add_key_value_length( - facets, WEVT_FIELD_SOURCE, sizeof(WEVT_FIELD_SOURCE) - 1, - log->ops.source.data, log->ops.source.used - 1); + facets, WEVT_FIELD_ACTIVITY_ID, sizeof(WEVT_FIELD_ACTIVITY_ID) - 1, + uuid_str, sizeof(uuid_str) - 1); } - if(log->ops.channel.used > 1) { - bytes += log->ops.channel.used * 2; + if(!UUIDiszero(ev->related_activity_id)) { + uuid_unparse_lower(ev->related_activity_id.uuid, uuid_str); facets_add_key_value_length( - facets, WEVT_FIELD_CHANNEL, sizeof(WEVT_FIELD_CHANNEL) - 1, - log->ops.channel.data, log->ops.channel.used - 1); + facets, WEVT_FIELD_RELATED_ACTIVITY_ID, sizeof(WEVT_FIELD_RELATED_ACTIVITY_ID) - 1, + uuid_str, sizeof(uuid_str) - 1); } - else { - bytes += src->fullname_len * 2; + + if(ev->qualifiers) { + static __thread char qualifiers[UINT64_HEX_MAX_LENGTH]; + len = print_uint64_hex(qualifiers, ev->qualifiers); + bytes += len; facets_add_key_value_length( - facets, WEVT_FIELD_CHANNEL, sizeof(WEVT_FIELD_CHANNEL) - 1, - src->fullname, src->fullname_len); + facets, WEVT_FIELD_QUALIFIERS, 
sizeof(WEVT_FIELD_QUALIFIERS) - 1, + qualifiers, len); } { @@ -366,6 +441,35 @@ static inline size_t wevt_process_event(WEVT_LOG *log, FACETS *facets, LOGS_QUER event_record_id_str, len); } + if(ev->version) { + static __thread char version[UINT64_MAX_LENGTH]; + len = print_uint64(version, ev->version); + bytes += len; + facets_add_key_value_length( + facets, WEVT_FIELD_VERSION, sizeof(WEVT_FIELD_VERSION) - 1, + version, len); + } + + if(log->ops.provider.used > 1) { + bytes += log->ops.provider.used * 2; // unicode is double + facets_add_key_value_length( + facets, WEVT_FIELD_PROVIDER, sizeof(WEVT_FIELD_PROVIDER) - 1, + log->ops.provider.data, log->ops.provider.used - 1); + } + + if(log->ops.channel.used > 1) { + bytes += log->ops.channel.used * 2; + facets_add_key_value_length( + facets, WEVT_FIELD_CHANNEL, sizeof(WEVT_FIELD_CHANNEL) - 1, + log->ops.channel.data, log->ops.channel.used - 1); + } + else { + bytes += src->fullname_len * 2; + facets_add_key_value_length( + facets, WEVT_FIELD_CHANNEL, sizeof(WEVT_FIELD_CHANNEL) - 1, + src->fullname, src->fullname_len); + } + if(log->ops.level.used > 1) { bytes += log->ops.level.used * 2; facets_add_key_value_length( @@ -401,11 +505,28 @@ static inline size_t wevt_process_event(WEVT_LOG *log, FACETS *facets, LOGS_QUER log->ops.task.data, log->ops.task.used - 1); } - if(log->ops.user.used > 1) { - bytes += log->ops.user.used * 2; + if(log->ops.account.used > 1) { + bytes += log->ops.account.used * 2; + facets_add_key_value_length( + facets, + WEVT_FIELD_ACCOUNT, sizeof(WEVT_FIELD_ACCOUNT) - 1, + log->ops.account.data, log->ops.account.used - 1); + } + + if(log->ops.domain.used > 1) { + bytes += log->ops.domain.used * 2; facets_add_key_value_length( - facets, WEVT_FIELD_USER, sizeof(WEVT_FIELD_USER) - 1, - log->ops.user.data, log->ops.user.used - 1); + facets, + WEVT_FIELD_DOMAIN, sizeof(WEVT_FIELD_DOMAIN) - 1, + log->ops.domain.data, log->ops.domain.used - 1); + } + + if(log->ops.sid.used > 1) { + bytes += 
log->ops.sid.used * 2; + facets_add_key_value_length( + facets, + WEVT_FIELD_SID, sizeof(WEVT_FIELD_SID) - 1, + log->ops.sid.data, log->ops.sid.used - 1); } { @@ -417,6 +538,12 @@ static inline size_t wevt_process_event(WEVT_LOG *log, FACETS *facets, LOGS_QUER event_id_str, len); } + { + const char *s = events_api_to_str(ev->platform); + facets_add_key_value_length( + facets, WEVT_FIELD_EVENTS_API, sizeof(WEVT_FIELD_EVENTS_API) - 1, s, strlen(s)); + } + if(ev->process_id) { static __thread char process_id_str[UINT64_MAX_LENGTH]; len = print_uint64(process_id_str, ev->process_id); @@ -467,6 +594,30 @@ static inline size_t wevt_process_event(WEVT_LOG *log, FACETS *facets, LOGS_QUER facets, WEVT_FIELD_TASK "ID", sizeof(WEVT_FIELD_TASK) + 2 - 1, str, len); } + if(log->type & WEVT_QUERY_EVENT_DATA) { + // the query has full text-search + if(log->ops.event.used > 1) { + bytes += log->ops.event.used; + facets_add_key_value_length( + facets, WEVT_FIELD_EVENT_MESSAGE_HIDDEN, sizeof(WEVT_FIELD_EVENT_MESSAGE_HIDDEN) - 1, + log->ops.event.data, log->ops.event.used - 1); + } + + if(log->ops.xml.used > 1) { + bytes += log->ops.xml.used; + facets_add_key_value_length( + facets, WEVT_FIELD_EVENT_XML_HIDDEN, sizeof(WEVT_FIELD_EVENT_XML_HIDDEN) - 1, + log->ops.xml.data, log->ops.xml.used - 1); + } + + if(log->ops.event_data->len) { + bytes += log->ops.event_data->len; + facets_add_key_value_length( + facets, WEVT_FIELD_EVENT_DATA_HIDDEN, sizeof(WEVT_FIELD_EVENT_DATA_HIDDEN) - 1, + buffer_tostring(log->ops.event_data), buffer_strlen(log->ops.event_data)); + } + } + wevt_facets_register_bin_data(log, facets, ev); #ifdef NETDATA_INTERNAL_CHECKS @@ -536,7 +687,7 @@ static WEVT_QUERY_STATUS wevt_query_backward( facets_rows_begin(facets); WEVT_EVENT e; - while (status == WEVT_OK && wevt_get_next_event(log, &e, true)) { + while (status == WEVT_OK && wevt_get_next_event(log, &e)) { usec_t msg_ut = e.created_ns / NSEC_PER_USEC; if(unlikely(!msg_ut)) { @@ -650,7 +801,7 @@ static 
WEVT_QUERY_STATUS wevt_query_forward( facets_rows_begin(facets); WEVT_EVENT e; - while (status == WEVT_OK && wevt_get_next_event(log, &e, true)) { + while (status == WEVT_OK && wevt_get_next_event(log, &e)) { usec_t msg_ut = e.created_ns / NSEC_PER_USEC; if(unlikely(!msg_ut)) { @@ -754,8 +905,20 @@ static WEVT_QUERY_STATUS wevt_query_one_channel( } static bool source_is_mine(LOGS_QUERY_SOURCE *src, LOGS_QUERY_STATUS *lqs) { - if((lqs->rq.source_type == WEVTS_NONE && !lqs->rq.sources) || (src->source_type & lqs->rq.source_type) || - (lqs->rq.sources && simple_pattern_matches(lqs->rq.sources, string2str(src->source)))) { + if( + // no source is requested + (lqs->rq.source_type == WEVTS_NONE && !lqs->rq.sources) || + + // matches our internal source types + (src->source_type & lqs->rq.source_type) || + + // matches the source name + (lqs->rq.sources && src->source && simple_pattern_matches(lqs->rq.sources, string2str(src->source))) || + + // matches the provider (providers start with a special prefix to avoid mix and match) + (lqs->rq.sources && src->provider && simple_pattern_matches(lqs->rq.sources, string2str(src->provider))) + + ) { if(!src->msg_last_ut) // the file is not scanned yet, or the timestamps have not been updated, @@ -837,7 +1000,7 @@ static int wevt_master_query(BUFFER *wb __maybe_unused, LOGS_QUERY_STATUS *lqs _ usec_t ended_ut = started_ut; usec_t duration_ut, max_duration_ut = 0; - WEVT_LOG *log = wevt_openlog6(); + WEVT_LOG *log = wevt_openlog6(query_has_fts(lqs) ? 
WEVT_QUERY_FTS : WEVT_QUERY_NORMAL); if(!log) { // release the files for(size_t f = 0; f < files_used ;f++) @@ -1135,7 +1298,7 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) { // initialization wevt_sources_init(); - publisher_cache_init(); + provider_cache_init(); sid_cache_init(); field_cache_init(); @@ -1209,6 +1372,7 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) { const usec_t step_ut = 100 * USEC_PER_MS; usec_t send_newline_ut = 0; usec_t since_last_scan_ut = WINDOWS_EVENTS_SCAN_EVERY_USEC * 2; // something big to trigger scanning at start + usec_t since_last_providers_release_ut = 0; const bool tty = isatty(fileno(stdout)) == 1; heartbeat_t hb; @@ -1220,7 +1384,13 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) { since_last_scan_ut = 0; } + if(since_last_providers_release_ut > WINDOWS_EVENTS_RELEASE_PROVIDERS_HANDLES_EVERY_UT) { + providers_release_unused_handles(); + since_last_providers_release_ut = 0; + } + usec_t dt_ut = heartbeat_next(&hb, step_ut); + since_last_providers_release_ut += dt_ut; since_last_scan_ut += dt_ut; send_newline_ut += dt_ut; diff --git a/src/collectors/windows-events.plugin/windows-events.h b/src/collectors/windows-events.plugin/windows-events.h index 631855824d6259..4cb5b50cadc446 100644 --- a/src/collectors/windows-events.plugin/windows-events.h +++ b/src/collectors/windows-events.plugin/windows-events.h @@ -128,10 +128,15 @@ typedef enum { #include "windows-events-unicode.h" #include "windows-events-sid.h" #include "windows-events-xml.h" -#include "windows-events-publishers.h" +#include "windows-events-providers.h" #include "windows-events-fields-cache.h" #include "windows-events-query.h" +// enable or disable preloading on full-text-search +#define ON_FTS_PRELOAD_MESSAGE 1 +#define ON_FTS_PRELOAD_XML 0 +#define ON_FTS_PRELOAD_EVENT_DATA 1 + #define WEVT_FUNCTION_DESCRIPTION "View, search and analyze the Microsoft Windows Events log." 
#define WEVT_FUNCTION_NAME "windows-events" @@ -139,26 +144,40 @@ typedef enum { #define WINDOWS_EVENTS_DEFAULT_TIMEOUT 600 #define WINDOWS_EVENTS_SCAN_EVERY_USEC (5 * 60 * USEC_PER_SEC) #define WINDOWS_EVENTS_PROGRESS_EVERY_UT (250 * USEC_PER_MS) - #define FUNCTION_PROGRESS_EVERY_ROWS (2000) #define FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS (1000) #define ANCHOR_DELTA_UT (10 * USEC_PER_SEC) +// run providers release every 5 mins +#define WINDOWS_EVENTS_RELEASE_PROVIDERS_HANDLES_EVERY_UT (5 * 60 * USEC_PER_SEC) +// release idle handles that are older than 5 mins +#define WINDOWS_EVENTS_RELEASE_IDLE_PROVIDER_HANDLES_TIME_UT (5 * 60 * USEC_PER_SEC) + #define WEVT_FIELD_COMPUTER "Computer" #define WEVT_FIELD_CHANNEL "Channel" #define WEVT_FIELD_PROVIDER "Provider" -#define WEVT_FIELD_SOURCE "Source" +#define WEVT_FIELD_PROVIDER_GUID "ProviderGUID" #define WEVT_FIELD_EVENTRECORDID "EventRecordID" +#define WEVT_FIELD_VERSION "Version" +#define WEVT_FIELD_QUALIFIERS "Qualifiers" #define WEVT_FIELD_EVENTID "EventID" #define WEVT_FIELD_LEVEL "Level" #define WEVT_FIELD_KEYWORDS "Keywords" #define WEVT_FIELD_OPCODE "Opcode" -#define WEVT_FIELD_USER "User" +#define WEVT_FIELD_ACCOUNT "UserAccount" +#define WEVT_FIELD_DOMAIN "UserDomain" +#define WEVT_FIELD_SID "UserSID" #define WEVT_FIELD_TASK "Task" #define WEVT_FIELD_PROCESSID "ProcessID" #define WEVT_FIELD_THREADID "ThreadID" +#define WEVT_FIELD_ACTIVITY_ID "ActivityID" +#define WEVT_FIELD_RELATED_ACTIVITY_ID "RelatedActivityID" #define WEVT_FIELD_XML "XML" #define WEVT_FIELD_MESSAGE "Message" +#define WEVT_FIELD_EVENTS_API "EventsAPI" +#define WEVT_FIELD_EVENT_DATA_HIDDEN "__HIDDEN__EVENT__DATA__" +#define WEVT_FIELD_EVENT_MESSAGE_HIDDEN "__HIDDEN__MESSAGE__DATA__" +#define WEVT_FIELD_EVENT_XML_HIDDEN "__HIDDEN__XML__DATA__" // functions needed by LQS @@ -237,7 +256,8 @@ struct lqs_extension { #define LQS_SOURCE_TYPE WEVT_SOURCE_TYPE #define LQS_SOURCE_TYPE_ALL WEVTS_ALL #define LQS_SOURCE_TYPE_NONE WEVTS_NONE -#define 
LQS_FUNCTION_GET_INTERNAL_SOURCE_TYPE(value) wevt_internal_source_type(value) +#define LQS_PARAMETER_SOURCE_NAME "Event Channels" // this is how it is shown to users +#define LQS_FUNCTION_GET_INTERNAL_SOURCE_TYPE(value) WEVT_SOURCE_TYPE_2id_one(value) #define LQS_FUNCTION_SOURCE_TO_JSON_ARRAY(wb) wevt_sources_to_json_array(wb) #include "libnetdata/facets/logs_query_status.h" diff --git a/src/collectors/windows.plugin/perflib-mssql.c b/src/collectors/windows.plugin/perflib-mssql.c index e8f7ff986e66b4..0abab52817129f 100644 --- a/src/collectors/windows.plugin/perflib-mssql.c +++ b/src/collectors/windows.plugin/perflib-mssql.c @@ -870,11 +870,12 @@ int dict_mssql_locks_charts_cb(const DICTIONARY_ITEM *item __maybe_unused, void rrddim_set_by_pointer(mli->parent->st_deadLocks, mli->rd_deadLocks, (collected_number)mli->deadLocks.current.Data); + + return 1; } static void do_mssql_locks(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) { - char id[RRD_ID_LENGTH_MAX + 1]; PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_LOCKS]); if (!pObjectType) return; @@ -1250,11 +1251,12 @@ int dict_mssql_databases_charts_cb(const DICTIONARY_ITEM *item __maybe_unused, v for (i = 0; transaction_chart[i]; i++) { transaction_chart[i](mli, db, *update_every); } + + return 1; } static void do_mssql_databases(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) { - char id[RRD_ID_LENGTH_MAX + 1]; PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_DATABASE]); if (!pObjectType) return; diff --git a/src/collectors/windows.plugin/perflib-netframework.c b/src/collectors/windows.plugin/perflib-netframework.c new file mode 100644 index 00000000000000..c58375f6499edf --- /dev/null +++ b/src/collectors/windows.plugin/perflib-netframework.c @@ -0,0 +1,878 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "windows_plugin.h" +#include 
"windows-internals.h" + +enum netdata_netframework_metrics { + NETDATA_NETFRAMEWORK_EXCEPTIONS, + NETDATA_NETFRAMEWORK_INTEROP, + NETDATA_NETFRAMEWORK_JIT, + NETDATA_NETFRAMEWORK_LOADING, + + NETDATA_NETFRAMEWORK_END +}; + +struct net_framework_instances { + RRDSET *st_clrexception_thrown; + RRDDIM *rd_clrexception_thrown; + + RRDSET *st_clrexception_filters; + RRDDIM *rd_clrexception_filters; + + RRDSET *st_clrexception_finallys; + RRDDIM *rd_clrexception_finallys; + + RRDSET *st_clrexception_total_catch_depth; + RRDDIM *rd_clrexception_total_catch_depth; + + RRDSET *st_clrinterop_com_callable_wrappers; + RRDDIM *rd_clrinterop_com_callable_wrappers; + + RRDSET *st_clrinterop_marshalling; + RRDDIM *rd_clrinterop_marshalling; + + RRDSET *st_clrinterop_interop_stubs_created; + RRDDIM *rd_clrinterop_interop_stubs_created; + + RRDSET *st_clrjit_methods; + RRDDIM *rd_clrjit_methods; + + RRDSET *st_clrjit_time; + RRDDIM *rd_clrjit_time; + + RRDSET *st_clrjit_standard_failures; + RRDDIM *rd_clrjit_standard_failures; + + RRDSET *st_clrjit_il_bytes; + RRDDIM *rd_clrjit_il_bytes; + + RRDSET *st_clrloading_heap_size; + RRDDIM *rd_clrloading_heap_size; + + RRDSET *st_clrloading_app_domains_loaded; + RRDDIM *rd_clrloading_app_domains_loaded; + + RRDSET *st_clrloading_app_domains_unloaded; + RRDDIM *rd_clrloading_app_domains_unloaded; + + RRDSET *st_clrloading_assemblies_loaded; + RRDDIM *rd_clrloading_assemblies_loaded; + + RRDSET *st_clrloading_classes_loaded; + RRDDIM *rd_clrloading_classes_loaded; + + RRDSET *st_clrloading_class_load_failure; + RRDDIM *rd_clrloading_class_load_failure; + + COUNTER_DATA NETFrameworkCLRExceptionThrown; + COUNTER_DATA NETFrameworkCLRExceptionFilters; + COUNTER_DATA NETFrameworkCLRExceptionFinallys; + COUNTER_DATA NETFrameworkCLRExceptionTotalCatchDepth; + + COUNTER_DATA NETFrameworkCLRInteropCOMCallableWrappers; + COUNTER_DATA NETFrameworkCLRInteropMarshalling; + COUNTER_DATA NETFrameworkCLRInteropStubsCreated; + + COUNTER_DATA 
NETFrameworkCLRJITMethods; + COUNTER_DATA NETFrameworkCLRJITPercentTime; + COUNTER_DATA NETFrameworkCLRJITFrequencyTime; + COUNTER_DATA NETFrameworkCLRJITStandardFailures; + COUNTER_DATA NETFrameworkCLRJITIlBytes; + + COUNTER_DATA NETFrameworkCLRLoadingHeapSize; + COUNTER_DATA NETFrameworkCLRLoadingAppDomainsLoaded; + COUNTER_DATA NETFrameworkCLRLoadingAppDomainsUnloaded; + COUNTER_DATA NETFrameworkCLRLoadingAssembliesLoaded; + COUNTER_DATA NETFrameworkCLRLoadingClassesLoaded; + COUNTER_DATA NETFrameworkCLRLoadingClassLoadFailure; +}; + +static inline void initialize_net_framework_processes_keys(struct net_framework_instances *p) { + p->NETFrameworkCLRExceptionFilters.key = "# of Filters / sec"; + p->NETFrameworkCLRExceptionFinallys.key = "# of Finallys / sec"; + p->NETFrameworkCLRExceptionThrown.key = "# of Exceps Thrown / sec"; + p->NETFrameworkCLRExceptionTotalCatchDepth.key = "Throw To Catch Depth / sec"; + + p->NETFrameworkCLRInteropCOMCallableWrappers.key = "# of CCWs"; + p->NETFrameworkCLRInteropMarshalling.key = "# of Stubs"; + p->NETFrameworkCLRInteropStubsCreated.key = "# of marshalling"; + + p->NETFrameworkCLRJITMethods.key = "# of Methods Jitted"; + p->NETFrameworkCLRJITPercentTime.key = "% Time in Jit"; + p->NETFrameworkCLRJITFrequencyTime.key = "IL Bytes Jitted / sec"; + p->NETFrameworkCLRJITStandardFailures.key = "Standard Jit Failures"; + p->NETFrameworkCLRJITIlBytes.key = "# of IL Bytes Jitted"; + + p->NETFrameworkCLRLoadingHeapSize.key = "Bytes in Loader Heap"; + p->NETFrameworkCLRLoadingAppDomainsLoaded.key = "Rate of appdomains"; + p->NETFrameworkCLRLoadingAppDomainsUnloaded.key = "Total appdomains unloaded"; + p->NETFrameworkCLRLoadingAssembliesLoaded.key = "Total Assemblies"; + p->NETFrameworkCLRLoadingClassesLoaded.key = "Total Classes Loaded"; + p->NETFrameworkCLRLoadingClassLoadFailure.key = "Total # of Load Failures"; +} + +void dict_net_framework_processes_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data 
__maybe_unused) { + struct net_framework_instances *p = value; + initialize_net_framework_processes_keys(p); +} + +static DICTIONARY *processes = NULL; + +static void initialize(void) { + processes = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | + DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct net_framework_instances)); + + dictionary_register_insert_callback(processes, dict_net_framework_processes_insert_cb, NULL); +} + +static void netdata_framework_clr_exceptions(PERF_DATA_BLOCK *pDataBlock, + PERF_OBJECT_TYPE *pObjectType, + int update_every) +{ + PERF_INSTANCE_DEFINITION *pi = NULL; + char id[RRD_ID_LENGTH_MAX + 1]; + for(LONG i = 0; i < pObjectType->NumInstances ; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if(strcasecmp(windows_shared_buffer, "_Global_") == 0) + continue; + + netdata_fix_chart_name(windows_shared_buffer); + struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p)); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionThrown)) { + if (!p->st_clrexception_thrown) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_thrown", windows_shared_buffer); + p->st_clrexception_thrown = rrdset_create_localhost("netframework" + , id, NULL + , "exceptions" + , "netframework.clrexception_thrown" + , "Thrown exceptions" + , "exceptions/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROWN + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrexception_thrown_total", windows_shared_buffer); + p->rd_clrexception_thrown = rrddim_add(p->st_clrexception_thrown, + id, + "exceptions", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + 
rrdlabels_add(p->st_clrexception_thrown->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrexception_thrown, + p->rd_clrexception_thrown, + (collected_number)p->NETFrameworkCLRExceptionThrown.current.Data); + rrdset_done(p->st_clrexception_thrown); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionFilters)) { + if (!p->st_clrexception_filters) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_filters", windows_shared_buffer); + p->st_clrexception_filters = rrdset_create_localhost("netframework" + , id, NULL + , "exceptions" + , "netframework.clrexception_filters" + , "Thrown exceptions filters" + , "filters/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_EXCEPTION_FILTERS + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrexception_filters_total", windows_shared_buffer); + p->rd_clrexception_filters = rrddim_add(p->st_clrexception_filters, + id, + "filters", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrexception_filters->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrexception_filters, + p->rd_clrexception_filters, + (collected_number)p->NETFrameworkCLRExceptionFilters.current.Data); + rrdset_done(p->st_clrexception_filters); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionFinallys)) { + if (!p->st_clrexception_finallys) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_finallys", windows_shared_buffer); + p->st_clrexception_finallys = rrdset_create_localhost("netframework" + , id, NULL + , "exceptions" + , "netframework.clrexception_finallys" + , "Executed finally blocks" + , "finallys/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_EXCEPTION_FINALLYS + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, 
RRD_ID_LENGTH_MAX, "netframework_%s_clrexception_finallys_total", windows_shared_buffer); + p->rd_clrexception_finallys = rrddim_add(p->st_clrexception_finallys, + id, + "finallys", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrexception_finallys->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrexception_finallys, + p->rd_clrexception_finallys, + (collected_number)p->NETFrameworkCLRExceptionFinallys.current.Data); + rrdset_done(p->st_clrexception_finallys); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionTotalCatchDepth)) { + if (!p->st_clrexception_total_catch_depth) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_throw_to_catch_depth", windows_shared_buffer); + p->st_clrexception_total_catch_depth = rrdset_create_localhost("netframework" + , id, NULL + , "exceptions" + , "netframework.clrexception_throw_to_catch_depth" + , "Traversed stack frames" + , "stack_frames/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROW_TO_CATCH_DEPTH + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrexception_throw_to_catch_depth_total", windows_shared_buffer); + p->rd_clrexception_total_catch_depth = rrddim_add(p->st_clrexception_total_catch_depth, + id, + "traversed", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrexception_total_catch_depth->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrexception_total_catch_depth, + p->rd_clrexception_total_catch_depth, + (collected_number)p->NETFrameworkCLRExceptionTotalCatchDepth.current.Data); + rrdset_done(p->st_clrexception_total_catch_depth); + } + } +} + +static void netdata_framework_clr_interop(PERF_DATA_BLOCK *pDataBlock, + PERF_OBJECT_TYPE *pObjectType, + int update_every) +{ + PERF_INSTANCE_DEFINITION *pi = NULL; + char 
id[RRD_ID_LENGTH_MAX + 1]; + for(LONG i = 0; i < pObjectType->NumInstances ; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if(strcasecmp(windows_shared_buffer, "_Global_") == 0) + continue; + + netdata_fix_chart_name(windows_shared_buffer); + struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p)); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRInteropCOMCallableWrappers)) { + if (!p->st_clrinterop_com_callable_wrappers) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrinterop_com_callable_wrappers", windows_shared_buffer); + p->st_clrinterop_com_callable_wrappers = rrdset_create_localhost("netframework" + , id, NULL + , "interop" + , "netframework.clrinterop_com_callable_wrappers" + , "COM callable wrappers (CCW)" + , "ccw/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_INTEROP_CCW + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrinterop_com_callable_wrappers_total", windows_shared_buffer); + p->rd_clrinterop_com_callable_wrappers = rrddim_add(p->st_clrinterop_com_callable_wrappers, + id, + "com_callable_wrappers", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrinterop_com_callable_wrappers->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrinterop_com_callable_wrappers, + p->rd_clrinterop_com_callable_wrappers, + (collected_number)p->NETFrameworkCLRInteropCOMCallableWrappers.current.Data); + rrdset_done(p->st_clrinterop_com_callable_wrappers); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRInteropMarshalling)) { + if (!p->st_clrinterop_marshalling) { + snprintfz(id, 
RRD_ID_LENGTH_MAX, "%s_clrinterop_interop_marshalling", windows_shared_buffer); + p->st_clrinterop_marshalling = rrdset_create_localhost("netframework" + , id, NULL + , "interop" + , "netframework.clrinterop_interop_marshallings" + , "Arguments and return values marshallings" + , "marshalling/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_INTEROP_MARSHALLING + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrinterop_interop_marshalling_total", windows_shared_buffer); + p->rd_clrinterop_marshalling = rrddim_add(p->st_clrinterop_marshalling, + id, + "marshallings", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrinterop_marshalling->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrinterop_marshalling, + p->rd_clrinterop_marshalling, + (collected_number)p->NETFrameworkCLRInteropMarshalling.current.Data); + rrdset_done(p->st_clrinterop_marshalling); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRInteropStubsCreated)) { + if (!p->st_clrinterop_interop_stubs_created) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrinterop_interop_stubs_created", windows_shared_buffer); + p->st_clrinterop_interop_stubs_created = rrdset_create_localhost("netframework" + , id, NULL + , "interop" + , "netframework.clrinterop_interop_stubs_created" + , "Created stubs" + , "stubs/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_INTEROP_STUBS_CREATED + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrinterop_interop_stubs_created_total", windows_shared_buffer); + p->rd_clrinterop_interop_stubs_created = rrddim_add(p->st_clrinterop_interop_stubs_created, + id, + "created", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrinterop_interop_stubs_created->rrdlabels, + "process", + windows_shared_buffer, + 
RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrinterop_interop_stubs_created, + p->rd_clrinterop_interop_stubs_created, + (collected_number)p->NETFrameworkCLRInteropStubsCreated.current.Data); + rrdset_done(p->st_clrinterop_interop_stubs_created); + } + } +} + +static void netdata_framework_clr_jit(PERF_DATA_BLOCK *pDataBlock, + PERF_OBJECT_TYPE *pObjectType, + int update_every) +{ + PERF_INSTANCE_DEFINITION *pi = NULL; + char id[RRD_ID_LENGTH_MAX + 1]; + for(LONG i = 0; i < pObjectType->NumInstances ; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + if(strcasecmp(windows_shared_buffer, "_Global_") == 0) + continue; + + netdata_fix_chart_name(windows_shared_buffer); + struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p)); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITMethods)) { + if (!p->st_clrjit_methods) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_methods", windows_shared_buffer); + p->st_clrjit_methods = rrdset_create_localhost("netframework" + , id, NULL + , "jit" + , "netframework.clrjit_methods" + , "JIT-compiled methods" + , "methods/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_JIT_METHODS + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrjit_methods_total", windows_shared_buffer); + p->rd_clrjit_methods = rrddim_add(p->st_clrjit_methods, + id, + "jit-compiled", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrjit_methods->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrjit_methods, + p->rd_clrjit_methods, + (collected_number)p->NETFrameworkCLRJITMethods.current.Data); + 
rrdset_done(p->st_clrjit_methods); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITFrequencyTime) && + perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITPercentTime)) { + if (!p->st_clrjit_time) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_time", windows_shared_buffer); + p->st_clrjit_time = rrdset_create_localhost("netframework" + , id, NULL + , "jit" + , "netframework.clrjit_time" + , "Time spent in JIT compilation" + , "percentage" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_JIT_TIME + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrjit_time_percent", windows_shared_buffer); + p->rd_clrjit_time = rrddim_add(p->st_clrjit_time, + id, + "time", + 1, + 100, + RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_clrjit_time->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + double percTime = (double) p->NETFrameworkCLRJITPercentTime.current.Data; + percTime /= (double) p->NETFrameworkCLRJITFrequencyTime.current.Data; + percTime *= 100; + rrddim_set_by_pointer(p->st_clrjit_time, + p->rd_clrjit_time, + (collected_number) percTime); + rrdset_done(p->st_clrjit_time); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITStandardFailures)) { + if (!p->st_clrjit_standard_failures) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_standard_failures", windows_shared_buffer); + p->st_clrjit_standard_failures = rrdset_create_localhost("netframework" + , id, NULL + , "jit" + , "netframework.clrjit_standard_failures" + , "JIT compiler failures" + , "failures/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_JIT_STANDARD_FAILURES + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrjit_standard_failures_total", windows_shared_buffer); + p->rd_clrjit_standard_failures = rrddim_add(p->st_clrjit_standard_failures, + 
id, + "failures", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrjit_standard_failures->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrjit_standard_failures, + p->rd_clrjit_standard_failures, + (collected_number)p->NETFrameworkCLRJITStandardFailures.current.Data); + rrdset_done(p->st_clrjit_standard_failures); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITIlBytes)) { + if (!p->st_clrjit_il_bytes) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_il_bytes", windows_shared_buffer); + p->st_clrjit_il_bytes = rrdset_create_localhost("netframework" + , id, NULL + , "jit" + , "netframework.clrjit_il_bytes" + , "Compiled Microsoft intermediate language (MSIL) bytes" + , "bytes/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_JIT_IL_BYTES + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrjit_il_bytes_total", windows_shared_buffer); + p->rd_clrjit_il_bytes = rrddim_add(p->st_clrjit_il_bytes, + id, + "compiled_msil", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrjit_il_bytes->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrjit_il_bytes, + p->rd_clrjit_il_bytes, + (collected_number)p->NETFrameworkCLRJITIlBytes.current.Data); + rrdset_done(p->st_clrjit_il_bytes); + } + } +} + +static void netdata_framework_clr_loading(PERF_DATA_BLOCK *pDataBlock, + PERF_OBJECT_TYPE *pObjectType, + int update_every) +{ + char id[RRD_ID_LENGTH_MAX + 1]; + PERF_INSTANCE_DEFINITION *pi = NULL; + for(LONG i = 0; i < pObjectType->NumInstances ; i++) { + pi = perflibForEachInstance(pDataBlock, pObjectType, pi); + if (!pi) + break; + + if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer))) + strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1); + + 
if(strcasecmp(windows_shared_buffer, "_Global_") == 0) + continue; + + netdata_fix_chart_name(windows_shared_buffer); + struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p)); + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingHeapSize)) { + if (!p->st_clrloading_heap_size) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_loader_heap_size", windows_shared_buffer); + p->st_clrloading_heap_size = rrdset_create_localhost("netframework" + , id, NULL + , "loading" + , "netframework.clrloading_loader_heap_size" + , "Memory committed by class loader" + , "bytes" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_LOADING_HEAP_SIZE + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrloading_loader_heap_size_bytes", windows_shared_buffer); + p->rd_clrloading_heap_size = rrddim_add(p->st_clrloading_heap_size, + id, + "committed", + 1, + 1, + RRD_ALGORITHM_ABSOLUTE); + + rrdlabels_add(p->st_clrloading_heap_size->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrloading_heap_size, + p->rd_clrloading_heap_size, + (collected_number)p->NETFrameworkCLRLoadingHeapSize.current.Data); + rrdset_done(p->st_clrloading_heap_size); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingAppDomainsLoaded)) { + if (!p->st_clrloading_app_domains_loaded) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_appdomains_loaded", windows_shared_buffer); + p->st_clrloading_app_domains_loaded = rrdset_create_localhost("netframework" + , id, NULL + , "loading" + , "netframework.clrloading_appdomains_loaded" + , "Loaded application domains" + , "domain/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_LOADED + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, 
"netframework_%s_clrloading_appdomains_loaded_total", windows_shared_buffer); + p->rd_clrloading_app_domains_loaded = rrddim_add(p->st_clrloading_app_domains_loaded, + id, + "loaded", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrloading_app_domains_loaded->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrloading_app_domains_loaded, + p->rd_clrloading_app_domains_loaded, + (collected_number)p->NETFrameworkCLRLoadingAppDomainsLoaded.current.Data); + rrdset_done(p->st_clrloading_app_domains_loaded); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingAppDomainsUnloaded)) { + if (!p->st_clrloading_app_domains_unloaded) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_appdomains_unloaded", windows_shared_buffer); + p->st_clrloading_app_domains_unloaded = rrdset_create_localhost("netframework" + , id, NULL + , "loading" + , "netframework.clrloading_appdomains_unloaded" + , "Unloaded application domains" + , "domain/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_UNLOADED + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrloading_appdomains_unloaded_total", windows_shared_buffer); + p->rd_clrloading_app_domains_unloaded = rrddim_add(p->st_clrloading_app_domains_unloaded, + id, + "unloaded", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrloading_app_domains_unloaded->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrloading_app_domains_unloaded, + p->rd_clrloading_app_domains_unloaded, + (collected_number)p->NETFrameworkCLRLoadingAppDomainsUnloaded.current.Data); + rrdset_done(p->st_clrloading_app_domains_unloaded); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingAssembliesLoaded)) { + if (!p->st_clrloading_assemblies_loaded) { + 
snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_assemblies_loaded", windows_shared_buffer); + p->st_clrloading_assemblies_loaded = rrdset_create_localhost("netframework" + , id, NULL + , "loading" + , "netframework.clrloading_assemblies_loaded" + , "Loaded assemblies" + , "assemblies/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_LOADING_ASSEMBLIES_LOADED + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrloading_assemblies_loaded_total", windows_shared_buffer); + p->rd_clrloading_assemblies_loaded = rrddim_add(p->st_clrloading_assemblies_loaded, + id, + "loaded", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrloading_assemblies_loaded->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrloading_assemblies_loaded, + p->rd_clrloading_assemblies_loaded, + (collected_number)p->NETFrameworkCLRLoadingAssembliesLoaded.current.Data); + rrdset_done(p->st_clrloading_assemblies_loaded); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingClassesLoaded)) { + if (!p->st_clrloading_classes_loaded) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_classes_loaded", windows_shared_buffer); + p->st_clrloading_classes_loaded = rrdset_create_localhost("netframework" + , id, NULL + , "loading" + , "netframework.clrloading_classes_loaded" + , "Loaded classes in all assemblies" + , "classes/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_LOADING_CLASSES_LOADED + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrloading_classes_loaded_total", windows_shared_buffer); + p->rd_clrloading_classes_loaded = rrddim_add(p->st_clrloading_classes_loaded, + id, + "loaded", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrloading_classes_loaded->rrdlabels, + "process", + windows_shared_buffer, + 
RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrloading_classes_loaded, + p->rd_clrloading_classes_loaded, + (collected_number)p->NETFrameworkCLRLoadingClassesLoaded.current.Data); + rrdset_done(p->st_clrloading_classes_loaded); + } + + if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingClassLoadFailure)) { + if (!p->st_clrloading_class_load_failure) { + snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_class_load_failure", windows_shared_buffer); + p->st_clrloading_class_load_failure = rrdset_create_localhost("netframework" + , id, NULL + , "loading" + , "netframework.clrloading_class_load_failures" + , "Class load failures" + , "failures/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetFramework" + , PRIO_NETFRAMEWORK_CLR_LOADING_CLASS_LOAD_FAILURE + , update_every + , RRDSET_TYPE_LINE + ); + + snprintfz(id, RRD_ID_LENGTH_MAX, "netframework_%s_clrloading_class_load_failures_total", windows_shared_buffer); + p->rd_clrloading_class_load_failure = rrddim_add(p->st_clrloading_class_load_failure, + id, + "class_load", + 1, + 1, + RRD_ALGORITHM_INCREMENTAL); + + rrdlabels_add(p->st_clrloading_class_load_failure->rrdlabels, + "process", + windows_shared_buffer, + RRDLABEL_SRC_AUTO); + } + + rrddim_set_by_pointer(p->st_clrloading_class_load_failure, + p->rd_clrloading_class_load_failure, + (collected_number)p->NETFrameworkCLRLoadingClassLoadFailure.current.Data); + rrdset_done(p->st_clrloading_class_load_failure); + } + } +} + +struct netdata_netframework_objects { + char *object; + void (*fnct)(PERF_DATA_BLOCK *, PERF_OBJECT_TYPE *, int); +} netframewrk_obj[NETDATA_NETFRAMEWORK_END] = { + { + .fnct = netdata_framework_clr_exceptions, + .object = ".NET CLR Exceptions" + }, + { + .fnct = netdata_framework_clr_interop, + .object = ".NET CLR Interop" + }, + { + .fnct = netdata_framework_clr_jit, + .object = ".NET CLR Jit" + }, + { + .fnct = netdata_framework_clr_loading, + .object = ".NET CLR Loading" + } +}; + +int do_PerflibNetFramework(int 
update_every, usec_t dt __maybe_unused) { + static bool initialized = false; + + if(unlikely(!initialized)) { + initialize(); + initialized = true; + } + + int i; + for (i = 0; i < NETDATA_NETFRAMEWORK_END; i++) { + DWORD id = RegistryFindIDByName(netframewrk_obj[i].object); + if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) + continue; + + PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); + if(!pDataBlock) continue; + + PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, netframewrk_obj[i].object); + if(!pObjectType) continue; + + netframewrk_obj[i].fnct(pDataBlock, pObjectType, update_every); + } + + return 0; +} diff --git a/src/collectors/windows.plugin/perflib-network.c b/src/collectors/windows.plugin/perflib-network.c index 308984c6bbfd1e..1e5f8c04ff90d4 100644 --- a/src/collectors/windows.plugin/perflib-network.c +++ b/src/collectors/windows.plugin/perflib-network.c @@ -486,6 +486,7 @@ static bool do_network_protocol(PERF_DATA_BLOCK *pDataBlock, int update_every, s // network interfaces struct network_interface { + usec_t last_collected; bool collected_metadata; struct { @@ -498,6 +499,8 @@ struct network_interface { } packets; struct { + const RRDVAR_ACQUIRED *chart_var_speed; + COUNTER_DATA received; COUNTER_DATA sent; @@ -505,16 +508,96 @@ struct network_interface { RRDDIM *rd_received; RRDDIM *rd_sent; } traffic; + + struct { + COUNTER_DATA current_bandwidth; + RRDSET *st; + RRDDIM *rd; + } speed; + + struct { + COUNTER_DATA received; + COUNTER_DATA outbound; + + RRDSET *st; + RRDDIM *rd_received; + RRDDIM *rd_outbound; + } discards; + + struct { + COUNTER_DATA received; + COUNTER_DATA outbound; + + RRDSET *st; + RRDDIM *rd_received; + RRDDIM *rd_outbound; + } errors; + + struct { + COUNTER_DATA length; + RRDSET *st; + RRDDIM *rd; + } queue; + + struct { + COUNTER_DATA connections; + RRDSET *st; + RRDDIM *rd; + } chimney; + + struct { + COUNTER_DATA connections; + COUNTER_DATA packets; + COUNTER_DATA exceptions; + COUNTER_DATA 
average_packet_size; + + RRDSET *st_connections; + RRDDIM *rd_connections; + + RRDSET *st_packets; + RRDDIM *rd_packets; + + RRDSET *st_exceptions; + RRDDIM *rd_exceptions; + + RRDSET *st_average_packet_size; + RRDDIM *rd_average_packet_size; + } rsc; }; static DICTIONARY *physical_interfaces = NULL, *virtual_interfaces = NULL; -static void network_interface_init(struct network_interface *ni) { - ni->packets.received.key = "Packets Received/sec"; - ni->packets.sent.key = "Packets Sent/sec"; +static void network_interface_init(struct network_interface *d) { + d->packets.received.key = "Packets Received/sec"; + d->packets.sent.key = "Packets Sent/sec"; + d->traffic.received.key = "Bytes Received/sec"; + d->traffic.sent.key = "Bytes Sent/sec"; + d->speed.current_bandwidth.key = "Current Bandwidth"; + d->discards.received.key = "Packets Received Discarded"; + d->discards.outbound.key = "Packets Outbound Discarded"; + d->errors.received.key = "Packets Received Errors"; + d->errors.outbound.key = "Packets Outbound Errors"; + d->queue.length.key = "Output Queue Length"; + d->chimney.connections.key = "Offloaded Connections"; + d->rsc.connections.key = "TCP Active RSC Connections"; + d->rsc.packets.key = "TCP RSC Coalesced Packets/sec"; + d->rsc.exceptions.key = "TCP RSC Exceptions/sec"; + d->rsc.average_packet_size.key = "TCP RSC Average Packet Size"; +} - ni->traffic.received.key = "Bytes Received/sec"; - ni->traffic.sent.key = "Bytes Sent/sec"; +static void network_interface_cleanup(struct network_interface *d) { + rrdvar_chart_variable_release(d->traffic.st, d->traffic.chart_var_speed); + rrdset_is_obsolete___safe_from_collector_thread(d->packets.st); + rrdset_is_obsolete___safe_from_collector_thread(d->traffic.st); + rrdset_is_obsolete___safe_from_collector_thread(d->speed.st); + rrdset_is_obsolete___safe_from_collector_thread(d->discards.st); + rrdset_is_obsolete___safe_from_collector_thread(d->errors.st); + 
rrdset_is_obsolete___safe_from_collector_thread(d->queue.st); + rrdset_is_obsolete___safe_from_collector_thread(d->chimney.st); + rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_connections); + rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_packets); + rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_exceptions); + rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_average_packet_size); } void dict_interface_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { @@ -543,8 +626,8 @@ static bool is_physical_interface(const char *name) { return d ? true : false; } -static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, bool physical) { - DICTIONARY *dict = physical_interfaces; +static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, bool physical, usec_t now_ut) { + DICTIONARY *dict = physical ? physical_interfaces : virtual_interfaces; PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, physical ? 
"Network Interface" : "Network Adapter"); if(!pObjectType) return false; @@ -567,6 +650,7 @@ static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, continue; struct network_interface *d = dictionary_set(dict, windows_shared_buffer, NULL, sizeof(*d)); + d->last_collected = now_ut; if(!d->collected_metadata) { // TODO - get metadata about the network interface @@ -577,7 +661,7 @@ static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->traffic.sent)) { if(d->traffic.received.current.Data == 0 && d->traffic.sent.current.Data == 0) - // this interface has not received or sent any traffic + // this interface has not received or sent any traffic yet continue; if (unlikely(!d->traffic.st)) { @@ -601,6 +685,9 @@ static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, d->traffic.rd_received = rrddim_add(d->traffic.st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); d->traffic.rd_sent = rrddim_add(d->traffic.st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL); + + d->traffic.chart_var_speed = rrdvar_chart_variable_add_and_acquire(d->traffic.st, "nic_speed_max"); + rrdvar_chart_variable_set(d->traffic.st, d->traffic.chart_var_speed, NAN); } total_received += d->traffic.received.current.Data; @@ -631,7 +718,7 @@ static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, rrdset_flag_set(d->packets.st, RRDSET_FLAG_DETAIL); - add_interface_labels(d->traffic.st, windows_shared_buffer, physical); + add_interface_labels(d->packets.st, windows_shared_buffer, physical); d->packets.rd_received = rrddim_add(d->packets.st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); d->packets.rd_sent = rrddim_add(d->packets.st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); @@ -641,6 +728,261 @@ static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, 
rrddim_set_by_pointer(d->packets.st, d->packets.rd_sent, (collected_number)d->packets.sent.current.Data); rrdset_done(d->packets.st); } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->speed.current_bandwidth)) { + if(unlikely(!d->speed.st)) { + d->speed.st = rrdset_create_localhost( + "net_speed" + , windows_shared_buffer + , NULL + , windows_shared_buffer + , "net.speed" + , "Interface Speed" + , "kilobits/s" + , PLUGIN_WINDOWS_NAME + , "PerflibNetwork" + , NETDATA_CHART_PRIO_FIRST_NET_IFACE + 10 + , update_every + , RRDSET_TYPE_LINE + ); + + rrdset_flag_set(d->speed.st, RRDSET_FLAG_DETAIL); + + add_interface_labels(d->speed.st, windows_shared_buffer, physical); + + d->speed.rd = rrddim_add(d->speed.st, "speed", NULL, 1, BITS_IN_A_KILOBIT, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(d->speed.st, d->speed.rd, (collected_number)d->speed.current_bandwidth.current.Data); + rrdset_done(d->speed.st); + + rrdvar_chart_variable_set(d->traffic.st, d->traffic.chart_var_speed, + (NETDATA_DOUBLE)d->speed.current_bandwidth.current.Data / BITS_IN_A_KILOBIT); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->errors.received) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->errors.outbound)) { + + if (unlikely(!d->errors.st)) { + d->errors.st = rrdset_create_localhost( + "net_errors", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.errors", + "Interface Errors", + "errors/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 3, + update_every, + RRDSET_TYPE_LINE); + + rrdset_flag_set(d->errors.st, RRDSET_FLAG_DETAIL); + + add_interface_labels(d->errors.st, windows_shared_buffer, physical); + + d->errors.rd_received = rrddim_add(d->errors.st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->errors.rd_outbound = rrddim_add(d->errors.st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(d->errors.st, d->errors.rd_received, 
(collected_number)d->errors.received.current.Data); + rrddim_set_by_pointer(d->errors.st, d->errors.rd_outbound, (collected_number)d->errors.outbound.current.Data); + rrdset_done(d->errors.st); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->discards.received) && + perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->discards.outbound)) { + + if (unlikely(!d->discards.st)) { + d->discards.st = rrdset_create_localhost( + "net_drops", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.drops", + "Interface Drops", + "drops/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 4, + update_every, + RRDSET_TYPE_LINE); + + rrdset_flag_set(d->discards.st, RRDSET_FLAG_DETAIL); + + add_interface_labels(d->discards.st, windows_shared_buffer, physical); + + d->discards.rd_received = rrddim_add(d->discards.st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + d->discards.rd_outbound = rrddim_add(d->discards.st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(d->discards.st, d->discards.rd_received, (collected_number)d->discards.received.current.Data); + rrddim_set_by_pointer(d->discards.st, d->discards.rd_outbound, (collected_number)d->discards.outbound.current.Data); + rrdset_done(d->discards.st); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->queue.length)) { + if (unlikely(!d->queue.st)) { + d->queue.st = rrdset_create_localhost( + "net_queue_length", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.queue_length", + "Interface Output Queue Length", + "packets", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 5, + update_every, + RRDSET_TYPE_LINE); + + rrdset_flag_set(d->queue.st, RRDSET_FLAG_DETAIL); + + add_interface_labels(d->queue.st, windows_shared_buffer, physical); + + d->queue.rd = rrddim_add(d->queue.st, "length", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + 
rrddim_set_by_pointer(d->queue.st, d->queue.rd, (collected_number)d->queue.length.current.Data); + rrdset_done(d->queue.st); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.connections)) { + if (unlikely(!d->rsc.st_connections)) { + d->rsc.st_connections = rrdset_create_localhost( + "net_rsc_connections", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.rsc_connections", + "Active TCP Connections Offloaded by RSC", + "connections", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 6, + update_every, + RRDSET_TYPE_LINE); + + rrdset_flag_set(d->rsc.st_connections, RRDSET_FLAG_DETAIL); + + add_interface_labels(d->rsc.st_connections, windows_shared_buffer, physical); + + d->rsc.rd_connections = rrddim_add(d->rsc.st_connections, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(d->rsc.st_connections, d->rsc.rd_connections, (collected_number)d->rsc.connections.current.Data); + rrdset_done(d->rsc.st_connections); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.packets)) { + if (unlikely(!d->rsc.st_packets)) { + d->rsc.st_packets = rrdset_create_localhost( + "net_rsc_packets", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.rsc_packets", + "TCP RSC Coalesced Packets", + "packets/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 7, + update_every, + RRDSET_TYPE_LINE); + + rrdset_flag_set(d->rsc.st_packets, RRDSET_FLAG_DETAIL); + + add_interface_labels(d->rsc.st_packets, windows_shared_buffer, physical); + + d->rsc.rd_packets = rrddim_add(d->rsc.st_packets, "packets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(d->rsc.st_packets, d->rsc.rd_packets, (collected_number)d->rsc.packets.current.Data); + rrdset_done(d->rsc.st_packets); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.exceptions)) { + if (unlikely(!d->rsc.st_exceptions)) { + 
d->rsc.st_exceptions = rrdset_create_localhost( + "net_rsc_exceptions", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.rsc_exceptions", + "TCP RSC Exceptions", + "exceptions/s", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 8, + update_every, + RRDSET_TYPE_LINE); + + rrdset_flag_set(d->rsc.st_exceptions, RRDSET_FLAG_DETAIL); + + add_interface_labels(d->rsc.st_exceptions, windows_shared_buffer, physical); + + d->rsc.rd_exceptions = rrddim_add(d->rsc.st_exceptions, "exceptions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + } + + rrddim_set_by_pointer(d->rsc.st_exceptions, d->rsc.rd_exceptions, (collected_number)d->rsc.exceptions.current.Data); + rrdset_done(d->rsc.st_exceptions); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.average_packet_size)) { + if (unlikely(!d->rsc.st_average_packet_size)) { + d->rsc.st_average_packet_size = rrdset_create_localhost( + "net_rsc_average_packet_size", + windows_shared_buffer, + NULL, + windows_shared_buffer, + "net.rsc_average_packet_size", + "TCP RSC Average Packet Size", + "bytes", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 9, + update_every, + RRDSET_TYPE_LINE); + + rrdset_flag_set(d->rsc.st_average_packet_size, RRDSET_FLAG_DETAIL); + + add_interface_labels(d->rsc.st_average_packet_size, windows_shared_buffer, physical); + + d->rsc.rd_average_packet_size = rrddim_add(d->rsc.st_average_packet_size, "average", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(d->rsc.st_average_packet_size, d->rsc.rd_average_packet_size, (collected_number)d->rsc.average_packet_size.current.Data); + rrdset_done(d->rsc.st_average_packet_size); + } + + if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->chimney.connections)) { + if (unlikely(!d->chimney.st)) { + d->chimney.st = rrdset_create_localhost( + "net_chimney_connections", + windows_shared_buffer, + NULL, + windows_shared_buffer, + 
"net.chimney_connections", + "Active TCP Connections Offloaded with Chimney", + "connections", + PLUGIN_WINDOWS_NAME, + "PerflibNetwork", + NETDATA_CHART_PRIO_FIRST_NET_IFACE + 10, + update_every, + RRDSET_TYPE_LINE); + + rrdset_flag_set(d->chimney.st, RRDSET_FLAG_DETAIL); + + add_interface_labels(d->chimney.st, windows_shared_buffer, physical); + + d->chimney.rd = rrddim_add(d->chimney.st, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); + } + + rrddim_set_by_pointer(d->chimney.st, d->chimney.rd, (collected_number)d->chimney.connections.current.Data); + rrdset_done(d->chimney.st); + } } if(physical) { @@ -671,6 +1013,19 @@ static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, rrdset_done(st); } + // cleanup + { + struct network_interface *d; + dfe_start_write(dict, d) { + if(d->last_collected < now_ut) { + network_interface_cleanup(d); + dictionary_del(dict, d_dfe.name); + } + } + dfe_done(d); + dictionary_garbage_collect(dict); + } + return true; } @@ -689,8 +1044,9 @@ int do_PerflibNetwork(int update_every, usec_t dt __maybe_unused) { PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id); if(!pDataBlock) return -1; - do_network_interface(pDataBlock, update_every, true); - do_network_interface(pDataBlock, update_every, false); + usec_t now_ut = now_monotonic_usec(); + do_network_interface(pDataBlock, update_every, true, now_ut); + do_network_interface(pDataBlock, update_every, false, now_ut); struct network_protocol *tcp4 = NULL, *tcp6 = NULL; for(size_t i = 0; networks[i].protocol ;i++) { diff --git a/src/collectors/windows.plugin/windows_plugin.c b/src/collectors/windows.plugin/windows_plugin.c index 70a07eed35e543..6f53c738b1d694 100644 --- a/src/collectors/windows.plugin/windows_plugin.c +++ b/src/collectors/windows.plugin/windows_plugin.c @@ -13,23 +13,25 @@ static struct proc_module { } win_modules[] = { // system metrics - {.name = "GetSystemUptime", .dim = "GetSystemUptime", .enabled = CONFIG_BOOLEAN_YES, .func = 
do_GetSystemUptime}, - {.name = "GetSystemRAM", .dim = "GetSystemRAM", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemRAM}, + {.name = "GetSystemUptime", .dim = "GetSystemUptime", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemUptime}, + {.name = "GetSystemRAM", .dim = "GetSystemRAM", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemRAM}, // the same is provided by PerflibProcessor, with more detailed analysis - //{.name = "GetSystemCPU", .dim = "GetSystemCPU", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemCPU}, + //{.name = "GetSystemCPU", .dim = "GetSystemCPU", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemCPU}, - {.name = "PerflibProcesses", .dim = "PerflibProcesses", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibProcesses}, - {.name = "PerflibProcessor", .dim = "PerflibProcessor", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibProcessor}, - {.name = "PerflibMemory", .dim = "PerflibMemory", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibMemory}, - {.name = "PerflibStorage", .dim = "PerflibStorage", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibStorage}, - {.name = "PerflibNetwork", .dim = "PerflibNetwork", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibNetwork}, - {.name = "PerflibObjects", .dim = "PerflibObjects", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibObjects}, + {.name = "PerflibProcesses", .dim = "PerflibProcesses", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibProcesses}, + {.name = "PerflibProcessor", .dim = "PerflibProcessor", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibProcessor}, + {.name = "PerflibMemory", .dim = "PerflibMemory", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibMemory}, + {.name = "PerflibStorage", .dim = "PerflibStorage", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibStorage}, + {.name = "PerflibNetwork", .dim = "PerflibNetwork", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibNetwork}, + {.name = "PerflibObjects", .dim = "PerflibObjects", .enabled = CONFIG_BOOLEAN_YES, .func = 
do_PerflibObjects}, - {.name = "PerflibThermalZone", .dim = "PerflibThermalZone", .enabled = CONFIG_BOOLEAN_NO, .func = do_PerflibThermalZone}, + {.name = "PerflibThermalZone", .dim = "PerflibThermalZone", .enabled = CONFIG_BOOLEAN_NO, .func = do_PerflibThermalZone}, - {.name = "PerflibWebService", .dim = "PerflibWebService", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibWebService}, - {.name = "PerflibMSSQL", .dim = "PerflibMSSQL", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibMSSQL}, + {.name = "PerflibWebService", .dim = "PerflibWebService", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibWebService}, + {.name = "PerflibMSSQL", .dim = "PerflibMSSQL", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibMSSQL}, + + {.name = "PerflibNetFramework", .dim = "PerflibNetFramework", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibNetFramework}, // the terminator of this array {.name = NULL, .dim = NULL, .func = NULL} diff --git a/src/collectors/windows.plugin/windows_plugin.h b/src/collectors/windows.plugin/windows_plugin.h index aee6e526b65ae7..1f71ee7428d775 100644 --- a/src/collectors/windows.plugin/windows_plugin.h +++ b/src/collectors/windows.plugin/windows_plugin.h @@ -27,6 +27,7 @@ int do_PerflibObjects(int update_every, usec_t dt); int do_PerflibThermalZone(int update_every, usec_t dt); int do_PerflibWebService(int update_every, usec_t dt); int do_PerflibMSSQL(int update_every, usec_t dt); +int do_PerflibNetFramework(int update_every, usec_t dt); enum PERFLIB_PRIO { PRIO_WEBSITE_IIS_TRAFFIC = 21000, // PRIO selected, because APPS is using 20YYY @@ -74,7 +75,28 @@ enum PERFLIB_PRIO { PRIO_MSSQL_LOCKS_WAIT, PRIO_MSSQL_LOCKS_DEADLOCK, - PRIO_MSSQL_SQL_ERRORS + PRIO_MSSQL_SQL_ERRORS, + + PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROWN, + PRIO_NETFRAMEWORK_CLR_EXCEPTION_FILTERS, + PRIO_NETFRAMEWORK_CLR_EXCEPTION_FINALLYS, + PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROW_TO_CATCH_DEPTH, + + PRIO_NETFRAMEWORK_CLR_INTEROP_CCW, + PRIO_NETFRAMEWORK_CLR_INTEROP_MARSHALLING, + 
PRIO_NETFRAMEWORK_CLR_INTEROP_STUBS_CREATED, + + PRIO_NETFRAMEWORK_CLR_JIT_METHODS, + PRIO_NETFRAMEWORK_CLR_JIT_TIME, + PRIO_NETFRAMEWORK_CLR_JIT_STANDARD_FAILURES, + PRIO_NETFRAMEWORK_CLR_JIT_IL_BYTES, + + PRIO_NETFRAMEWORK_CLR_LOADING_HEAP_SIZE, + PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_LOADED, + PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_UNLOADED, + PRIO_NETFRAMEWORK_CLR_LOADING_ASSEMBLIES_LOADED, + PRIO_NETFRAMEWORK_CLR_LOADING_CLASSES_LOADED, + PRIO_NETFRAMEWORK_CLR_LOADING_CLASS_LOAD_FAILURE }; #endif //NETDATA_WINDOWS_PLUGIN_H diff --git a/src/daemon/buildinfo.c b/src/daemon/buildinfo.c index 905009a52b8399..3cbbe903502be4 100644 --- a/src/daemon/buildinfo.c +++ b/src/daemon/buildinfo.c @@ -1331,7 +1331,7 @@ char *get_value_from_key(char *buffer, char *key) { return s; } -void get_install_type(char **install_type, char **prebuilt_arch, char **prebuilt_dist) { +void get_install_type(char **install_type, char **prebuilt_arch __maybe_unused, char **prebuilt_dist __maybe_unused) { #ifndef OS_WINDOWS char *install_type_filename; diff --git a/src/daemon/main.c b/src/daemon/main.c index 98f32a88d2974b..a3ceaffd5b365c 100644 --- a/src/daemon/main.c +++ b/src/daemon/main.c @@ -847,34 +847,60 @@ static void log_init(void) { nd_log_set_priority_level(config_get(CONFIG_SECTION_LOGS, "level", netdata_log_level)); char filename[FILENAME_MAX + 1]; + char* os_default_method = NULL; +#if defined(OS_LINUX) + os_default_method = is_stderr_connected_to_journal() /* || nd_log_journal_socket_available() */ ? 
"journal" : NULL; +#elif defined(OS_WINDOWS) +#if defined(HAVE_ETW) + os_default_method = "etw"; +#elif defined(HAVE_WEL) + os_default_method = "wel"; +#endif +#endif + +#if defined(OS_WINDOWS) + // on windows, debug log goes to windows events + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); +#else snprintfz(filename, FILENAME_MAX, "%s/debug.log", netdata_configured_log_dir); +#endif + nd_log_set_user_settings(NDLS_DEBUG, config_get(CONFIG_SECTION_LOGS, "debug", filename)); - bool with_journal = is_stderr_connected_to_journal() /* || nd_log_journal_socket_available() */; - if(with_journal) - snprintfz(filename, FILENAME_MAX, "journal"); + if(os_default_method) + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); else snprintfz(filename, FILENAME_MAX, "%s/daemon.log", netdata_configured_log_dir); nd_log_set_user_settings(NDLS_DAEMON, config_get(CONFIG_SECTION_LOGS, "daemon", filename)); - if(with_journal) - snprintfz(filename, FILENAME_MAX, "journal"); + if(os_default_method) + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); else snprintfz(filename, FILENAME_MAX, "%s/collector.log", netdata_configured_log_dir); nd_log_set_user_settings(NDLS_COLLECTORS, config_get(CONFIG_SECTION_LOGS, "collector", filename)); +#if defined(OS_WINDOWS) + // on windows, access log goes to windows events + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); +#else snprintfz(filename, FILENAME_MAX, "%s/access.log", netdata_configured_log_dir); +#endif nd_log_set_user_settings(NDLS_ACCESS, config_get(CONFIG_SECTION_LOGS, "access", filename)); - if(with_journal) - snprintfz(filename, FILENAME_MAX, "journal"); + if(os_default_method) + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); else snprintfz(filename, FILENAME_MAX, "%s/health.log", netdata_configured_log_dir); nd_log_set_user_settings(NDLS_HEALTH, config_get(CONFIG_SECTION_LOGS, "health", filename)); aclklog_enabled = config_get_boolean(CONFIG_SECTION_CLOUD, "conversation log", 
CONFIG_BOOLEAN_NO); if (aclklog_enabled) { +#if defined(OS_WINDOWS) + // on windows, aclk log goes to windows events + snprintfz(filename, FILENAME_MAX, "%s", os_default_method); +#else snprintfz(filename, FILENAME_MAX, "%s/aclk.log", netdata_configured_log_dir); +#endif nd_log_set_user_settings(NDLS_ACLK, config_get(CONFIG_SECTION_CLOUD, "conversation log file", filename)); } @@ -2085,12 +2111,6 @@ int netdata_main(int argc, char **argv) { health_set_silencers_filename(); health_initialize_global_silencers(); -// // -------------------------------------------------------------------- -// // Initialize ML configuration -// -// delta_startup_time("initialize ML"); -// ml_init(); - // -------------------------------------------------------------------- // setup process signals diff --git a/src/daemon/winsvc.cc b/src/daemon/winsvc.cc index 23ade2895e780d..c10addac68d868 100644 --- a/src/daemon/winsvc.cc +++ b/src/daemon/winsvc.cc @@ -74,7 +74,7 @@ static HANDLE CreateEventHandle(const char *msg) if (!h) { - netdata_service_log(msg); + netdata_service_log("%s", msg); if (!ReportSvcStatus(SERVICE_STOPPED, GetLastError(), 1000, 0)) { @@ -219,7 +219,11 @@ static bool update_path() { int main(int argc, char *argv[]) { +#if defined(OS_WINDOWS) && defined(RUN_UNDER_CLION) + bool tty = true; +#else bool tty = isatty(fileno(stdin)) == 1; +#endif if (!update_path()) { return 1; diff --git a/src/database/contexts/instance.c b/src/database/contexts/instance.c index 5d841bc82f07fd..ee6a906e7dfe88 100644 --- a/src/database/contexts/instance.c +++ b/src/database/contexts/instance.c @@ -37,6 +37,11 @@ inline STRING *rrdinstance_acquired_units_dup(RRDINSTANCE_ACQUIRED *ria) { inline RRDLABELS *rrdinstance_acquired_labels(RRDINSTANCE_ACQUIRED *ria) { RRDINSTANCE *ri = rrdinstance_acquired_value(ria); + if (rrd_flag_check(ri, RRD_FLAG_OWN_LABELS | RRD_FLAG_DEMAND_LABELS)) { + rrd_flag_clear(ri, RRD_FLAG_DEMAND_LABELS); + load_instance_labels_on_demand(&ri->uuid, ri); + 
rrdinstance_trigger_updates(ri, __FUNCTION__ ); + } return ri->rrdlabels; } @@ -101,11 +106,11 @@ static void rrdinstance_insert_callback(const DICTIONARY_ITEM *item __maybe_unus if(ri->rrdset) { ri->rrdlabels = ri->rrdset->rrdlabels; - ri->flags &= ~RRD_FLAG_OWN_LABELS; // no need of atomics at the constructor + ri->flags &= ~(RRD_FLAG_OWN_LABELS| RRD_FLAG_DEMAND_LABELS); // no need of atomics at the constructor } else { ri->rrdlabels = rrdlabels_create(); - ri->flags |= RRD_FLAG_OWN_LABELS; // no need of atomics at the constructor + ri->flags |= (RRD_FLAG_OWN_LABELS | RRD_FLAG_DEMAND_LABELS); // no need of atomics at the constructor } if(ri->rrdset) { @@ -213,12 +218,12 @@ static bool rrdinstance_conflict_callback(const DICTIONARY_ITEM *item __maybe_un if(ri->rrdset && rrd_flag_check(ri, RRD_FLAG_OWN_LABELS)) { RRDLABELS *old = ri->rrdlabels; ri->rrdlabels = ri->rrdset->rrdlabels; - rrd_flag_clear(ri, RRD_FLAG_OWN_LABELS); + rrd_flag_clear(ri, RRD_FLAG_OWN_LABELS| RRD_FLAG_DEMAND_LABELS); rrdlabels_destroy(old); } else if(!ri->rrdset && !rrd_flag_check(ri, RRD_FLAG_OWN_LABELS)) { ri->rrdlabels = rrdlabels_create(); - rrd_flag_set(ri, RRD_FLAG_OWN_LABELS); + rrd_flag_set(ri, RRD_FLAG_OWN_LABELS | RRD_FLAG_DEMAND_LABELS); } } @@ -431,8 +436,9 @@ inline void rrdinstance_rrdset_is_freed(RRDSET *st) { if(!rrd_flag_check(ri, RRD_FLAG_OWN_LABELS)) { ri->rrdlabels = rrdlabels_create(); - rrdlabels_copy(ri->rrdlabels, st->rrdlabels); - rrd_flag_set(ri, RRD_FLAG_OWN_LABELS); + // Do not load copy labels, just load on demand + //rrdlabels_copy(ri->rrdlabels, st->rrdlabels); + rrd_flag_set(ri, RRD_FLAG_OWN_LABELS | RRD_FLAG_DEMAND_LABELS); } ri->rrdset = NULL; diff --git a/src/database/contexts/internal.h b/src/database/contexts/internal.h index 270c59649663c2..2c69077e55993a 100644 --- a/src/database/contexts/internal.h +++ b/src/database/contexts/internal.h @@ -39,25 +39,26 @@ typedef enum __attribute__ ((__packed__)) { RRD_FLAG_UPDATED = (1 << 2), // this object has 
updates to propagate RRD_FLAG_ARCHIVED = (1 << 3), // this object is not currently being collected RRD_FLAG_OWN_LABELS = (1 << 4), // this instance has its own labels - not linked to an RRDSET - RRD_FLAG_LIVE_RETENTION = (1 << 5), // we have got live retention from the database - RRD_FLAG_QUEUED_FOR_HUB = (1 << 6), // this context is currently queued to be dispatched to hub - RRD_FLAG_QUEUED_FOR_PP = (1 << 7), // this context is currently queued to be post-processed - RRD_FLAG_HIDDEN = (1 << 8), // don't expose this to the hub or the API - - RRD_FLAG_UPDATE_REASON_TRIGGERED = (1 << 9), // the update was triggered by the child object - RRD_FLAG_UPDATE_REASON_LOAD_SQL = (1 << 10), // this object has just been loaded from SQL - RRD_FLAG_UPDATE_REASON_NEW_OBJECT = (1 << 11), // this object has just been created - RRD_FLAG_UPDATE_REASON_UPDATED_OBJECT = (1 << 12), // we received an update on this object - RRD_FLAG_UPDATE_REASON_CHANGED_LINKING = (1 << 13), // an instance or a metric switched RRDSET or RRDDIM - RRD_FLAG_UPDATE_REASON_CHANGED_METADATA = (1 << 14), // this context or instance changed uuid, name, units, title, family, chart type, priority, update every, rrd changed flags - RRD_FLAG_UPDATE_REASON_ZERO_RETENTION = (1 << 15), // this object has no retention - RRD_FLAG_UPDATE_REASON_CHANGED_FIRST_TIME_T = (1 << 16), // this object changed its oldest time in the db - RRD_FLAG_UPDATE_REASON_CHANGED_LAST_TIME_T = (1 << 17), // this object change its latest time in the db - RRD_FLAG_UPDATE_REASON_STOPPED_BEING_COLLECTED = (1 << 18), // this object has stopped being collected - RRD_FLAG_UPDATE_REASON_STARTED_BEING_COLLECTED = (1 << 19), // this object has started being collected - RRD_FLAG_UPDATE_REASON_DISCONNECTED_CHILD = (1 << 20), // this context belongs to a host that just disconnected - RRD_FLAG_UPDATE_REASON_UNUSED = (1 << 21), // this context is not used anymore - RRD_FLAG_UPDATE_REASON_DB_ROTATION = (1 << 22), // this context changed because of a db 
rotation + RRD_FLAG_DEMAND_LABELS = (1 << 5), // this instance should load labels on demand + RRD_FLAG_LIVE_RETENTION = (1 << 6), // we have got live retention from the database + RRD_FLAG_QUEUED_FOR_HUB = (1 << 7), // this context is currently queued to be dispatched to hub + RRD_FLAG_QUEUED_FOR_PP = (1 << 8), // this context is currently queued to be post-processed + RRD_FLAG_HIDDEN = (1 << 9), // don't expose this to the hub or the API + + RRD_FLAG_UPDATE_REASON_TRIGGERED = (1 << 10), // the update was triggered by the child object + RRD_FLAG_UPDATE_REASON_LOAD_SQL = (1 << 11), // this object has just been loaded from SQL + RRD_FLAG_UPDATE_REASON_NEW_OBJECT = (1 << 12), // this object has just been created + RRD_FLAG_UPDATE_REASON_UPDATED_OBJECT = (1 << 13), // we received an update on this object + RRD_FLAG_UPDATE_REASON_CHANGED_LINKING = (1 << 14), // an instance or a metric switched RRDSET or RRDDIM + RRD_FLAG_UPDATE_REASON_CHANGED_METADATA = (1 << 15), // this context or instance changed uuid, name, units, title, family, chart type, priority, update every, rrd changed flags + RRD_FLAG_UPDATE_REASON_ZERO_RETENTION = (1 << 16), // this object has no retention + RRD_FLAG_UPDATE_REASON_CHANGED_FIRST_TIME_T = (1 << 17), // this object changed its oldest time in the db + RRD_FLAG_UPDATE_REASON_CHANGED_LAST_TIME_T = (1 << 18), // this object change its latest time in the db + RRD_FLAG_UPDATE_REASON_STOPPED_BEING_COLLECTED = (1 << 19), // this object has stopped being collected + RRD_FLAG_UPDATE_REASON_STARTED_BEING_COLLECTED = (1 << 20), // this object has started being collected + RRD_FLAG_UPDATE_REASON_DISCONNECTED_CHILD = (1 << 21), // this context belongs to a host that just disconnected + RRD_FLAG_UPDATE_REASON_UNUSED = (1 << 22), // this context is not used anymore + RRD_FLAG_UPDATE_REASON_DB_ROTATION = (1 << 23), // this context changed because of a db rotation RRD_FLAG_MERGED_COLLECTED_RI_TO_RC = (1 << 29), @@ -354,6 +355,7 @@ static inline void 
rrdcontext_release(RRDCONTEXT_ACQUIRED *rca) { // ---------------------------------------------------------------------------- // Forward definitions +void load_instance_labels_on_demand(nd_uuid_t *uuid, void *data); void rrdcontext_recalculate_context_retention(RRDCONTEXT *rc, RRD_FLAGS reason, bool worker_jobs); void rrdcontext_recalculate_host_retention(RRDHOST *host, RRD_FLAGS reason, bool worker_jobs); diff --git a/src/database/contexts/worker.c b/src/database/contexts/worker.c index 53d17492e3d49a..31451a3ed37463 100644 --- a/src/database/contexts/worker.c +++ b/src/database/contexts/worker.c @@ -24,6 +24,10 @@ static void rrdinstance_load_clabel(SQL_CLABEL_DATA *sld, void *data) { rrdlabels_add(ri->rrdlabels, sld->label_key, sld->label_value, sld->label_source); } +void load_instance_labels_on_demand(nd_uuid_t *uuid, void *data) { + ctx_get_label_list(uuid, rrdinstance_load_clabel, data); +} + static void rrdinstance_load_dimension(SQL_DIMENSION_DATA *sd, void *data) { RRDINSTANCE *ri = data; @@ -73,7 +77,6 @@ static void rrdinstance_load_chart_callback(SQL_CHART_DATA *sc, void *data) { RRDINSTANCE *ri = rrdinstance_acquired_value(ria); ctx_get_dimension_list(&ri->uuid, rrdinstance_load_dimension, ri); - ctx_get_label_list(&ri->uuid, rrdinstance_load_clabel, ri); rrdinstance_trigger_updates(ri, __FUNCTION__ ); rrdinstance_release(ria); rrdcontext_release(rca); diff --git a/src/database/engine/cache.c b/src/database/engine/cache.c index a03df4676f0a71..6346377cd4d9ca 100644 --- a/src/database/engine/cache.c +++ b/src/database/engine/cache.c @@ -1786,7 +1786,7 @@ PGC *pgc_create(const char *name, sizeof(PGC_PAGE) + cache->config.additional_bytes_per_page, 0, 16384, - aral_statistics(pgc_section_pages_aral), + aral_get_statistics(pgc_section_pages_aral), NULL, NULL, false, false); } #endif @@ -1797,7 +1797,7 @@ PGC *pgc_create(const char *name, } struct aral_statistics *pgc_aral_statistics(void) { - return aral_statistics(pgc_section_pages_aral); + return 
aral_get_statistics(pgc_section_pages_aral); } size_t pgc_aral_structures(void) { diff --git a/src/database/rrdlabels.c b/src/database/rrdlabels.c index 4af1536369f5d4..0edab6e7e715ff 100644 --- a/src/database/rrdlabels.c +++ b/src/database/rrdlabels.c @@ -437,6 +437,7 @@ void rrdlabels_get_value_to_buffer_or_unset(RRDLABELS *labels, BUFFER *wb, const RRDLABEL *lb; RRDLABEL_SRC ls; + bool set = false; lfe_start_read(labels, lb, ls) { if (lb->index.key == this_key) { @@ -444,10 +445,15 @@ void rrdlabels_get_value_to_buffer_or_unset(RRDLABELS *labels, BUFFER *wb, const buffer_strcat(wb, string2str(lb->index.value)); else buffer_strcat(wb, unset); + set = true; break; } } lfe_done(labels); + + if(!set) + buffer_strcat(wb, unset); + string_freez(this_key); } diff --git a/src/database/rrdset.c b/src/database/rrdset.c index d9583a3cd1fe14..396f66835bf6d4 100644 --- a/src/database/rrdset.c +++ b/src/database/rrdset.c @@ -732,6 +732,8 @@ void rrdset_get_retention_of_tier_for_collected_chart(RRDSET *st, time_t *first_ } inline void rrdset_is_obsolete___safe_from_collector_thread(RRDSET *st) { + if(!st) return; + rrdset_pluginsd_receive_unslot(st); if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) { diff --git a/src/database/sqlite/sqlite3.c b/src/database/sqlite/sqlite3.c index 61cfb904cf63c6..fa796f86117d90 100644 --- a/src/database/sqlite/sqlite3.c +++ b/src/database/sqlite/sqlite3.c @@ -1,6 +1,6 @@ /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.45.3. By combining all the individual C code files into this +** version 3.46.1. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -18,7 +18,7 @@ ** separate file. 
This file contains only code for the core SQLite library. ** ** The content in this amalgamation comes from Fossil check-in -** 8653b758870e6ef0c98d46b3ace27849054a. +** c9c2ab54ba1f5f46360f1b4f35d849cd3f08. */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wimplicit-fallthrough" @@ -467,9 +467,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.45.3" -#define SQLITE_VERSION_NUMBER 3045003 -#define SQLITE_SOURCE_ID "2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1e8355" +#define SQLITE_VERSION "3.46.1" +#define SQLITE_VERSION_NUMBER 3046001 +#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -1085,11 +1085,11 @@ struct sqlite3_file { ** ** xLock() upgrades the database file lock. In other words, xLock() moves the ** database file lock in the direction NONE toward EXCLUSIVE. The argument to -** xLock() is always on of SHARED, RESERVED, PENDING, or EXCLUSIVE, never +** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never ** SQLITE_LOCK_NONE. If the database file lock is already at or above the ** requested lock, then the call to xLock() is a no-op. ** xUnlock() downgrades the database file lock to either SHARED or NONE. -* If the lock is already at or below the requested lock state, then the call +** If the lock is already at or below the requested lock state, then the call ** to xUnlock() is a no-op. 
** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, @@ -3626,8 +3626,8 @@ SQLITE_API int sqlite3_set_authorizer( #define SQLITE_RECURSIVE 33 /* NULL NULL */ /* -** CAPI3REF: Tracing And Profiling Functions -** METHOD: sqlite3 +** CAPI3REF: Deprecated Tracing And Profiling Functions +** DEPRECATED ** ** These routines are deprecated. Use the [sqlite3_trace_v2()] interface ** instead of the routines described here. @@ -7208,6 +7208,12 @@ SQLITE_API int sqlite3_autovacuum_pages( ** The exceptions defined in this paragraph might change in a future ** release of SQLite. ** +** Whether the update hook is invoked before or after the +** corresponding change is currently unspecified and may differ +** depending on the type of change. Do not rely on the order of the +** hook call with regards to the final result of the operation which +** triggers the hook. +** ** The update hook implementation must not do anything that will modify ** the database connection that invoked the update hook. Any actions ** to modify the database connection must be deferred until after the @@ -8678,7 +8684,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); ** The sqlite3_keyword_count() interface returns the number of distinct ** keywords understood by SQLite. ** -** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and +** The sqlite3_keyword_name(N,Z,L) interface finds the 0-based N-th keyword and ** makes *Z point to that keyword expressed as UTF8 and writes the number ** of bytes in the keyword into *L. The string that *Z points to is not ** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns @@ -10257,24 +10263,45 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); **
  • ** ^(If the sqlite3_vtab_distinct() interface returns 2, that means ** that the query planner does not need the rows returned in any particular -** order, as long as rows with the same values in all "aOrderBy" columns -** are adjacent.)^ ^(Furthermore, only a single row for each particular -** combination of values in the columns identified by the "aOrderBy" field -** needs to be returned.)^ ^It is always ok for two or more rows with the same -** values in all "aOrderBy" columns to be returned, as long as all such rows -** are adjacent. ^The virtual table may, if it chooses, omit extra rows -** that have the same value for all columns identified by "aOrderBy". -** ^However omitting the extra rows is optional. +** order, as long as rows with the same values in all columns identified +** by "aOrderBy" are adjacent.)^ ^(Furthermore, when two or more rows +** contain the same values for all columns identified by "colUsed", all but +** one such row may optionally be omitted from the result.)^ +** The virtual table is not required to omit rows that are duplicates +** over the "colUsed" columns, but if the virtual table can do that without +** too much extra effort, it could potentially help the query to run faster. ** This mode is used for a DISTINCT query. **

  • -** ^(If the sqlite3_vtab_distinct() interface returns 3, that means -** that the query planner needs only distinct rows but it does need the -** rows to be sorted.)^ ^The virtual table implementation is free to omit -** rows that are identical in all aOrderBy columns, if it wants to, but -** it is not required to omit any rows. This mode is used for queries +** ^(If the sqlite3_vtab_distinct() interface returns 3, that means the +** virtual table must return rows in the order defined by "aOrderBy" as +** if the sqlite3_vtab_distinct() interface had returned 0. However if +** two or more rows in the result have the same values for all columns +** identified by "colUsed", then all but one such row may optionally be +** omitted.)^ Like when the return value is 2, the virtual table +** is not required to omit rows that are duplicates over the "colUsed" +** columns, but if the virtual table can do that without +** too much extra effort, it could potentially help the query to run faster. +** This mode is used for queries ** that have both DISTINCT and ORDER BY clauses. ** ** +**

    The following table summarizes the conditions under which the +** virtual table is allowed to set the "orderByConsumed" flag based on +** the value returned by sqlite3_vtab_distinct(). This table is a +** restatement of the previous four paragraphs: +** +** +** +**
    sqlite3_vtab_distinct() return value +** Rows are returned in aOrderBy order +** Rows with the same value in all aOrderBy columns are adjacent +** Duplicates over all colUsed columns may be omitted +**
    0yesyesno +**
    1noyesno +**
    2noyesyes +**
    3yesyesyes +**
    +** ** ^For the purposes of comparing virtual table output values to see if the ** values are same value for sorting purposes, two NULL values are considered ** to be the same. In other words, the comparison operator is "IS" @@ -12319,6 +12346,30 @@ SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const c */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); +/* +** CAPI3REF: Add A Single Change To A Changegroup +** METHOD: sqlite3_changegroup +** +** This function adds the single change currently indicated by the iterator +** passed as the second argument to the changegroup object. The rules for +** adding the change are just as described for [sqlite3changegroup_add()]. +** +** If the change is successfully added to the changegroup, SQLITE_OK is +** returned. Otherwise, an SQLite error code is returned. +** +** The iterator must point to a valid entry when this function is called. +** If it does not, SQLITE_ERROR is returned and no change is added to the +** changegroup. Additionally, the iterator must not have been opened with +** the SQLITE_CHANGESETAPPLY_INVERT flag. In this case SQLITE_ERROR is also +** returned. +*/ +SQLITE_API int sqlite3changegroup_add_change( + sqlite3_changegroup*, + sqlite3_changeset_iter* +); + + + /* ** CAPI3REF: Obtain A Composite Changeset From A Changegroup ** METHOD: sqlite3_changegroup @@ -13123,8 +13174,8 @@ struct Fts5PhraseIter { ** EXTENSION API FUNCTIONS ** ** xUserData(pFts): -** Return a copy of the context pointer the extension function was -** registered with. +** Return a copy of the pUserData pointer passed to the xCreateFunction() +** API when the extension function was registered. 
** ** xColumnTotalSize(pFts, iCol, pnToken): ** If parameter iCol is less than zero, set output variable *pnToken @@ -14322,6 +14373,8 @@ struct fts5_api { # define SQLITE_OMIT_ALTERTABLE #endif +#define SQLITE_DIGIT_SEPARATOR '_' + /* ** Return true (non-zero) if the input is an integer that is too large ** to fit in 32-bits. This macro is used inside of various testcase() @@ -14614,8 +14667,8 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_TRUEFALSE 170 #define TK_ISNOT 171 #define TK_FUNCTION 172 -#define TK_UMINUS 173 -#define TK_UPLUS 174 +#define TK_UPLUS 173 +#define TK_UMINUS 174 #define TK_TRUTH 175 #define TK_REGISTER 176 #define TK_VECTOR 177 @@ -14624,8 +14677,9 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_ASTERISK 180 #define TK_SPAN 181 #define TK_ERROR 182 -#define TK_SPACE 183 -#define TK_ILLEGAL 184 +#define TK_QNUMBER 183 +#define TK_SPACE 184 +#define TK_ILLEGAL 185 /************** End of parse.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -14887,7 +14941,7 @@ typedef INT16_TYPE LogEst; # define SQLITE_PTRSIZE __SIZEOF_POINTER__ # elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \ defined(_M_ARM) || defined(__arm__) || defined(__x86) || \ - (defined(__APPLE__) && defined(__POWERPC__)) || \ + (defined(__APPLE__) && defined(__ppc__)) || \ (defined(__TOS_AIX__) && !defined(__64BIT__)) # define SQLITE_PTRSIZE 4 # else @@ -15155,7 +15209,7 @@ SQLITE_PRIVATE u32 sqlite3WhereTrace; ** 0x00000010 Display sqlite3_index_info xBestIndex calls ** 0x00000020 Range an equality scan metrics ** 0x00000040 IN operator decisions -** 0x00000080 WhereLoop cost adjustements +** 0x00000080 WhereLoop cost adjustments ** 0x00000100 ** 0x00000200 Covering index decisions ** 0x00000400 OR optimization @@ -16304,6 +16358,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( sqlite3 *db, /* Database connection that is running the check */ Btree *p, /* 
The btree to be checked */ Pgno *aRoot, /* An array of root pages numbers for individual trees */ + sqlite3_value *aCnt, /* OUT: entry counts for each btree in aRoot[] */ int nRoot, /* Number of entries in aRoot[] */ int mxErr, /* Stop reporting errors after this many */ int *pnErr, /* OUT: Write number of errors seen to this variable */ @@ -16574,12 +16629,12 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Vacuum 5 #define OP_VFilter 6 /* jump, synopsis: iplan=r[P3] zplan='P4' */ #define OP_VUpdate 7 /* synopsis: data=r[P3@P2] */ -#define OP_Init 8 /* jump, synopsis: Start at P2 */ +#define OP_Init 8 /* jump0, synopsis: Start at P2 */ #define OP_Goto 9 /* jump */ #define OP_Gosub 10 /* jump */ -#define OP_InitCoroutine 11 /* jump */ -#define OP_Yield 12 /* jump */ -#define OP_MustBeInt 13 /* jump */ +#define OP_InitCoroutine 11 /* jump0 */ +#define OP_Yield 12 /* jump0 */ +#define OP_MustBeInt 13 /* jump0 */ #define OP_Jump 14 /* jump */ #define OP_Once 15 /* jump */ #define OP_If 16 /* jump */ @@ -16587,22 +16642,22 @@ typedef struct VdbeOpList VdbeOpList; #define OP_IsType 18 /* jump, synopsis: if typeof(P1.P3) in P5 goto P2 */ #define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */ #define OP_IfNullRow 20 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */ -#define OP_SeekLT 21 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekLE 22 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekGE 23 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekGT 24 /* jump, synopsis: key=r[P3@P4] */ +#define OP_SeekLT 21 /* jump0, synopsis: key=r[P3@P4] */ +#define OP_SeekLE 22 /* jump0, synopsis: key=r[P3@P4] */ +#define OP_SeekGE 23 /* jump0, synopsis: key=r[P3@P4] */ +#define OP_SeekGT 24 /* jump0, synopsis: key=r[P3@P4] */ #define OP_IfNotOpen 25 /* jump, synopsis: if( !csr[P1] ) goto P2 */ #define OP_IfNoHope 26 /* jump, synopsis: key=r[P3@P4] */ #define OP_NoConflict 27 /* jump, synopsis: key=r[P3@P4] */ #define OP_NotFound 28 /* jump, synopsis: 
key=r[P3@P4] */ #define OP_Found 29 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekRowid 30 /* jump, synopsis: intkey=r[P3] */ +#define OP_SeekRowid 30 /* jump0, synopsis: intkey=r[P3] */ #define OP_NotExists 31 /* jump, synopsis: intkey=r[P3] */ -#define OP_Last 32 /* jump */ -#define OP_IfSmaller 33 /* jump */ +#define OP_Last 32 /* jump0 */ +#define OP_IfSizeBetween 33 /* jump */ #define OP_SorterSort 34 /* jump */ #define OP_Sort 35 /* jump */ -#define OP_Rewind 36 /* jump */ +#define OP_Rewind 36 /* jump0 */ #define OP_SorterNext 37 /* jump */ #define OP_Prev 38 /* jump */ #define OP_Next 39 /* jump */ @@ -16614,7 +16669,7 @@ typedef struct VdbeOpList VdbeOpList; #define OP_IdxGE 45 /* jump, synopsis: key=r[P3@P4] */ #define OP_RowSetRead 46 /* jump, synopsis: r[P3]=rowset(P1) */ #define OP_RowSetTest 47 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */ -#define OP_Program 48 /* jump */ +#define OP_Program 48 /* jump0 */ #define OP_FkIfZero 49 /* jump, synopsis: if fkctr[P1]==0 goto P2 */ #define OP_IsNull 50 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */ #define OP_NotNull 51 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */ @@ -16644,7 +16699,7 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Null 75 /* synopsis: r[P2..P3]=NULL */ #define OP_SoftNull 76 /* synopsis: r[P1]=NULL */ #define OP_Blob 77 /* synopsis: r[P2]=P4 (len=P1) */ -#define OP_Variable 78 /* synopsis: r[P2]=parameter(P1,P4) */ +#define OP_Variable 78 /* synopsis: r[P2]=parameter(P1) */ #define OP_Move 79 /* synopsis: r[P2@P3]=r[P1@P3] */ #define OP_Copy 80 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ #define OP_SCopy 81 /* synopsis: r[P2]=r[P1] */ @@ -16768,14 +16823,15 @@ typedef struct VdbeOpList VdbeOpList; #define OPFLG_OUT2 0x10 /* out2: P2 is an output */ #define OPFLG_OUT3 0x20 /* out3: P3 is an output */ #define OPFLG_NCYCLE 0x40 /* ncycle:Cycles count against P1 */ +#define OPFLG_JUMP0 0x80 /* jump0: P2 might be zero */ #define OPFLG_INITIALIZER {\ 
/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x41, 0x00,\ -/* 8 */ 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01,\ -/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x49, 0x49, 0x49,\ -/* 24 */ 0x49, 0x01, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49,\ -/* 32 */ 0x41, 0x01, 0x41, 0x41, 0x41, 0x01, 0x41, 0x41,\ +/* 8 */ 0x81, 0x01, 0x01, 0x81, 0x83, 0x83, 0x01, 0x01,\ +/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0xc9, 0xc9, 0xc9,\ +/* 24 */ 0xc9, 0x01, 0x49, 0x49, 0x49, 0x49, 0xc9, 0x49,\ +/* 32 */ 0xc1, 0x01, 0x41, 0x41, 0xc1, 0x01, 0x41, 0x41,\ /* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\ -/* 48 */ 0x01, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ +/* 48 */ 0x81, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ /* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x41,\ /* 64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\ /* 72 */ 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10, 0x00,\ @@ -16935,6 +16991,8 @@ SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*); SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *, SubProgram *); SQLITE_PRIVATE int sqlite3VdbeHasSubProgram(Vdbe*); +SQLITE_PRIVATE void sqlite3MemSetArrayInt64(sqlite3_value *aMem, int iIdx, i64 val); + SQLITE_PRIVATE int sqlite3NotPureFunc(sqlite3_context*); #ifdef SQLITE_ENABLE_BYTECODE_VTAB SQLITE_PRIVATE int sqlite3VdbeBytecodeVtabInit(sqlite3*); @@ -17522,6 +17580,10 @@ struct FuncDefHash { }; #define SQLITE_FUNC_HASH(C,L) (((C)+(L))%SQLITE_FUNC_HASH_SZ) +#if defined(SQLITE_USER_AUTHENTICATION) +# warning "The SQLITE_USER_AUTHENTICATION extension is deprecated. \ + See ext/userauth/user-auth.txt for details." +#endif #ifdef SQLITE_USER_AUTHENTICATION /* ** Information held in the "sqlite3" database connection object and used @@ -17825,7 +17887,7 @@ struct sqlite3 { #define SQLITE_CursorHints 0x00000400 /* Add OP_CursorHint opcodes */ #define SQLITE_Stat4 0x00000800 /* Use STAT4 data */ /* TH3 expects this value ^^^^^^^^^^ to be 0x0000800. 
Don't change it */ -#define SQLITE_PushDown 0x00001000 /* The push-down optimization */ +#define SQLITE_PushDown 0x00001000 /* WHERE-clause push-down opt */ #define SQLITE_SimplifyJoin 0x00002000 /* Convert LEFT JOIN to JOIN */ #define SQLITE_SkipScan 0x00004000 /* Skip-scans */ #define SQLITE_PropagateConst 0x00008000 /* The constant propagation opt */ @@ -18398,8 +18460,7 @@ struct Table { #define TF_HasStored 0x00000040 /* Has one or more STORED columns */ #define TF_HasGenerated 0x00000060 /* Combo: HasVirtual + HasStored */ #define TF_WithoutRowid 0x00000080 /* No rowid. PRIMARY KEY is the key */ -#define TF_StatsUsed 0x00000100 /* Query planner decisions affected by - ** Index.aiRowLogEst[] values */ +#define TF_MaybeReanalyze 0x00000100 /* Maybe run ANALYZE on this table */ #define TF_NoVisibleRowid 0x00000200 /* No user-visible "rowid" column */ #define TF_OOOHidden 0x00000400 /* Out-of-Order hidden columns */ #define TF_HasNotNull 0x00000800 /* Contains NOT NULL constraints */ @@ -19199,10 +19260,12 @@ struct IdList { ** ** Union member validity: ** -** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc -** u1.pFuncArg fg.isTabFunc && !fg.isIndexedBy -** u2.pIBIndex fg.isIndexedBy && !fg.isCte -** u2.pCteUse fg.isCte && !fg.isIndexedBy +** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc +** u1.pFuncArg fg.isTabFunc && !fg.isIndexedBy +** u1.nRow !fg.isTabFunc && !fg.isIndexedBy +** +** u2.pIBIndex fg.isIndexedBy && !fg.isCte +** u2.pCteUse fg.isCte && !fg.isIndexedBy */ struct SrcItem { Schema *pSchema; /* Schema to which this item is fixed */ @@ -19230,6 +19293,7 @@ struct SrcItem { unsigned isOn :1; /* u3.pOn was once valid and non-NULL */ unsigned isSynthUsing :1; /* u3.pUsing is synthesized from NATURAL */ unsigned isNestedFrom :1; /* pSelect is a SF_NestedFrom subquery */ + unsigned rowidUsed :1; /* The ROWID of this table is referenced */ } fg; int iCursor; /* The VDBE cursor number used to access this table */ union { @@ -19240,6 +19304,7 @@ struct SrcItem 
{ union { char *zIndexedBy; /* Identifier from "INDEXED BY " clause */ ExprList *pFuncArg; /* Arguments to table-valued-function */ + u32 nRow; /* Number of rows in a VALUES clause */ } u1; union { Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */ @@ -19304,7 +19369,7 @@ struct SrcList { #define WHERE_AGG_DISTINCT 0x0400 /* Query is "SELECT agg(DISTINCT ...)" */ #define WHERE_ORDERBY_LIMIT 0x0800 /* ORDERBY+LIMIT on the inner loop */ #define WHERE_RIGHT_JOIN 0x1000 /* Processing a RIGHT JOIN */ - /* 0x2000 not currently used */ +#define WHERE_KEEP_ALL_JOINS 0x2000 /* Do not do the omit-noop-join opt */ #define WHERE_USE_LIMIT 0x4000 /* Use the LIMIT in cost estimates */ /* 0x8000 not currently used */ @@ -19497,11 +19562,12 @@ struct Select { #define SF_View 0x0200000 /* SELECT statement is a view */ #define SF_NoopOrderBy 0x0400000 /* ORDER BY is ignored for this query */ #define SF_UFSrcCheck 0x0800000 /* Check pSrc as required by UPDATE...FROM */ -#define SF_PushDown 0x1000000 /* SELECT has be modified by push-down opt */ +#define SF_PushDown 0x1000000 /* Modified by WHERE-clause push-down opt */ #define SF_MultiPart 0x2000000 /* Has multiple incompatible PARTITIONs */ #define SF_CopyCte 0x4000000 /* SELECT statement is a copy of a CTE */ #define SF_OrderByReqd 0x8000000 /* The ORDER BY clause may not be omitted */ #define SF_UpdateFrom 0x10000000 /* Query originates with UPDATE FROM */ +#define SF_Correlated 0x20000000 /* True if references the outer context */ /* True if S exists and has SF_NestedFrom */ #define IsNestedFrom(S) ((S)!=0 && ((S)->selFlags&SF_NestedFrom)!=0) @@ -19741,6 +19807,7 @@ struct Parse { u8 disableLookaside; /* Number of times lookaside has been disabled */ u8 prepFlags; /* SQLITE_PREPARE_* flags */ u8 withinRJSubrtn; /* Nesting level for RIGHT JOIN body subroutines */ + u8 bHasWith; /* True if statement contains WITH */ #if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) u8 earlyCleanup; /* OOM inside 
sqlite3ParserAddCleanup() */ #endif @@ -20420,6 +20487,9 @@ struct Window { ** due to the SQLITE_SUBTYPE flag */ }; +SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList *pRow); +SQLITE_PRIVATE void sqlite3MultiValuesEnd(Parse *pParse, Select *pVal); + #ifndef SQLITE_OMIT_WINDOWFUNC SQLITE_PRIVATE void sqlite3WindowDelete(sqlite3*, Window*); SQLITE_PRIVATE void sqlite3WindowUnlinkFromSelect(Window*); @@ -20737,6 +20807,7 @@ SQLITE_PRIVATE int sqlite3ErrorToParser(sqlite3*,int); SQLITE_PRIVATE void sqlite3Dequote(char*); SQLITE_PRIVATE void sqlite3DequoteExpr(Expr*); SQLITE_PRIVATE void sqlite3DequoteToken(Token*); +SQLITE_PRIVATE void sqlite3DequoteNumber(Parse*, Expr*); SQLITE_PRIVATE void sqlite3TokenInit(Token*,char*); SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char*, int); SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*); @@ -20767,7 +20838,7 @@ SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*) SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3*,void*); -SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*); +SQLITE_PRIVATE int sqlite3ExprDeferredDelete(Parse*, Expr*); SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(Parse*,ExprList*,IdList*,Expr*); @@ -20990,12 +21061,10 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3*); SQLITE_PRIVATE u32 sqlite3IsTrueOrFalse(const char*); SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr*); SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr*); -SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr*); -SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*); +SQLITE_PRIVATE int sqlite3ExprIsConstant(Parse*,Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); 
SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse*, Expr*, ExprList*); -SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int); -SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int); +SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int,int); #ifdef SQLITE_ENABLE_CURSOR_HINTS SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*); #endif @@ -21180,7 +21249,9 @@ SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...); SQLITE_PRIVATE void sqlite3Error(sqlite3*,int); SQLITE_PRIVATE void sqlite3ErrorClear(sqlite3*); SQLITE_PRIVATE void sqlite3SystemError(sqlite3*,int); +#if !defined(SQLITE_OMIT_BLOB_LITERAL) SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n); +#endif SQLITE_PRIVATE u8 sqlite3HexToInt(int h); SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **); @@ -24227,13 +24298,14 @@ struct DateTime { int tz; /* Timezone offset in minutes */ double s; /* Seconds */ char validJD; /* True (1) if iJD is valid */ - char rawS; /* Raw numeric value stored in s */ char validYMD; /* True (1) if Y,M,D are valid */ char validHMS; /* True (1) if h,m,s are valid */ - char validTZ; /* True (1) if tz is valid */ - char tzSet; /* Timezone was set explicitly */ - char isError; /* An overflow has occurred */ - char useSubsec; /* Display subsecond precision */ + char nFloor; /* Days to implement "floor" */ + unsigned rawS : 1; /* Raw numeric value stored in s */ + unsigned isError : 1; /* An overflow has occurred */ + unsigned useSubsec : 1; /* Display subsecond precision */ + unsigned isUtc : 1; /* Time is known to be UTC */ + unsigned isLocal : 1; /* Time is known to be localtime */ }; @@ -24331,6 +24403,8 @@ static int parseTimezone(const char *zDate, DateTime *p){ sgn = +1; }else if( c=='Z' || c=='z' ){ zDate++; + p->isLocal = 0; + p->isUtc = 1; goto zulu_time; }else{ return c!=0; @@ -24343,7 +24417,6 @@ static int parseTimezone(const char *zDate, DateTime *p){ 
p->tz = sgn*(nMn + nHr*60); zulu_time: while( sqlite3Isspace(*zDate) ){ zDate++; } - p->tzSet = 1; return *zDate!=0; } @@ -24387,7 +24460,6 @@ static int parseHhMmSs(const char *zDate, DateTime *p){ p->m = m; p->s = s + ms; if( parseTimezone(zDate, p) ) return 1; - p->validTZ = (p->tz!=0)?1:0; return 0; } @@ -24434,15 +24506,40 @@ static void computeJD(DateTime *p){ p->validJD = 1; if( p->validHMS ){ p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000 + 0.5); - if( p->validTZ ){ + if( p->tz ){ p->iJD -= p->tz*60000; p->validYMD = 0; p->validHMS = 0; - p->validTZ = 0; + p->tz = 0; + p->isUtc = 1; + p->isLocal = 0; } } } +/* +** Given the YYYY-MM-DD information current in p, determine if there +** is day-of-month overflow and set nFloor to the number of days that +** would need to be subtracted from the date in order to bring the +** date back to the end of the month. +*/ +static void computeFloor(DateTime *p){ + assert( p->validYMD || p->isError ); + assert( p->D>=0 && p->D<=31 ); + assert( p->M>=0 && p->M<=12 ); + if( p->D<=28 ){ + p->nFloor = 0; + }else if( (1<M) & 0x15aa ){ + p->nFloor = 0; + }else if( p->M!=2 ){ + p->nFloor = (p->D==31); + }else if( p->Y%4!=0 || (p->Y%100==0 && p->Y%400!=0) ){ + p->nFloor = p->D - 28; + }else{ + p->nFloor = p->D - 29; + } +} + /* ** Parse dates of the form ** @@ -24481,12 +24578,16 @@ static int parseYyyyMmDd(const char *zDate, DateTime *p){ p->Y = neg ? -Y : Y; p->M = M; p->D = D; - if( p->validTZ ){ + computeFloor(p); + if( p->tz ){ computeJD(p); } return 0; } + +static void clearYMD_HMS_TZ(DateTime *p); /* Forward declaration */ + /* ** Set the time to the current time reported by the VFS. 
** @@ -24496,6 +24597,9 @@ static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){ p->iJD = sqlite3StmtCurrentTime(context); if( p->iJD>0 ){ p->validJD = 1; + p->isUtc = 1; + p->isLocal = 0; + clearYMD_HMS_TZ(p); return 0; }else{ return 1; @@ -24634,7 +24738,7 @@ static void computeYMD_HMS(DateTime *p){ static void clearYMD_HMS_TZ(DateTime *p){ p->validYMD = 0; p->validHMS = 0; - p->validTZ = 0; + p->tz = 0; } #ifndef SQLITE_OMIT_LOCALTIME @@ -24766,7 +24870,7 @@ static int toLocaltime( p->validHMS = 1; p->validJD = 0; p->rawS = 0; - p->validTZ = 0; + p->tz = 0; p->isError = 0; return SQLITE_OK; } @@ -24786,12 +24890,12 @@ static const struct { float rLimit; /* Maximum NNN value for this transform */ float rXform; /* Constant used for this transform */ } aXformType[] = { - { 6, "second", 4.6427e+14, 1.0 }, - { 6, "minute", 7.7379e+12, 60.0 }, - { 4, "hour", 1.2897e+11, 3600.0 }, - { 3, "day", 5373485.0, 86400.0 }, - { 5, "month", 176546.0, 2592000.0 }, - { 4, "year", 14713.0, 31536000.0 }, + /* 0 */ { 6, "second", 4.6427e+14, 1.0 }, + /* 1 */ { 6, "minute", 7.7379e+12, 60.0 }, + /* 2 */ { 4, "hour", 1.2897e+11, 3600.0 }, + /* 3 */ { 3, "day", 5373485.0, 86400.0 }, + /* 4 */ { 5, "month", 176546.0, 30.0*86400.0 }, + /* 5 */ { 4, "year", 14713.0, 365.0*86400.0 }, }; /* @@ -24823,14 +24927,20 @@ static void autoAdjustDate(DateTime *p){ ** NNN.NNNN seconds ** NNN months ** NNN years +** +/-YYYY-MM-DD HH:MM:SS.SSS +** ceiling +** floor ** start of month ** start of year ** start of week ** start of day ** weekday N ** unixepoch +** auto ** localtime ** utc +** subsec +** subsecond ** ** Return 0 on success and 1 if there is any kind of error. If the error ** is in a system call (i.e. localtime()), then an error message is written @@ -24861,6 +24971,37 @@ static int parseModifier( } break; } + case 'c': { + /* + ** ceiling + ** + ** Resolve day-of-month overflow by rolling forward into the next + ** month. 
As this is the default action, this modifier is really + ** a no-op that is only included for symmetry. See "floor". + */ + if( sqlite3_stricmp(z, "ceiling")==0 ){ + computeJD(p); + clearYMD_HMS_TZ(p); + rc = 0; + p->nFloor = 0; + } + break; + } + case 'f': { + /* + ** floor + ** + ** Resolve day-of-month overflow by rolling back to the end of the + ** previous month. + */ + if( sqlite3_stricmp(z, "floor")==0 ){ + computeJD(p); + p->iJD -= p->nFloor*86400000; + clearYMD_HMS_TZ(p); + rc = 0; + } + break; + } case 'j': { /* ** julianday @@ -24887,7 +25028,9 @@ static int parseModifier( ** show local time. */ if( sqlite3_stricmp(z, "localtime")==0 && sqlite3NotPureFunc(pCtx) ){ - rc = toLocaltime(p, pCtx); + rc = p->isLocal ? SQLITE_OK : toLocaltime(p, pCtx); + p->isUtc = 0; + p->isLocal = 1; } break; } @@ -24912,7 +25055,7 @@ static int parseModifier( } #ifndef SQLITE_OMIT_LOCALTIME else if( sqlite3_stricmp(z, "utc")==0 && sqlite3NotPureFunc(pCtx) ){ - if( p->tzSet==0 ){ + if( p->isUtc==0 ){ i64 iOrigJD; /* Original localtime */ i64 iGuess; /* Guess at the corresponding utc time */ int cnt = 0; /* Safety to prevent infinite loop */ @@ -24935,7 +25078,8 @@ static int parseModifier( memset(p, 0, sizeof(*p)); p->iJD = iGuess; p->validJD = 1; - p->tzSet = 1; + p->isUtc = 1; + p->isLocal = 0; } rc = SQLITE_OK; } @@ -24955,7 +25099,7 @@ static int parseModifier( && r>=0.0 && r<7.0 && (n=(int)r)==r ){ sqlite3_int64 Z; computeYMD_HMS(p); - p->validTZ = 0; + p->tz = 0; p->validJD = 0; computeJD(p); Z = ((p->iJD + 129600000)/86400000) % 7; @@ -24995,7 +25139,7 @@ static int parseModifier( p->h = p->m = 0; p->s = 0.0; p->rawS = 0; - p->validTZ = 0; + p->tz = 0; p->validJD = 0; if( sqlite3_stricmp(z,"month")==0 ){ p->D = 1; @@ -25066,6 +25210,7 @@ static int parseModifier( x = p->M>0 ? 
(p->M-1)/12 : (p->M-12)/12; p->Y += x; p->M -= x*12; + computeFloor(p); computeJD(p); p->validHMS = 0; p->validYMD = 0; @@ -25112,11 +25257,12 @@ static int parseModifier( z += n; while( sqlite3Isspace(*z) ) z++; n = sqlite3Strlen30(z); - if( n>10 || n<3 ) break; + if( n<3 || n>10 ) break; if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--; computeJD(p); assert( rc==1 ); rRounder = r<0 ? -0.5 : +0.5; + p->nFloor = 0; for(i=0; iM += (int)r; x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; p->Y += x; p->M -= x*12; + computeFloor(p); p->validJD = 0; r -= (int)r; break; } case 5: { /* Special processing to add years */ int y = (int)r; - assert( strcmp(aXformType[i].zName,"year")==0 ); + assert( strcmp(aXformType[5].zName,"year")==0 ); computeYMD_HMS(p); + assert( p->M>=0 && p->M<=12 ); p->Y += y; + computeFloor(p); p->validJD = 0; r -= (int)r; break; @@ -25392,22 +25541,83 @@ static void dateFunc( } } +/* +** Compute the number of days after the most recent January 1. +** +** In other words, compute the zero-based day number for the +** current year: +** +** Jan01 = 0, Jan02 = 1, ..., Jan31 = 30, Feb01 = 31, ... +** Dec31 = 364 or 365. +*/ +static int daysAfterJan01(DateTime *pDate){ + DateTime jan01 = *pDate; + assert( jan01.validYMD ); + assert( jan01.validHMS ); + assert( pDate->validJD ); + jan01.validJD = 0; + jan01.M = 1; + jan01.D = 1; + computeJD(&jan01); + return (int)((pDate->iJD-jan01.iJD+43200000)/86400000); +} + +/* +** Return the number of days after the most recent Monday. +** +** In other words, return the day of the week according +** to this code: +** +** 0=Monday, 1=Tuesday, 2=Wednesday, ..., 6=Sunday. +*/ +static int daysAfterMonday(DateTime *pDate){ + assert( pDate->validJD ); + return (int)((pDate->iJD+43200000)/86400000) % 7; +} + +/* +** Return the number of days after the most recent Sunday. 
+** +** In other words, return the day of the week according +** to this code: +** +** 0=Sunday, 1=Monday, 2=Tues, ..., 6=Saturday +*/ +static int daysAfterSunday(DateTime *pDate){ + assert( pDate->validJD ); + return (int)((pDate->iJD+129600000)/86400000) % 7; +} + /* ** strftime( FORMAT, TIMESTRING, MOD, MOD, ...) ** ** Return a string described by FORMAT. Conversions as follows: ** -** %d day of month +** %d day of month 01-31 +** %e day of month 1-31 ** %f ** fractional seconds SS.SSS +** %F ISO date. YYYY-MM-DD +** %G ISO year corresponding to %V 0000-9999. +** %g 2-digit ISO year corresponding to %V 00-99 ** %H hour 00-24 -** %j day of year 000-366 +** %k hour 0-24 (leading zero converted to space) +** %I hour 01-12 +** %j day of year 001-366 ** %J ** julian day number +** %l hour 1-12 (leading zero converted to space) ** %m month 01-12 ** %M minute 00-59 +** %p "am" or "pm" +** %P "AM" or "PM" +** %R time as HH:MM ** %s seconds since 1970-01-01 ** %S seconds 00-59 -** %w day of week 0-6 Sunday==0 -** %W week of year 00-53 +** %T time as HH:MM:SS +** %u day of week 1-7 Monday==1, Sunday==7 +** %w day of week 0-6 Sunday==0, Monday==1 +** %U week of year 00-53 (First Sunday is start of week 01) +** %V week of year 01-53 (First week containing Thursday is week 01) +** %W week of year 00-53 (First Monday is start of week 01) ** %Y year 0000-9999 ** %% % */ @@ -25444,7 +25654,7 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes, cf=='d' ? "%02d" : "%2d", x.D); break; } - case 'f': { + case 'f': { /* Fractional seconds. 
(Non-standard) */ double s = x.s; if( s>59.999 ) s = 59.999; sqlite3_str_appendf(&sRes, "%06.3f", s); @@ -25454,6 +25664,21 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes, "%04d-%02d-%02d", x.Y, x.M, x.D); break; } + case 'G': /* Fall thru */ + case 'g': { + DateTime y = x; + assert( y.validJD ); + /* Move y so that it is the Thursday in the same week as x */ + y.iJD += (3 - daysAfterMonday(&x))*86400000; + y.validYMD = 0; + computeYMD(&y); + if( cf=='g' ){ + sqlite3_str_appendf(&sRes, "%02d", y.Y%100); + }else{ + sqlite3_str_appendf(&sRes, "%04d", y.Y); + } + break; + } case 'H': case 'k': { sqlite3_str_appendf(&sRes, cf=='H' ? "%02d" : "%2d", x.h); @@ -25467,25 +25692,11 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes, cf=='I' ? "%02d" : "%2d", h); break; } - case 'W': /* Fall thru */ - case 'j': { - int nDay; /* Number of days since 1st day of year */ - DateTime y = x; - y.validJD = 0; - y.M = 1; - y.D = 1; - computeJD(&y); - nDay = (int)((x.iJD-y.iJD+43200000)/86400000); - if( cf=='W' ){ - int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ - wd = (int)(((x.iJD+43200000)/86400000)%7); - sqlite3_str_appendf(&sRes,"%02d",(nDay+7-wd)/7); - }else{ - sqlite3_str_appendf(&sRes,"%03d",nDay+1); - } + case 'j': { /* Day of year. Jan01==1, Jan02==2, and so forth */ + sqlite3_str_appendf(&sRes,"%03d",daysAfterJan01(&x)+1); break; } - case 'J': { + case 'J': { /* Julian day number. (Non-standard) */ sqlite3_str_appendf(&sRes,"%.16g",x.iJD/86400000.0); break; } @@ -25528,13 +25739,33 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes,"%02d:%02d:%02d", x.h, x.m, (int)x.s); break; } - case 'u': /* Fall thru */ - case 'w': { - char c = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; + case 'u': /* Day of week. 1 to 7. Monday==1, Sunday==7 */ + case 'w': { /* Day of week. 0 to 6. Sunday==0, Monday==1 */ + char c = (char)daysAfterSunday(&x) + '0'; if( c=='0' && cf=='u' ) c = '7'; sqlite3_str_appendchar(&sRes, 1, c); break; } + case 'U': { /* Week num. 00-53. 
First Sun of the year is week 01 */ + sqlite3_str_appendf(&sRes,"%02d", + (daysAfterJan01(&x)-daysAfterSunday(&x)+7)/7); + break; + } + case 'V': { /* Week num. 01-53. First week with a Thur is week 01 */ + DateTime y = x; + /* Adjust y so that is the Thursday in the same week as x */ + assert( y.validJD ); + y.iJD += (3 - daysAfterMonday(&x))*86400000; + y.validYMD = 0; + computeYMD(&y); + sqlite3_str_appendf(&sRes,"%02d", daysAfterJan01(&y)/7+1); + break; + } + case 'W': { /* Week num. 00-53. First Mon of the year is week 01 */ + sqlite3_str_appendf(&sRes,"%02d", + (daysAfterJan01(&x)-daysAfterMonday(&x)+7)/7); + break; + } case 'Y': { sqlite3_str_appendf(&sRes,"%04d",x.Y); break; @@ -25681,9 +25912,7 @@ static void timediffFunc( d1.iJD = d2.iJD - d1.iJD; d1.iJD += (u64)1486995408 * (u64)100000; } - d1.validYMD = 0; - d1.validHMS = 0; - d1.validTZ = 0; + clearYMD_HMS_TZ(&d1); computeYMD_HMS(&d1); sqlite3StrAccumInit(&sRes, 0, 0, 0, 100); sqlite3_str_appendf(&sRes, "%c%04d-%02d-%02d %02d:%02d:%06.3f", @@ -25752,6 +25981,36 @@ static void currentTimeFunc( } #endif +#if !defined(SQLITE_OMIT_DATETIME_FUNCS) && defined(SQLITE_DEBUG) +/* +** datedebug(...) +** +** This routine returns JSON that describes the internal DateTime object. +** Used for debugging and testing only. Subject to change. 
+*/ +static void datedebugFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + DateTime x; + if( isDate(context, argc, argv, &x)==0 ){ + char *zJson; + zJson = sqlite3_mprintf( + "{iJD:%lld,Y:%d,M:%d,D:%d,h:%d,m:%d,tz:%d," + "s:%.3f,validJD:%d,validYMS:%d,validHMS:%d," + "nFloor:%d,rawS:%d,isError:%d,useSubsec:%d," + "isUtc:%d,isLocal:%d}", + x.iJD, x.Y, x.M, x.D, x.h, x.m, x.tz, + x.s, x.validJD, x.validYMD, x.validHMS, + x.nFloor, x.rawS, x.isError, x.useSubsec, + x.isUtc, x.isLocal); + sqlite3_result_text(context, zJson, -1, sqlite3_free); + } +} +#endif /* !SQLITE_OMIT_DATETIME_FUNCS && SQLITE_DEBUG */ + + /* ** This function registered all of the above C functions as SQL ** functions. This should be the only routine in this file with @@ -25767,6 +26026,9 @@ SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){ PURE_DATE(datetime, -1, 0, 0, datetimeFunc ), PURE_DATE(strftime, -1, 0, 0, strftimeFunc ), PURE_DATE(timediff, 2, 0, 0, timediffFunc ), +#ifdef SQLITE_DEBUG + PURE_DATE(datedebug, -1, 0, 0, datedebugFunc ), +#endif DFUNCTION(current_time, 0, 0, 0, ctimeFunc ), DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc), DFUNCTION(current_date, 0, 0, 0, cdateFunc ), @@ -30182,6 +30444,24 @@ static void sqlite3MallocAlarm(int nByte){ sqlite3_mutex_enter(mem0.mutex); } +#ifdef SQLITE_DEBUG +/* +** This routine is called whenever an out-of-memory condition is seen, +** It's only purpose to to serve as a breakpoint for gdb or similar +** code debuggers when working on out-of-memory conditions, for example +** caused by PRAGMA hard_heap_limit=N. +*/ +static SQLITE_NOINLINE void test_oom_breakpoint(u64 n){ + static u64 nOomFault = 0; + nOomFault += n; + /* The assert() is never reached in a human lifetime. It is here mostly + ** to prevent code optimizers from optimizing out this function. 
*/ + assert( (nOomFault>>32) < 0xffffffff ); +} +#else +# define test_oom_breakpoint(X) /* No-op for production builds */ +#endif + /* ** Do a memory allocation with statistics and alarms. Assume the ** lock is already held. @@ -30208,6 +30488,7 @@ static void mallocWithAlarm(int n, void **pp){ if( mem0.hardLimit ){ nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); if( nUsed >= mem0.hardLimit - nFull ){ + test_oom_breakpoint(1); *pp = 0; return; } @@ -30496,6 +30777,7 @@ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){ sqlite3MallocAlarm(nDiff); if( mem0.hardLimit>0 && nUsed >= mem0.hardLimit - nDiff ){ sqlite3_mutex_leave(mem0.mutex); + test_oom_breakpoint(1); return 0; } } @@ -31398,13 +31680,14 @@ SQLITE_API void sqlite3_str_vappendf( } exp = s.iDP-1; - if( xtype==etGENERIC && precision>0 ) precision--; /* ** If the field type is etGENERIC, then convert to either etEXP ** or etFLOAT, as appropriate. */ if( xtype==etGENERIC ){ + assert( precision>0 ); + precision--; flag_rtz = !flag_alternateform; if( exp<-4 || exp>precision ){ xtype = etEXP; @@ -31720,9 +32003,13 @@ SQLITE_API void sqlite3_str_vappendf( sqlite3_str_appendall(pAccum, pItem->zAlias); }else{ Select *pSel = pItem->pSelect; - assert( pSel!=0 ); + assert( pSel!=0 ); /* Because of tag-20240424-1 */ if( pSel->selFlags & SF_NestedFrom ){ sqlite3_str_appendf(pAccum, "(join-%u)", pSel->selId); + }else if( pSel->selFlags & SF_MultiValue ){ + assert( !pItem->fg.isTabFunc && !pItem->fg.isIndexedBy ); + sqlite3_str_appendf(pAccum, "%u-ROW VALUES CLAUSE", + pItem->u1.nRow); }else{ sqlite3_str_appendf(pAccum, "(subquery-%u)", pSel->selId); } @@ -32499,8 +32786,10 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc) x.printfFlags |= SQLITE_PRINTF_INTERNAL; sqlite3_str_appendf(&x, "{%d:*} %!S", pItem->iCursor, pItem); if( pItem->pTab ){ - sqlite3_str_appendf(&x, " tab=%Q nCol=%d ptr=%p used=%llx", - pItem->pTab->zName, pItem->pTab->nCol, pItem->pTab, pItem->colUsed); 
+ sqlite3_str_appendf(&x, " tab=%Q nCol=%d ptr=%p used=%llx%s", + pItem->pTab->zName, pItem->pTab->nCol, pItem->pTab, + pItem->colUsed, + pItem->fg.rowidUsed ? "+rowid" : ""); } if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))==(JT_LEFT|JT_RIGHT) ){ sqlite3_str_appendf(&x, " FULL-OUTER-JOIN"); @@ -32540,12 +32829,14 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc) sqlite3TreeViewIdList(pView, pItem->u3.pUsing, (--n)>0, "USING"); } if( pItem->pSelect ){ + sqlite3TreeViewPush(&pView, i+1nSrc); if( pItem->pTab ){ Table *pTab = pItem->pTab; sqlite3TreeViewColumnList(pView, pTab->aCol, pTab->nCol, 1); } assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) ); sqlite3TreeViewSelect(pView, pItem->pSelect, (--n)>0); + sqlite3TreeViewPop(&pView); } if( pItem->fg.isTabFunc ){ sqlite3TreeViewExprList(pView, pItem->u1.pFuncArg, 0, "func-args:"); @@ -32649,7 +32940,7 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m sqlite3TreeViewItem(pView, "LIMIT", (n--)>0); sqlite3TreeViewExpr(pView, p->pLimit->pLeft, p->pLimit->pRight!=0); if( p->pLimit->pRight ){ - sqlite3TreeViewItem(pView, "OFFSET", (n--)>0); + sqlite3TreeViewItem(pView, "OFFSET", 0); sqlite3TreeViewExpr(pView, p->pLimit->pRight, 0); sqlite3TreeViewPop(&pView); } @@ -34950,6 +35241,44 @@ SQLITE_PRIVATE void sqlite3DequoteExpr(Expr *p){ sqlite3Dequote(p->u.zToken); } +/* +** Expression p is a QNUMBER (quoted number). Dequote the value in p->u.zToken +** and set the type to INTEGER or FLOAT. "Quoted" integers or floats are those +** that contain '_' characters that must be removed before further processing. 
+*/ +SQLITE_PRIVATE void sqlite3DequoteNumber(Parse *pParse, Expr *p){ + assert( p!=0 || pParse->db->mallocFailed ); + if( p ){ + const char *pIn = p->u.zToken; + char *pOut = p->u.zToken; + int bHex = (pIn[0]=='0' && (pIn[1]=='x' || pIn[1]=='X')); + int iValue; + assert( p->op==TK_QNUMBER ); + p->op = TK_INTEGER; + do { + if( *pIn!=SQLITE_DIGIT_SEPARATOR ){ + *pOut++ = *pIn; + if( *pIn=='e' || *pIn=='E' || *pIn=='.' ) p->op = TK_FLOAT; + }else{ + if( (bHex==0 && (!sqlite3Isdigit(pIn[-1]) || !sqlite3Isdigit(pIn[1]))) + || (bHex==1 && (!sqlite3Isxdigit(pIn[-1]) || !sqlite3Isxdigit(pIn[1]))) + ){ + sqlite3ErrorMsg(pParse, "unrecognized token: \"%s\"", p->u.zToken); + } + } + }while( *pIn++ ); + if( bHex ) p->op = TK_INTEGER; + + /* tag-20240227-a: If after dequoting, the number is an integer that + ** fits in 32 bits, then it must be converted into EP_IntValue. Other + ** parts of the code expect this. See also tag-20240227-b. */ + if( p->op==TK_INTEGER && sqlite3GetInt32(p->u.zToken, &iValue) ){ + p->u.iValue = iValue; + p->flags |= EP_IntValue; + } + } +} + /* ** If the input token p is quoted, try to adjust the token to remove ** the quotes. 
This is not always possible: @@ -36889,7 +37218,7 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 30 */ "SeekRowid" OpHelp("intkey=r[P3]"), /* 31 */ "NotExists" OpHelp("intkey=r[P3]"), /* 32 */ "Last" OpHelp(""), - /* 33 */ "IfSmaller" OpHelp(""), + /* 33 */ "IfSizeBetween" OpHelp(""), /* 34 */ "SorterSort" OpHelp(""), /* 35 */ "Sort" OpHelp(""), /* 36 */ "Rewind" OpHelp(""), @@ -36934,7 +37263,7 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 75 */ "Null" OpHelp("r[P2..P3]=NULL"), /* 76 */ "SoftNull" OpHelp("r[P1]=NULL"), /* 77 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), - /* 78 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"), + /* 78 */ "Variable" OpHelp("r[P2]=parameter(P1)"), /* 79 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), /* 80 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), /* 81 */ "SCopy" OpHelp("r[P2]=r[P1]"), @@ -39332,8 +39661,12 @@ static int unixLogErrorAtLine( ** available, the error message will often be an empty string. Not a ** huge problem. Incorrectly concluding that the GNU version is available ** could lead to a segfault though. + ** + ** Forum post 3f13857fa4062301 reports that the Android SDK may use + ** int-type return, depending on its version. */ -#if defined(STRERROR_R_CHAR_P) || defined(__USE_GNU) +#if (defined(STRERROR_R_CHAR_P) || defined(__USE_GNU)) \ + && !defined(ANDROID) && !defined(__ANDROID__) zErr = # endif strerror_r(iErrno, aErr, sizeof(aErr)-1); @@ -44431,12 +44764,19 @@ static int unixOpen( rc = SQLITE_READONLY_DIRECTORY; }else if( errno!=EISDIR && isReadWrite ){ /* Failed to open the file for read/write access. Try read-only. 
*/ + UnixUnusedFd *pReadonly = 0; flags &= ~(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE); openFlags &= ~(O_RDWR|O_CREAT); flags |= SQLITE_OPEN_READONLY; openFlags |= O_RDONLY; isReadonly = 1; - fd = robust_open(zName, openFlags, openMode); + pReadonly = findReusableFd(zName, flags); + if( pReadonly ){ + fd = pReadonly->fd; + sqlite3_free(pReadonly); + }else{ + fd = robust_open(zName, openFlags, openMode); + } } } if( fd<0 ){ @@ -69887,6 +70227,7 @@ struct IntegrityCk { StrAccum errMsg; /* Accumulate the error message text here */ u32 *heap; /* Min-heap used for analyzing cell coverage */ sqlite3 *db; /* Database connection running the check */ + i64 nRow; /* Number of rows visited in current tree */ }; /* @@ -70361,8 +70702,47 @@ int corruptPageError(int lineno, MemPage *p){ # define SQLITE_CORRUPT_PAGE(pMemPage) SQLITE_CORRUPT_PGNO(pMemPage->pgno) #endif +/* Default value for SHARED_LOCK_TRACE macro if shared-cache is disabled +** or if the lock tracking is disabled. This is always the value for +** release builds. +*/ +#define SHARED_LOCK_TRACE(X,MSG,TAB,TYPE) /*no-op*/ + #ifndef SQLITE_OMIT_SHARED_CACHE +#if 0 +/* ^---- Change to 1 and recompile to enable shared-lock tracing +** for debugging purposes. +** +** Print all shared-cache locks on a BtShared. Debugging use only. +*/ +static void sharedLockTrace( + BtShared *pBt, + const char *zMsg, + int iRoot, + int eLockType +){ + BtLock *pLock; + if( iRoot>0 ){ + printf("%s-%p %u%s:", zMsg, pBt, iRoot, eLockType==READ_LOCK?"R":"W"); + }else{ + printf("%s-%p:", zMsg, pBt); + } + for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){ + printf(" %p/%u%s", pLock->pBtree, pLock->iTable, + pLock->eLock==READ_LOCK ? "R" : "W"); + while( pLock->pNext && pLock->pBtree==pLock->pNext->pBtree ){ + pLock = pLock->pNext; + printf(",%u%s", pLock->iTable, pLock->eLock==READ_LOCK ? 
"R" : "W"); + } + } + printf("\n"); + fflush(stdout); +} +#undef SHARED_LOCK_TRACE +#define SHARED_LOCK_TRACE(X,MSG,TAB,TYPE) sharedLockTrace(X,MSG,TAB,TYPE) +#endif /* Shared-lock tracing */ + #ifdef SQLITE_DEBUG /* **** This function is only used as part of an assert() statement. *** @@ -70439,6 +70819,8 @@ static int hasSharedCacheTableLock( iTab = iRoot; } + SHARED_LOCK_TRACE(pBtree->pBt,"hasLock",iRoot,eLockType); + /* Search for the required lock. Either a write-lock on root-page iTab, a ** write-lock on the schema table, or (if the client is reading) a ** read-lock on iTab will suffice. Return 1 if any of these are found. */ @@ -70572,6 +70954,8 @@ static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){ BtLock *pLock = 0; BtLock *pIter; + SHARED_LOCK_TRACE(pBt,"setLock", iTable, eLock); + assert( sqlite3BtreeHoldsMutex(p) ); assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); assert( p->db!=0 ); @@ -70639,6 +71023,8 @@ static void clearAllSharedCacheTableLocks(Btree *p){ assert( p->sharable || 0==*ppIter ); assert( p->inTrans>0 ); + SHARED_LOCK_TRACE(pBt, "clearAllLocks", 0, 0); + while( *ppIter ){ BtLock *pLock = *ppIter; assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree ); @@ -70677,6 +71063,9 @@ static void clearAllSharedCacheTableLocks(Btree *p){ */ static void downgradeAllSharedCacheTableLocks(Btree *p){ BtShared *pBt = p->pBt; + + SHARED_LOCK_TRACE(pBt, "downgradeLocks", 0, 0); + if( pBt->pWriter==p ){ BtLock *pLock; pBt->pWriter = 0; @@ -75290,9 +75679,12 @@ static int accessPayload( if( pCur->aOverflow==0 || nOvfl*(int)sizeof(Pgno) > sqlite3MallocSize(pCur->aOverflow) ){ - Pgno *aNew = (Pgno*)sqlite3Realloc( - pCur->aOverflow, nOvfl*2*sizeof(Pgno) - ); + Pgno *aNew; + if( sqlite3FaultSim(413) ){ + aNew = 0; + }else{ + aNew = (Pgno*)sqlite3Realloc(pCur->aOverflow, nOvfl*2*sizeof(Pgno)); + } if( aNew==0 ){ return SQLITE_NOMEM_BKPT; }else{ @@ -75302,6 +75694,12 @@ static int accessPayload( memset(pCur->aOverflow, 0, 
nOvfl*sizeof(Pgno)); pCur->curFlags |= BTCF_ValidOvfl; }else{ + /* Sanity check the validity of the overflow page cache */ + assert( pCur->aOverflow[0]==nextPage + || pCur->aOverflow[0]==0 + || CORRUPT_DB ); + assert( pCur->aOverflow[0]!=0 || pCur->aOverflow[offset/ovflSize]==0 ); + /* If the overflow page-list cache has been allocated and the ** entry for the first required overflow page is valid, skip ** directly to it. @@ -75783,6 +76181,23 @@ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ return rc; } +#ifdef SQLITE_DEBUG +/* The cursors is CURSOR_VALID and has BTCF_AtLast set. Verify that +** this flags are true for a consistent database. +** +** This routine is is called from within assert() statements only. +** It is an internal verification routine and does not appear in production +** builds. +*/ +static int cursorIsAtLastEntry(BtCursor *pCur){ + int ii; + for(ii=0; iiiPage; ii++){ + if( pCur->aiIdx[ii]!=pCur->apPage[ii]->nCell ) return 0; + } + return pCur->ix==pCur->pPage->nCell-1 && pCur->pPage->leaf!=0; +} +#endif + /* Move the cursor to the last entry in the table. Return SQLITE_OK ** on success. Set *pRes to 0 if the cursor actually points to something ** or set *pRes to 1 if the table is empty. @@ -75811,18 +76226,7 @@ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ /* If the cursor already points to the last entry, this is a no-op. */ if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){ -#ifdef SQLITE_DEBUG - /* This block serves to assert() that the cursor really does point - ** to the last entry in the b-tree. 
*/ - int ii; - for(ii=0; iiiPage; ii++){ - assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell ); - } - assert( pCur->ix==pCur->pPage->nCell-1 || CORRUPT_DB ); - testcase( pCur->ix!=pCur->pPage->nCell-1 ); - /* ^-- dbsqlfuzz b92b72e4de80b5140c30ab71372ca719b8feb618 */ - assert( pCur->pPage->leaf ); -#endif + assert( cursorIsAtLastEntry(pCur) || CORRUPT_DB ); *pRes = 0; return SQLITE_OK; } @@ -75875,6 +76279,7 @@ SQLITE_PRIVATE int sqlite3BtreeTableMoveto( } if( pCur->info.nKeycurFlags & BTCF_AtLast)!=0 ){ + assert( cursorIsAtLastEntry(pCur) || CORRUPT_DB ); *pRes = -1; return SQLITE_OK; } @@ -76341,10 +76746,10 @@ SQLITE_PRIVATE i64 sqlite3BtreeRowCountEst(BtCursor *pCur){ assert( cursorOwnsBtShared(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); - /* Currently this interface is only called by the OP_IfSmaller - ** opcode, and it that case the cursor will always be valid and - ** will always point to a leaf node. */ - if( NEVER(pCur->eState!=CURSOR_VALID) ) return -1; + /* Currently this interface is only called by the OP_IfSizeBetween + ** opcode and the OP_Count opcode with P3=1. In either case, + ** the cursor will always be valid unless the btree is empty. */ + if( pCur->eState!=CURSOR_VALID ) return 0; if( NEVER(pCur->pPage->leaf==0) ) return -1; n = pCur->pPage->nCell; @@ -78475,7 +78880,7 @@ static int balance_nonroot( ** table-interior, index-leaf, or index-interior). 
*/ if( pOld->aData[0]!=apOld[0]->aData[0] ){ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pOld); goto balance_cleanup; } @@ -78499,7 +78904,7 @@ static int balance_nonroot( memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*(limit+pOld->nOverflow)); if( pOld->nOverflow>0 ){ if( NEVER(limitaiOvfl[0]) ){ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pOld); goto balance_cleanup; } limit = pOld->aiOvfl[0]; @@ -79142,7 +79547,7 @@ static int anotherValidCursor(BtCursor *pCur){ && pOther->eState==CURSOR_VALID && pOther->pPage==pCur->pPage ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pCur->pPage); } } return SQLITE_OK; @@ -79202,7 +79607,7 @@ static int balance(BtCursor *pCur){ /* The page being written is not a root page, and there is currently ** more than one reference to it. This only happens if the page is one ** of its own ancestor pages. Corruption. */ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pPage); }else{ MemPage * const pParent = pCur->apPage[iPage-1]; int const iIdx = pCur->aiIdx[iPage-1]; @@ -79366,7 +79771,7 @@ static SQLITE_NOINLINE int btreeOverwriteOverflowCell( rc = btreeGetPage(pBt, ovflPgno, &pPage, 0); if( rc ) return rc; if( sqlite3PagerPageRefcount(pPage->pDbPage)!=1 || pPage->isInit ){ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pPage); }else{ if( iOffset+ovflPageSize<(u32)nTotal ){ ovflPgno = get4byte(pPage->aData); @@ -79394,7 +79799,7 @@ static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){ if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd || pCur->info.pPayload < pPage->aData + pPage->cellOffset ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } if( pCur->info.nLocal==nTotal ){ /* The entire cell is local */ @@ -79475,7 +79880,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** Which can only happen if the SQLITE_NoSchemaError flag was set when ** the schema was loaded. 
This cannot be asserted though, as a user might ** set the flag, load the schema, and then unset the flag. */ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PGNO(pCur->pgnoRoot); } } @@ -79598,7 +80003,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( if( pPage->nFree<0 ){ if( NEVER(pCur->eState>CURSOR_INVALID) ){ /* ^^^^^--- due to the moveToRoot() call above */ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pPage); }else{ rc = btreeComputeFreeSpace(pPage); } @@ -79640,7 +80045,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( CellInfo info; assert( idx>=0 ); if( idx>=pPage->nCell ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ){ @@ -79667,10 +80072,10 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** necessary to add the PTRMAP_OVERFLOW1 pointer-map entry. */ assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */ if( oldCell < pPage->aData+pPage->hdrOffset+10 ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } if( oldCell+szNew > pPage->aDataEnd ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } memcpy(oldCell, newCell, szNew); return SQLITE_OK; @@ -79772,7 +80177,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 nIn = pSrc->info.nLocal; aIn = pSrc->info.pPayload; if( aIn+nIn>pSrc->pPage->aDataEnd ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pSrc->pPage); } nRem = pSrc->info.nPayload; if( nIn==nRem && nInpPage->maxLocal ){ @@ -79797,7 +80202,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 if( nRem>nIn ){ if( aIn+nIn+4>pSrc->pPage->aDataEnd ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pSrc->pPage); } ovflIn = get4byte(&pSrc->info.pPayload[nIn]); } @@ -79893,7 +80298,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ assert( rc!=SQLITE_OK || CORRUPT_DB || pCur->eState==CURSOR_VALID ); if( rc || 
pCur->eState!=CURSOR_VALID ) return rc; }else{ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PGNO(pCur->pgnoRoot); } } assert( pCur->eState==CURSOR_VALID ); @@ -79902,14 +80307,14 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ iCellIdx = pCur->ix; pPage = pCur->pPage; if( pPage->nCell<=iCellIdx ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } pCell = findCell(pPage, iCellIdx); if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } if( pCell<&pPage->aCellIdx[pPage->nCell] ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PAGE(pPage); } /* If the BTREE_SAVEPOSITION bit is on, then the cursor position must @@ -80000,7 +80405,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ n = pCur->pPage->pgno; } pCell = findCell(pLeaf, pLeaf->nCell-1); - if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT; + if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_PAGE(pLeaf); nCell = pLeaf->xCellSize(pLeaf, pCell); assert( MX_CELL_SIZE(pBt) >= nCell ); pTmp = pBt->pTmpSpace; @@ -80116,7 +80521,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){ */ sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot); if( pgnoRoot>btreePagecount(pBt) ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PGNO(pgnoRoot); } pgnoRoot++; @@ -80164,7 +80569,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){ } rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage); if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PGNO(pgnoRoot); } if( rc!=SQLITE_OK ){ releasePage(pRoot); @@ -80254,14 +80659,14 @@ static int clearDatabasePage( assert( sqlite3_mutex_held(pBt->mutex) ); if( pgno>btreePagecount(pBt) ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PGNO(pgno); } rc = getAndInitPage(pBt, pgno, &pPage, 0); if( rc ) return rc; if( (pBt->openFlags & 
BTREE_SINGLE)==0 && sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1)) ){ - rc = SQLITE_CORRUPT_BKPT; + rc = SQLITE_CORRUPT_PAGE(pPage); goto cleardatabasepage_out; } hdr = pPage->hdrOffset; @@ -80365,7 +80770,7 @@ static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){ assert( p->inTrans==TRANS_WRITE ); assert( iTable>=2 ); if( iTable>btreePagecount(pBt) ){ - return SQLITE_CORRUPT_BKPT; + return SQLITE_CORRUPT_PGNO(iTable); } rc = sqlite3BtreeClearTable(p, iTable, 0); @@ -80959,6 +81364,9 @@ static int checkTreePage( ** number of cells on the page. */ nCell = get2byte(&data[hdr+3]); assert( pPage->nCell==nCell ); + if( pPage->leaf || pPage->intKey==0 ){ + pCheck->nRow += nCell; + } /* EVIDENCE-OF: R-23882-45353 The cell pointer array of a b-tree page ** immediately follows the b-tree page header. */ @@ -81070,6 +81478,7 @@ static int checkTreePage( btreeHeapInsert(heap, (pc<<16)|(pc+size-1)); } } + assert( heap!=0 ); /* Add the freeblocks to the min-heap ** ** EVIDENCE-OF: R-20690-50594 The second field of the b-tree page header @@ -81169,6 +81578,7 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( sqlite3 *db, /* Database connection that is running the check */ Btree *p, /* The btree to be checked */ Pgno *aRoot, /* An array of root pages numbers for individual trees */ + Mem *aCnt, /* Memory cells to write counts for each tree to */ int nRoot, /* Number of entries in aRoot[] */ int mxErr, /* Stop reporting errors after this many */ int *pnErr, /* OUT: Write number of errors seen to this variable */ @@ -81182,7 +81592,9 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( int bPartial = 0; /* True if not checking all btrees */ int bCkFreelist = 1; /* True to scan the freelist */ VVA_ONLY( int nRef ); + assert( nRoot>0 ); + assert( aCnt!=0 ); /* aRoot[0]==0 means this is a partial check */ if( aRoot[0]==0 ){ @@ -81255,15 +81667,18 @@ SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( testcase( pBt->db->flags & SQLITE_CellSizeCk ); pBt->db->flags &= 
~(u64)SQLITE_CellSizeCk; for(i=0; (int)iautoVacuum && aRoot[i]>1 && !bPartial ){ - checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0); - } + if( pBt->autoVacuum && aRoot[i]>1 && !bPartial ){ + checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0); + } #endif - sCheck.v0 = aRoot[i]; - checkTreePage(&sCheck, aRoot[i], ¬Used, LARGEST_INT64); + sCheck.v0 = aRoot[i]; + checkTreePage(&sCheck, aRoot[i], ¬Used, LARGEST_INT64); + } + sqlite3MemSetArrayInt64(aCnt, i, sCheck.nRow); } pBt->db->flags = savedDbFlags; @@ -83318,6 +83733,13 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){ } } +/* +** Set the iIdx'th entry of array aMem[] to contain integer value val. +*/ +SQLITE_PRIVATE void sqlite3MemSetArrayInt64(sqlite3_value *aMem, int iIdx, i64 val){ + sqlite3VdbeMemSetInt64(&aMem[iIdx], val); +} + /* A no-op destructor */ SQLITE_PRIVATE void sqlite3NoopDestructor(void *p){ UNUSED_PARAMETER(p); } @@ -84006,14 +84428,20 @@ static int valueFromExpr( } /* Handle negative integers in a single step. This is needed in the - ** case when the value is -9223372036854775808. - */ - if( op==TK_UMINUS - && (pExpr->pLeft->op==TK_INTEGER || pExpr->pLeft->op==TK_FLOAT) ){ - pExpr = pExpr->pLeft; - op = pExpr->op; - negInt = -1; - zNeg = "-"; + ** case when the value is -9223372036854775808. Except - do not do this + ** for hexadecimal literals. 
*/ + if( op==TK_UMINUS ){ + Expr *pLeft = pExpr->pLeft; + if( (pLeft->op==TK_INTEGER || pLeft->op==TK_FLOAT) ){ + if( ExprHasProperty(pLeft, EP_IntValue) + || pLeft->u.zToken[0]!='0' || (pLeft->u.zToken[1] & ~0x20)!='X' + ){ + pExpr = pLeft; + op = pExpr->op; + negInt = -1; + zNeg = "-"; + } + } } if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){ @@ -84022,12 +84450,26 @@ static int valueFromExpr( if( ExprHasProperty(pExpr, EP_IntValue) ){ sqlite3VdbeMemSetInt64(pVal, (i64)pExpr->u.iValue*negInt); }else{ - zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken); - if( zVal==0 ) goto no_mem; - sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC); + i64 iVal; + if( op==TK_INTEGER && 0==sqlite3DecOrHexToI64(pExpr->u.zToken, &iVal) ){ + sqlite3VdbeMemSetInt64(pVal, iVal*negInt); + }else{ + zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken); + if( zVal==0 ) goto no_mem; + sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC); + } } - if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_BLOB ){ - sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8); + if( affinity==SQLITE_AFF_BLOB ){ + if( op==TK_FLOAT ){ + assert( pVal && pVal->z && pVal->flags==(MEM_Str|MEM_Term) ); + sqlite3AtoF(pVal->z, &pVal->u.r, pVal->n, SQLITE_UTF8); + pVal->flags = MEM_Real; + }else if( op==TK_INTEGER ){ + /* This case is required by -9223372036854775808 and other strings + ** that look like integers but cannot be handled by the + ** sqlite3DecOrHexToI64() call above. 
*/ + sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8); + } }else{ sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8); } @@ -84297,17 +84739,17 @@ SQLITE_PRIVATE int sqlite3Stat4Column( sqlite3_value **ppVal /* OUT: Extracted value */ ){ u32 t = 0; /* a column type code */ - int nHdr; /* Size of the header in the record */ - int iHdr; /* Next unread header byte */ - int iField; /* Next unread data byte */ - int szField = 0; /* Size of the current data field */ + u32 nHdr; /* Size of the header in the record */ + u32 iHdr; /* Next unread header byte */ + i64 iField; /* Next unread data byte */ + u32 szField = 0; /* Size of the current data field */ int i; /* Column index */ u8 *a = (u8*)pRec; /* Typecast byte array */ Mem *pMem = *ppVal; /* Write result into this Mem object */ assert( iCol>0 ); iHdr = getVarint32(a, nHdr); - if( nHdr>nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT; + if( nHdr>(u32)nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT; iField = nHdr; for(i=0; i<=iCol; i++){ iHdr += getVarint32(&a[iHdr], t); @@ -85342,6 +85784,15 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ assert( aLabel!=0 ); /* True because of tag-20230419-1 */ pOp->p2 = aLabel[ADDR(pOp->p2)]; } + + /* OPFLG_JUMP opcodes never have P2==0, though OPFLG_JUMP0 opcodes + ** might */ + assert( pOp->p2>0 + || (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP0)!=0 ); + + /* Jumps never go off the end of the bytecode array */ + assert( pOp->p2nOp + || (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)==0 ); break; } } @@ -87749,7 +88200,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ /* Check for immediate foreign key violations. 
*/ if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){ - sqlite3VdbeCheckFk(p, 0); + (void)sqlite3VdbeCheckFk(p, 0); } /* If the auto-commit flag is set and this is the only active writer @@ -88919,17 +89370,15 @@ SQLITE_PRIVATE int sqlite3IntFloatCompare(i64 i, double r){ return (xr); }else{ i64 y; - double s; if( r<-9223372036854775808.0 ) return +1; if( r>=9223372036854775808.0 ) return -1; y = (i64)r; if( iy ) return +1; - s = (double)i; - testcase( doubleLt(s,r) ); - testcase( doubleLt(r,s) ); - testcase( doubleEq(r,s) ); - return (sr); + testcase( doubleLt(((double)i),r) ); + testcase( doubleLt(r,((double)i)) ); + testcase( doubleEq(r,((double)i)) ); + return (((double)i)r); } } @@ -89732,7 +90181,8 @@ SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff assert( iVar>0 ); if( v ){ Mem *pMem = &v->aVar[iVar-1]; - assert( (v->db->flags & SQLITE_EnableQPSG)==0 ); + assert( (v->db->flags & SQLITE_EnableQPSG)==0 + || (v->db->mDbFlags & DBFLAG_InternalFunc)!=0 ); if( 0==(pMem->flags & MEM_Null) ){ sqlite3_value *pRet = sqlite3ValueNew(v->db); if( pRet ){ @@ -89752,7 +90202,8 @@ SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff */ SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe *v, int iVar){ assert( iVar>0 ); - assert( (v->db->flags & SQLITE_EnableQPSG)==0 ); + assert( (v->db->flags & SQLITE_EnableQPSG)==0 + || (v->db->mDbFlags & DBFLAG_InternalFunc)!=0 ); if( iVar>=32 ){ v->expmask |= 0x80000000; }else{ @@ -92337,7 +92788,6 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2( } if( flags & SQLITE_SCANSTAT_COMPLEX ){ idx = iScan; - pScan = &p->aScan[idx]; }else{ /* If the COMPLEX flag is clear, then this function must ignore any ** ScanStatus structures with ScanStatus.addrLoop set to 0. 
*/ @@ -92350,6 +92800,8 @@ SQLITE_API int sqlite3_stmt_scanstatus_v2( } } if( idx>=p->nScan ) return 1; + assert( pScan==0 || pScan==&p->aScan[idx] ); + pScan = &p->aScan[idx]; switch( iScanStatusOp ){ case SQLITE_SCANSTAT_NLOOP: { @@ -93798,7 +94250,7 @@ case OP_Return: { /* in1 */ ** ** See also: EndCoroutine */ -case OP_InitCoroutine: { /* jump */ +case OP_InitCoroutine: { /* jump0 */ assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) ); assert( pOp->p2>=0 && pOp->p2nOp ); assert( pOp->p3>=0 && pOp->p3nOp ); @@ -93821,7 +94273,9 @@ case OP_InitCoroutine: { /* jump */ ** ** The instruction at the address in register P1 is a Yield. ** Jump to the P2 parameter of that Yield. -** After the jump, register P1 becomes undefined. +** After the jump, the value register P1 is left with a value +** such that subsequent OP_Yields go back to the this same +** OP_EndCoroutine instruction. ** ** See also: InitCoroutine */ @@ -93833,8 +94287,8 @@ case OP_EndCoroutine: { /* in1 */ pCaller = &aOp[pIn1->u.i]; assert( pCaller->opcode==OP_Yield ); assert( pCaller->p2>=0 && pCaller->p2nOp ); + pIn1->u.i = (int)(pOp - p->aOp) - 1; pOp = &aOp[pCaller->p2 - 1]; - pIn1->flags = MEM_Undefined; break; } @@ -93851,7 +94305,7 @@ case OP_EndCoroutine: { /* in1 */ ** ** See also: InitCoroutine */ -case OP_Yield: { /* in1, jump */ +case OP_Yield: { /* in1, jump0 */ int pcDest; pIn1 = &aMem[pOp->p1]; assert( VdbeMemDynamic(pIn1)==0 ); @@ -94181,19 +94635,15 @@ case OP_Blob: { /* out2 */ break; } -/* Opcode: Variable P1 P2 * P4 * -** Synopsis: r[P2]=parameter(P1,P4) +/* Opcode: Variable P1 P2 * * * +** Synopsis: r[P2]=parameter(P1) ** ** Transfer the values of bound parameter P1 into register P2 -** -** If the parameter is named, then its name appears in P4. -** The P4 value is used by sqlite3_bind_parameter_name(). 
*/ case OP_Variable: { /* out2 */ Mem *pVar; /* Value being transferred */ assert( pOp->p1>0 && pOp->p1<=p->nVar ); - assert( pOp->p4.z==0 || pOp->p4.z==sqlite3VListNumToName(p->pVList,pOp->p1) ); pVar = &p->aVar[pOp->p1 - 1]; if( sqlite3VdbeMemTooBig(pVar) ){ goto too_big; @@ -94714,7 +95164,7 @@ case OP_AddImm: { /* in1 */ ** without data loss, then jump immediately to P2, or if P2==0 ** raise an SQLITE_MISMATCH exception. */ -case OP_MustBeInt: { /* jump, in1 */ +case OP_MustBeInt: { /* jump0, in1 */ pIn1 = &aMem[pOp->p1]; if( (pIn1->flags & MEM_Int)==0 ){ applyAffinity(pIn1, SQLITE_AFF_NUMERIC, encoding); @@ -94755,7 +95205,7 @@ case OP_RealAffinity: { /* in1 */ } #endif -#ifndef SQLITE_OMIT_CAST +#if !defined(SQLITE_OMIT_CAST) && !defined(SQLITE_OMIT_ANALYZE) /* Opcode: Cast P1 P2 * * * ** Synopsis: affinity(r[P1]) ** @@ -96327,11 +96777,16 @@ case OP_MakeRecord: { switch( len ){ default: zPayload[7] = (u8)(v&0xff); v >>= 8; zPayload[6] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 6: zPayload[5] = (u8)(v&0xff); v >>= 8; zPayload[4] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 4: zPayload[3] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 3: zPayload[2] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 2: zPayload[1] = (u8)(v&0xff); v >>= 8; + /* no break */ deliberate_fall_through case 1: zPayload[0] = (u8)(v&0xff); } zPayload += len; @@ -97250,7 +97705,8 @@ case OP_SequenceTest: { ** is the only cursor opcode that works with a pseudo-table. ** ** P3 is the number of fields in the records that will be stored by -** the pseudo-table. +** the pseudo-table. If P2 is 0 or negative then the pseudo-cursor +** will return NULL for every column. 
*/ case OP_OpenPseudo: { VdbeCursor *pCx; @@ -97393,10 +97849,10 @@ case OP_ColumnsUsed: { ** ** See also: Found, NotFound, SeekGt, SeekGe, SeekLt */ -case OP_SeekLT: /* jump, in3, group, ncycle */ -case OP_SeekLE: /* jump, in3, group, ncycle */ -case OP_SeekGE: /* jump, in3, group, ncycle */ -case OP_SeekGT: { /* jump, in3, group, ncycle */ +case OP_SeekLT: /* jump0, in3, group, ncycle */ +case OP_SeekLE: /* jump0, in3, group, ncycle */ +case OP_SeekGE: /* jump0, in3, group, ncycle */ +case OP_SeekGT: { /* jump0, in3, group, ncycle */ int res; /* Comparison result */ int oc; /* Opcode */ VdbeCursor *pC; /* The cursor to seek */ @@ -98063,7 +98519,7 @@ case OP_Found: { /* jump, in3, ncycle */ ** ** See also: Found, NotFound, NoConflict, SeekRowid */ -case OP_SeekRowid: { /* jump, in3, ncycle */ +case OP_SeekRowid: { /* jump0, in3, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res; @@ -98822,7 +99278,7 @@ case OP_NullRow: { ** configured to use Prev, not Next. */ case OP_SeekEnd: /* ncycle */ -case OP_Last: { /* jump, ncycle */ +case OP_Last: { /* jump0, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res; @@ -98856,28 +99312,38 @@ case OP_Last: { /* jump, ncycle */ break; } -/* Opcode: IfSmaller P1 P2 P3 * * +/* Opcode: IfSizeBetween P1 P2 P3 P4 * ** -** Estimate the number of rows in the table P1. Jump to P2 if that -** estimate is less than approximately 2**(0.1*P3). +** Let N be the approximate number of rows in the table or index +** with cursor P1 and let X be 10*log2(N) if N is positive or -1 +** if N is zero. +** +** Jump to P2 if X is in between P3 and P4, inclusive. 
*/ -case OP_IfSmaller: { /* jump */ +case OP_IfSizeBetween: { /* jump */ VdbeCursor *pC; BtCursor *pCrsr; int res; i64 sz; assert( pOp->p1>=0 && pOp->p1nCursor ); + assert( pOp->p4type==P4_INT32 ); + assert( pOp->p3>=-1 && pOp->p3<=640*2 ); + assert( pOp->p4.i>=-1 && pOp->p4.i<=640*2 ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); pCrsr = pC->uc.pCursor; assert( pCrsr ); rc = sqlite3BtreeFirst(pCrsr, &res); if( rc ) goto abort_due_to_error; - if( res==0 ){ + if( res!=0 ){ + sz = -1; /* -Infinity encoding */ + }else{ sz = sqlite3BtreeRowCountEst(pCrsr); - if( ALWAYS(sz>=0) && sqlite3LogEst((u64)sz)p3 ) res = 1; + assert( sz>0 ); + sz = sqlite3LogEst((u64)sz); } + res = sz>=pOp->p3 && sz<=pOp->p4.i; VdbeBranchTaken(res!=0,2); if( res ) goto jump_to_p2; break; @@ -98930,7 +99396,7 @@ case OP_Sort: { /* jump ncycle */ ** from the beginning toward the end. In other words, the cursor is ** configured to use Next, not Prev. */ -case OP_Rewind: { /* jump, ncycle */ +case OP_Rewind: { /* jump0, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res; @@ -99577,11 +100043,18 @@ case OP_CreateBtree: { /* out2 */ break; } -/* Opcode: SqlExec * * * P4 * +/* Opcode: SqlExec P1 P2 * P4 * ** ** Run the SQL statement or statements specified in the P4 string. -** Disable Auth and Trace callbacks while those statements are running if -** P1 is true. +** +** The P1 parameter is a bitmask of options: +** +** 0x0001 Disable Auth and Trace callbacks while the statements +** in P4 are running. +** +** 0x0002 Set db->nAnalysisLimit to P2 while the statements in +** P4 are running. 
+** */ case OP_SqlExec: { char *zErr; @@ -99589,6 +100062,7 @@ case OP_SqlExec: { sqlite3_xauth xAuth; #endif u8 mTrace; + int savedAnalysisLimit; sqlite3VdbeIncrWriteCounter(p, 0); db->nSqlExec++; @@ -99597,18 +100071,23 @@ case OP_SqlExec: { xAuth = db->xAuth; #endif mTrace = db->mTrace; - if( pOp->p1 ){ + savedAnalysisLimit = db->nAnalysisLimit; + if( pOp->p1 & 0x0001 ){ #ifndef SQLITE_OMIT_AUTHORIZATION db->xAuth = 0; #endif db->mTrace = 0; } + if( pOp->p1 & 0x0002 ){ + db->nAnalysisLimit = pOp->p2; + } rc = sqlite3_exec(db, pOp->p4.z, 0, 0, &zErr); db->nSqlExec--; #ifndef SQLITE_OMIT_AUTHORIZATION db->xAuth = xAuth; #endif db->mTrace = mTrace; + db->nAnalysisLimit = savedAnalysisLimit; if( zErr || rc ){ sqlite3VdbeError(p, "%s", zErr); sqlite3_free(zErr); @@ -99760,11 +100239,11 @@ case OP_DropTrigger: { /* Opcode: IntegrityCk P1 P2 P3 P4 P5 ** ** Do an analysis of the currently open database. Store in -** register P1 the text of an error message describing any problems. -** If no problems are found, store a NULL in register P1. +** register (P1+1) the text of an error message describing any problems. +** If no problems are found, store a NULL in register (P1+1). ** -** The register P3 contains one less than the maximum number of allowed errors. -** At most reg(P3) errors will be reported. +** The register (P1) contains one less than the maximum number of allowed +** errors. At most reg(P1) errors will be reported. ** In other words, the analysis stops as soon as reg(P1) errors are ** seen. Reg(P1) is updated with the number of errors remaining. 
** @@ -99784,19 +100263,21 @@ case OP_IntegrityCk: { Mem *pnErr; /* Register keeping track of errors remaining */ assert( p->bIsReader ); + assert( pOp->p4type==P4_INTARRAY ); nRoot = pOp->p2; aRoot = pOp->p4.ai; assert( nRoot>0 ); + assert( aRoot!=0 ); assert( aRoot[0]==(Pgno)nRoot ); - assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) ); - pnErr = &aMem[pOp->p3]; + assert( pOp->p1>0 && (pOp->p1+1)<=(p->nMem+1 - p->nCursor) ); + pnErr = &aMem[pOp->p1]; assert( (pnErr->flags & MEM_Int)!=0 ); assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 ); - pIn1 = &aMem[pOp->p1]; + pIn1 = &aMem[pOp->p1+1]; assert( pOp->p5nDb ); assert( DbMaskTest(p->btreeMask, pOp->p5) ); - rc = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], nRoot, - (int)pnErr->u.i+1, &nErr, &z); + rc = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], + &aMem[pOp->p3], nRoot, (int)pnErr->u.i+1, &nErr, &z); sqlite3VdbeMemSetNull(pIn1); if( nErr==0 ){ assert( z==0 ); @@ -99923,7 +100404,9 @@ case OP_RowSetTest: { /* jump, in1, in3 */ ** P1 contains the address of the memory cell that contains the first memory ** cell in an array of values used as arguments to the sub-program. P2 ** contains the address to jump to if the sub-program throws an IGNORE -** exception using the RAISE() function. Register P3 contains the address +** exception using the RAISE() function. P2 might be zero, if there is +** no possibility that an IGNORE exception will be raised. +** Register P3 contains the address ** of a memory cell in this (the parent) VM that is used to allocate the ** memory required by the sub-vdbe at runtime. ** @@ -99931,7 +100414,7 @@ case OP_RowSetTest: { /* jump, in1, in3 */ ** ** If P5 is non-zero, then recursive program invocation is enabled. 
*/ -case OP_Program: { /* jump */ +case OP_Program: { /* jump0 */ int nMem; /* Number of memory registers for sub-program */ int nByte; /* Bytes of runtime space required for sub-program */ Mem *pRt; /* Register to allocate runtime space */ @@ -101480,7 +101963,7 @@ case OP_Filter: { /* jump */ ** error is encountered. */ case OP_Trace: -case OP_Init: { /* jump */ +case OP_Init: { /* jump0 */ int i; #ifndef SQLITE_OMIT_TRACE char *zTrace; @@ -105381,10 +105864,10 @@ static int bytecodevtabColumn( #ifdef SQLITE_ENABLE_STMT_SCANSTATUS case 9: /* nexec */ - sqlite3_result_int(ctx, pOp->nExec); + sqlite3_result_int64(ctx, pOp->nExec); break; case 10: /* ncycle */ - sqlite3_result_int(ctx, pOp->nCycle); + sqlite3_result_int64(ctx, pOp->nCycle); break; #else case 9: /* nexec */ @@ -106477,7 +106960,7 @@ static void extendFJMatch( static SQLITE_NOINLINE int isValidSchemaTableName( const char *zTab, /* Name as it appears in the SQL */ Table *pTab, /* The schema table we are trying to match */ - Schema *pSchema /* non-NULL if a database qualifier is present */ + const char *zDb /* non-NULL if a database qualifier is present */ ){ const char *zLegacy; assert( pTab!=0 ); @@ -106488,7 +106971,7 @@ static SQLITE_NOINLINE int isValidSchemaTableName( if( sqlite3StrICmp(zTab+7, &PREFERRED_TEMP_SCHEMA_TABLE[7])==0 ){ return 1; } - if( pSchema==0 ) return 0; + if( zDb==0 ) return 0; if( sqlite3StrICmp(zTab+7, &LEGACY_SCHEMA_TABLE[7])==0 ) return 1; if( sqlite3StrICmp(zTab+7, &PREFERRED_SCHEMA_TABLE[7])==0 ) return 1; }else{ @@ -106528,7 +107011,7 @@ static int lookupName( Parse *pParse, /* The parsing context */ const char *zDb, /* Name of the database containing table, or NULL */ const char *zTab, /* Name of table containing column, or NULL */ - const char *zCol, /* Name of the column. */ + const Expr *pRight, /* Name of the column. 
*/ NameContext *pNC, /* The name context used to resolve the name */ Expr *pExpr /* Make this EXPR node point to the selected column */ ){ @@ -106545,6 +107028,7 @@ static int lookupName( Table *pTab = 0; /* Table holding the row */ Column *pCol; /* A column of pTab */ ExprList *pFJMatch = 0; /* Matches for FULL JOIN .. USING */ + const char *zCol = pRight->u.zToken; assert( pNC ); /* the name context cannot be NULL. */ assert( zCol ); /* The Z in X.Y.Z cannot be NULL */ @@ -106670,7 +107154,7 @@ static int lookupName( } }else if( sqlite3StrICmp(zTab, pTab->zName)!=0 ){ if( pTab->tnum!=1 ) continue; - if( !isValidSchemaTableName(zTab, pTab, pSchema) ) continue; + if( !isValidSchemaTableName(zTab, pTab, zDb) ) continue; } assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT && pItem->zAlias ){ @@ -106776,7 +107260,8 @@ static int lookupName( if( pParse->bReturning ){ if( (pNC->ncFlags & NC_UBaseReg)!=0 && ALWAYS(zTab==0 - || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) + || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0 + || isValidSchemaTableName(zTab, pParse->pTriggerTab, 0)) ){ pExpr->iTable = op!=TK_DELETE; pTab = pParse->pTriggerTab; @@ -106880,6 +107365,11 @@ static int lookupName( && ALWAYS(VisibleRowid(pMatch->pTab) || pMatch->fg.isNestedFrom) ){ cnt = cntTab; +#if SQLITE_ALLOW_ROWID_IN_VIEW+0==2 + if( pMatch->pTab!=0 && IsView(pMatch->pTab) ){ + eNewExprOp = TK_NULL; + } +#endif if( pMatch->fg.isNestedFrom==0 ) pExpr->iColumn = -1; pExpr->affExpr = SQLITE_AFF_INTEGER; } @@ -107033,6 +107523,10 @@ static int lookupName( sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol); }else if( zTab ){ sqlite3ErrorMsg(pParse, "%s: %s.%s", zErr, zTab, zCol); + }else if( cnt==0 && ExprHasProperty(pRight,EP_DblQuoted) ){ + sqlite3ErrorMsg(pParse, "%s: \"%s\" - should this be a" + " string literal in single-quotes?", + zErr, zCol); }else{ sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol); } @@ -107066,8 +107560,12 @@ static int lookupName( ** If a generated 
column is referenced, set bits for every column ** of the table. */ - if( pExpr->iColumn>=0 && cnt==1 && pMatch!=0 ){ - pMatch->colUsed |= sqlite3ExprColUsed(pExpr); + if( pMatch ){ + if( pExpr->iColumn>=0 ){ + pMatch->colUsed |= sqlite3ExprColUsed(pExpr); + }else{ + pMatch->fg.rowidUsed = 1; + } } pExpr->op = eNewExprOp; @@ -107310,7 +107808,6 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ */ case TK_ID: case TK_DOT: { - const char *zColumn; const char *zTable; const char *zDb; Expr *pRight; @@ -107319,7 +107816,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ zDb = 0; zTable = 0; assert( !ExprHasProperty(pExpr, EP_IntValue) ); - zColumn = pExpr->u.zToken; + pRight = pExpr; }else{ Expr *pLeft = pExpr->pLeft; testcase( pNC->ncFlags & NC_IdxExpr ); @@ -107338,14 +107835,13 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ } assert( ExprUseUToken(pLeft) && ExprUseUToken(pRight) ); zTable = pLeft->u.zToken; - zColumn = pRight->u.zToken; assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, (void*)pExpr, (void*)pRight); sqlite3RenameTokenRemap(pParse, (void*)&pExpr->y.pTab, (void*)pLeft); } } - return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr); + return lookupName(pParse, zDb, zTable, pRight, pNC, pExpr); } /* Resolve function names @@ -107521,11 +108017,9 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ #endif } } -#ifndef SQLITE_OMIT_WINDOWFUNC - else if( ExprHasProperty(pExpr, EP_WinFunc) ){ + else if( ExprHasProperty(pExpr, EP_WinFunc) || pExpr->pLeft ){ is_agg = 1; } -#endif sqlite3WalkExprList(pWalker, pList); if( is_agg ){ if( pExpr->pLeft ){ @@ -107595,6 +108089,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ testcase( pNC->ncFlags & NC_PartIdx ); testcase( pNC->ncFlags & NC_IdxExpr ); testcase( pNC->ncFlags & NC_GenCol ); + assert( pExpr->x.pSelect ); if( pNC->ncFlags & NC_SelfRef ){ notValidImpl(pParse, pNC, "subqueries", pExpr, pExpr); }else{ @@ 
-107603,6 +108098,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ assert( pNC->nRef>=nRef ); if( nRef!=pNC->nRef ){ ExprSetProperty(pExpr, EP_VarSelect); + pExpr->x.pSelect->selFlags |= SF_Correlated; } pNC->ncFlags |= NC_Subquery; } @@ -108128,6 +108624,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ if( pOuterNC ) pOuterNC->nNestedSelect++; for(i=0; ipSrc->nSrc; i++){ SrcItem *pItem = &p->pSrc->a[i]; + assert( pItem->zName!=0 || pItem->pSelect!=0 );/* Test of tag-20240424-1*/ if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){ int nRef = pOuterNC ? pOuterNC->nRef : 0; const char *zSavedContext = pParse->zAuthContext; @@ -108389,6 +108886,9 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames( ** Resolve all names for all expression in an expression list. This is ** just like sqlite3ResolveExprNames() except that it works for an expression ** list rather than a single expression. +** +** The return value is SQLITE_OK (0) for success or SQLITE_ERROR (1) for a +** failure. */ SQLITE_PRIVATE int sqlite3ResolveExprListNames( NameContext *pNC, /* Namespace to resolve expressions in. 
*/ @@ -108397,7 +108897,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( int i; int savedHasAgg = 0; Walker w; - if( pList==0 ) return WRC_Continue; + if( pList==0 ) return SQLITE_OK; w.pParse = pNC->pParse; w.xExprCallback = resolveExprStep; w.xSelectCallback = resolveSelectStep; @@ -108411,7 +108911,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( #if SQLITE_MAX_EXPR_DEPTH>0 w.pParse->nHeight += pExpr->nHeight; if( sqlite3ExprCheckHeight(w.pParse, w.pParse->nHeight) ){ - return WRC_Abort; + return SQLITE_ERROR; } #endif sqlite3WalkExprNN(&w, pExpr); @@ -108428,10 +108928,10 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( (NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin|NC_OrderAgg); } - if( w.pParse->nErr>0 ) return WRC_Abort; + if( w.pParse->nErr>0 ) return SQLITE_ERROR; } pNC->ncFlags |= savedHasAgg; - return WRC_Continue; + return SQLITE_OK; } /* @@ -109434,11 +109934,12 @@ SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr *pExpr, int iOfst){ ** appear to be quoted. If the quotes were of the form "..." (double-quotes) ** then the EP_DblQuoted flag is set on the expression node. ** -** Special case: If op==TK_INTEGER and pToken points to a string that -** can be translated into a 32-bit integer, then the token is not -** stored in u.zToken. Instead, the integer values is written -** into u.iValue and the EP_IntValue flag is set. No extra storage +** Special case (tag-20240227-a): If op==TK_INTEGER and pToken points to +** a string that can be translated into a 32-bit integer, then the token is +** not stored in u.zToken. Instead, the integer values is written +** into u.iValue and the EP_IntValue flag is set. No extra storage ** is allocated to hold the integer text and the dequote flag is ignored. +** See also tag-20240227-b. 
*/ SQLITE_PRIVATE Expr *sqlite3ExprAlloc( sqlite3 *db, /* Handle for sqlite3DbMallocRawNN() */ @@ -109454,7 +109955,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprAlloc( if( pToken ){ if( op!=TK_INTEGER || pToken->z==0 || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - nExtra = pToken->n+1; + nExtra = pToken->n+1; /* tag-20240227-a */ assert( iValue>=0 ); } } @@ -109886,6 +110387,7 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ assert( p!=0 ); assert( db!=0 ); +exprDeleteRestart: assert( !ExprUseUValue(p) || p->u.iValue>=0 ); assert( !ExprUseYWin(p) || !ExprUseYSub(p) ); assert( !ExprUseYWin(p) || p->y.pWin!=0 || db->mallocFailed ); @@ -109901,7 +110403,6 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ if( !ExprHasProperty(p, (EP_TokenOnly|EP_Leaf)) ){ /* The Expr.x union is never used at the same time as Expr.pRight */ assert( (ExprUseXList(p) && p->x.pList==0) || p->pRight==0 ); - if( p->pLeft && p->op!=TK_SELECT_COLUMN ) sqlite3ExprDeleteNN(db, p->pLeft); if( p->pRight ){ assert( !ExprHasProperty(p, EP_WinFunc) ); sqlite3ExprDeleteNN(db, p->pRight); @@ -109916,6 +110417,19 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ } #endif } + if( p->pLeft && p->op!=TK_SELECT_COLUMN ){ + Expr *pLeft = p->pLeft; + if( !ExprHasProperty(p, EP_Static) + && !ExprHasProperty(pLeft, EP_Static) + ){ + /* Avoid unnecessary recursion on unary operators */ + sqlite3DbNNFreeNN(db, p); + p = pLeft; + goto exprDeleteRestart; + }else{ + sqlite3ExprDeleteNN(db, pLeft); + } + } } if( !ExprHasProperty(p, EP_Static) ){ sqlite3DbNNFreeNN(db, p); @@ -109948,11 +110462,11 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){ ** ** The pExpr might be deleted immediately on an OOM error. ** -** The deferred delete is (currently) implemented by adding the -** pExpr to the pParse->pConstExpr list with a register number of 0. 
+** Return 0 if the delete was successfully deferred. Return non-zero +** if the delete happened immediately because of an OOM. */ -SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){ - sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr); +SQLITE_PRIVATE int sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){ + return 0==sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr); } /* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the @@ -110388,17 +110902,19 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int fla pNewItem->iCursor = pOldItem->iCursor; pNewItem->addrFillSub = pOldItem->addrFillSub; pNewItem->regReturn = pOldItem->regReturn; + pNewItem->regResult = pOldItem->regResult; if( pNewItem->fg.isIndexedBy ){ pNewItem->u1.zIndexedBy = sqlite3DbStrDup(db, pOldItem->u1.zIndexedBy); + }else if( pNewItem->fg.isTabFunc ){ + pNewItem->u1.pFuncArg = + sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags); + }else{ + pNewItem->u1.nRow = pOldItem->u1.nRow; } pNewItem->u2 = pOldItem->u2; if( pNewItem->fg.isCte ){ pNewItem->u2.pCteUse->nUse++; } - if( pNewItem->fg.isTabFunc ){ - pNewItem->u1.pFuncArg = - sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags); - } pTab = pNewItem->pTab = pOldItem->pTab; if( pTab ){ pTab->nTabRef++; @@ -110864,6 +111380,54 @@ SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr *pExpr){ return pExpr; } +/* +** pExpr is a TK_FUNCTION node. Try to determine whether or not the +** function is a constant function. A function is constant if all of +** the following are true: +** +** (1) It is a scalar function (not an aggregate or window function) +** (2) It has either the SQLITE_FUNC_CONSTANT or SQLITE_FUNC_SLOCHNG +** property. +** (3) All of its arguments are constants +** +** This routine sets pWalker->eCode to 0 if pExpr is not a constant. +** It makes no changes to pWalker->eCode if pExpr is constant. In +** every case, it returns WRC_Abort. 
+** +** Called as a service subroutine from exprNodeIsConstant(). +*/ +static SQLITE_NOINLINE int exprNodeIsConstantFunction( + Walker *pWalker, + Expr *pExpr +){ + int n; /* Number of arguments */ + ExprList *pList; /* List of arguments */ + FuncDef *pDef; /* The function */ + sqlite3 *db; /* The database */ + + assert( pExpr->op==TK_FUNCTION ); + if( ExprHasProperty(pExpr, EP_TokenOnly) + || (pList = pExpr->x.pList)==0 + ){; + n = 0; + }else{ + n = pList->nExpr; + sqlite3WalkExprList(pWalker, pList); + if( pWalker->eCode==0 ) return WRC_Abort; + } + db = pWalker->pParse->db; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( pDef==0 + || pDef->xFinalize!=0 + || (pDef->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0 + || ExprHasProperty(pExpr, EP_WinFunc) + ){ + pWalker->eCode = 0; + return WRC_Abort; + } + return WRC_Prune; +} + /* ** These routines are Walker callbacks used to check expressions to @@ -110892,6 +111456,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr *pExpr){ ** malformed schema error. 
*/ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ + assert( pWalker->eCode>0 ); /* If pWalker->eCode is 2 then any term of the expression that comes from ** the ON or USING clauses of an outer join disqualifies the expression @@ -110911,6 +111476,8 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ ){ if( pWalker->eCode==5 ) ExprSetProperty(pExpr, EP_FromDDL); return WRC_Continue; + }else if( pWalker->pParse ){ + return exprNodeIsConstantFunction(pWalker, pExpr); }else{ pWalker->eCode = 0; return WRC_Abort; @@ -110939,9 +111506,11 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ case TK_IF_NULL_ROW: case TK_REGISTER: case TK_DOT: + case TK_RAISE: testcase( pExpr->op==TK_REGISTER ); testcase( pExpr->op==TK_IF_NULL_ROW ); testcase( pExpr->op==TK_DOT ); + testcase( pExpr->op==TK_RAISE ); pWalker->eCode = 0; return WRC_Abort; case TK_VARIABLE: @@ -110963,15 +111532,15 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ return WRC_Continue; } } -static int exprIsConst(Expr *p, int initFlag, int iCur){ +static int exprIsConst(Parse *pParse, Expr *p, int initFlag){ Walker w; w.eCode = initFlag; + w.pParse = pParse; w.xExprCallback = exprNodeIsConstant; w.xSelectCallback = sqlite3SelectWalkFail; #ifdef SQLITE_DEBUG w.xSelectCallback2 = sqlite3SelectWalkAssert2; #endif - w.u.iCur = iCur; sqlite3WalkExpr(&w, p); return w.eCode; } @@ -110983,9 +111552,15 @@ static int exprIsConst(Expr *p, int initFlag, int iCur){ ** For the purposes of this function, a double-quoted string (ex: "abc") ** is considered a variable but a single-quoted string (ex: 'abc') is ** a constant. +** +** The pParse parameter may be NULL. But if it is NULL, there is no way +** to determine if function calls are constant or not, and hence all +** function calls will be considered to be non-constant. If pParse is +** not NULL, then a function call might be constant, depending on the +** function and on its parameters. 
*/ -SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){ - return exprIsConst(p, 1, 0); +SQLITE_PRIVATE int sqlite3ExprIsConstant(Parse *pParse, Expr *p){ + return exprIsConst(pParse, p, 1); } /* @@ -111001,8 +111576,24 @@ SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){ ** can be added to the pParse->pConstExpr list and evaluated once when ** the prepared statement starts up. See sqlite3ExprCodeRunJustOnce(). */ -SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){ - return exprIsConst(p, 2, 0); +static int sqlite3ExprIsConstantNotJoin(Parse *pParse, Expr *p){ + return exprIsConst(pParse, p, 2); +} + +/* +** This routine examines sub-SELECT statements as an expression is being +** walked as part of sqlite3ExprIsTableConstant(). Sub-SELECTs are considered +** constant as long as they are uncorrelated - meaning that they do not +** contain any terms from outer contexts. +*/ +static int exprSelectWalkTableConstant(Walker *pWalker, Select *pSelect){ + assert( pSelect!=0 ); + assert( pWalker->eCode==3 || pWalker->eCode==0 ); + if( (pSelect->selFlags & SF_Correlated)!=0 ){ + pWalker->eCode = 0; + return WRC_Abort; + } + return WRC_Prune; } /* @@ -111010,9 +111601,26 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){ ** for any single row of the table with cursor iCur. In other words, the ** expression must not refer to any non-deterministic function nor any ** table other than iCur. +** +** Consider uncorrelated subqueries to be constants if the bAllowSubq +** parameter is true. 
*/ -SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ - return exprIsConst(p, 3, iCur); +static int sqlite3ExprIsTableConstant(Expr *p, int iCur, int bAllowSubq){ + Walker w; + w.eCode = 3; + w.pParse = 0; + w.xExprCallback = exprNodeIsConstant; + if( bAllowSubq ){ + w.xSelectCallback = exprSelectWalkTableConstant; + }else{ + w.xSelectCallback = sqlite3SelectWalkFail; +#ifdef SQLITE_DEBUG + w.xSelectCallback2 = sqlite3SelectWalkAssert2; +#endif + } + w.u.iCur = iCur; + sqlite3WalkExpr(&w, p); + return w.eCode; } /* @@ -111030,7 +111638,10 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ ** ** (1) pExpr cannot refer to any table other than pSrc->iCursor. ** -** (2) pExpr cannot use subqueries or non-deterministic functions. +** (2a) pExpr cannot use subqueries unless the bAllowSubq parameter is +** true and the subquery is non-correlated +** +** (2b) pExpr cannot use non-deterministic functions. ** ** (3) pSrc cannot be part of the left operand for a RIGHT JOIN. ** (Is there some way to relax this constraint?) 
@@ -111059,7 +111670,8 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint( Expr *pExpr, /* The constraint */ const SrcList *pSrcList, /* Complete FROM clause */ - int iSrc /* Which element of pSrcList to use */ + int iSrc, /* Which element of pSrcList to use */ + int bAllowSubq /* Allow non-correlated subqueries */ ){ const SrcItem *pSrc = &pSrcList->a[iSrc]; if( pSrc->fg.jointype & JT_LTORJ ){ @@ -111084,7 +111696,8 @@ SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint( } } } - return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor); /* rules (1), (2) */ + /* Rules (1), (2a), and (2b) handled by the following: */ + return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor, bAllowSubq); } @@ -111169,7 +111782,7 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse *pParse, Expr *p, ExprLi */ SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr *p, u8 isInit){ assert( isInit==0 || isInit==1 ); - return exprIsConst(p, 4+isInit, 0); + return exprIsConst(0, p, 4+isInit); } #ifdef SQLITE_ENABLE_CURSOR_HINTS @@ -111417,13 +112030,13 @@ static void sqlite3SetHasNullFlag(Vdbe *v, int iCur, int regHasNull){ ** The argument is an IN operator with a list (not a subquery) on the ** right-hand side. Return TRUE if that list is constant. 
*/ -static int sqlite3InRhsIsConstant(Expr *pIn){ +static int sqlite3InRhsIsConstant(Parse *pParse, Expr *pIn){ Expr *pLHS; int res; assert( !ExprHasProperty(pIn, EP_xIsSelect) ); pLHS = pIn->pLeft; pIn->pLeft = 0; - res = sqlite3ExprIsConstant(pIn); + res = sqlite3ExprIsConstant(pParse, pIn); pIn->pLeft = pLHS; return res; } @@ -111692,7 +112305,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex( if( eType==0 && (inFlags & IN_INDEX_NOOP_OK) && ExprUseXList(pX) - && (!sqlite3InRhsIsConstant(pX) || pX->x.pList->nExpr<=2) + && (!sqlite3InRhsIsConstant(pParse,pX) || pX->x.pList->nExpr<=2) ){ pParse->nTab--; /* Back out the allocation of the unused cursor */ iTab = -1; /* Cursor is not allocated */ @@ -111975,7 +112588,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( ** this code only executes once. Because for a non-constant ** expression we need to rerun this code each time. */ - if( addrOnce && !sqlite3ExprIsConstant(pE2) ){ + if( addrOnce && !sqlite3ExprIsConstant(pParse, pE2) ){ sqlite3VdbeChangeToNoop(v, addrOnce-1); sqlite3VdbeChangeToNoop(v, addrOnce); ExprClearProperty(pExpr, EP_Subrtn); @@ -113139,12 +113752,6 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) assert( pExpr->u.zToken!=0 ); assert( pExpr->u.zToken[0]!=0 ); sqlite3VdbeAddOp2(v, OP_Variable, pExpr->iColumn, target); - if( pExpr->u.zToken[1]!=0 ){ - const char *z = sqlite3VListNumToName(pParse->pVList, pExpr->iColumn); - assert( pExpr->u.zToken[0]=='?' || (z && !strcmp(pExpr->u.zToken, z)) ); - pParse->pVList[0] = 0; /* Indicate VList may no longer be enlarged */ - sqlite3VdbeAppendP4(v, (char*)z, P4_STATIC); - } return target; } case TK_REGISTER: { @@ -113318,7 +113925,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) } #endif - if( ConstFactorOk(pParse) && sqlite3ExprIsConstantNotJoin(pExpr) ){ + if( ConstFactorOk(pParse) + && sqlite3ExprIsConstantNotJoin(pParse,pExpr) + ){ /* SQL functions can be expensive. 
So try to avoid running them ** multiple times if we know they always give the same result */ return sqlite3ExprCodeRunJustOnce(pParse, pExpr, -1); @@ -113349,7 +113958,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) } for(i=0; ia[i].pExpr) ){ + if( i<32 && sqlite3ExprIsConstant(pParse, pFarg->a[i].pExpr) ){ testcase( i==31 ); constMask |= MASKBIT32(i); } @@ -113491,8 +114100,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) if( !ExprHasProperty(pExpr, EP_Collate) ){ /* A TK_COLLATE Expr node without the EP_Collate tag is a so-called ** "SOFT-COLLATE" that is added to constraints that are pushed down - ** from outer queries into sub-queries by the push-down optimization. - ** Clear subtypes as subtypes may not cross a subquery boundary. + ** from outer queries into sub-queries by the WHERE-clause push-down + ** optimization. Clear subtypes as subtypes may not cross a subquery + ** boundary. */ assert( pExpr->pLeft ); sqlite3ExprCode(pParse, pExpr->pLeft, target); @@ -113816,7 +114426,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){ if( ConstFactorOk(pParse) && ALWAYS(pExpr!=0) && pExpr->op!=TK_REGISTER - && sqlite3ExprIsConstantNotJoin(pExpr) + && sqlite3ExprIsConstantNotJoin(pParse, pExpr) ){ *pReg = 0; r2 = sqlite3ExprCodeRunJustOnce(pParse, pExpr, -1); @@ -113880,7 +114490,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse *pParse, Expr *pExpr, int target){ ** might choose to code the expression at initialization time. 
*/ SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){ - if( pParse->okConstFactor && sqlite3ExprIsConstantNotJoin(pExpr) ){ + if( pParse->okConstFactor && sqlite3ExprIsConstantNotJoin(pParse,pExpr) ){ sqlite3ExprCodeRunJustOnce(pParse, pExpr, target); }else{ sqlite3ExprCodeCopy(pParse, pExpr, target); @@ -113939,7 +114549,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList( sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i); } }else if( (flags & SQLITE_ECEL_FACTOR)!=0 - && sqlite3ExprIsConstantNotJoin(pExpr) + && sqlite3ExprIsConstantNotJoin(pParse,pExpr) ){ sqlite3ExprCodeRunJustOnce(pParse, pExpr, target+i); }else{ @@ -115090,9 +115700,8 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ && pAggInfo->aCol[iAgg].pCExpr==pExpr ){ pExpr = sqlite3ExprDup(db, pExpr, 0); - if( pExpr ){ + if( pExpr && !sqlite3ExprDeferredDelete(pParse, pExpr) ){ pAggInfo->aCol[iAgg].pCExpr = pExpr; - sqlite3ExprDeferredDelete(pParse, pExpr); } } }else{ @@ -115101,9 +115710,8 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ && pAggInfo->aFunc[iAgg].pFExpr==pExpr ){ pExpr = sqlite3ExprDup(db, pExpr, 0); - if( pExpr ){ + if( pExpr && !sqlite3ExprDeferredDelete(pParse, pExpr) ){ pAggInfo->aFunc[iAgg].pFExpr = pExpr; - sqlite3ExprDeferredDelete(pParse, pExpr); } } } @@ -116862,7 +117470,7 @@ static int renameResolveTrigger(Parse *pParse){ /* ALWAYS() because if the table of the trigger does not exist, the ** error would have been hit before this point */ if( ALWAYS(pParse->pTriggerTab) ){ - rc = sqlite3ViewGetColumnNames(pParse, pParse->pTriggerTab); + rc = sqlite3ViewGetColumnNames(pParse, pParse->pTriggerTab)!=0; } /* Resolve symbols in WHEN clause */ @@ -117804,7 +118412,12 @@ SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse *pParse, SrcList *pSrc, const T if( i==pTab->iPKey ){ sqlite3VdbeAddOp2(v, OP_Null, 0, regOut); }else{ + char aff = pTab->aCol[i].affinity; + if( aff==SQLITE_AFF_REAL ){ + pTab->aCol[i].affinity = 
SQLITE_AFF_NUMERIC; + } sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, i, regOut); + pTab->aCol[i].affinity = aff; } nField++; } @@ -118723,7 +119336,7 @@ static void statGet( if( iVal==2 && p->nRow*10 <= nDistinct*11 ) iVal = 1; sqlite3_str_appendf(&sStat, " %llu", iVal); #ifdef SQLITE_ENABLE_STAT4 - assert( p->current.anEq[i] ); + assert( p->current.anEq[i] || p->nRow==0 ); #endif } sqlite3ResultStrAccum(context, &sStat); @@ -118908,7 +119521,7 @@ static void analyzeOneTable( for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int nCol; /* Number of columns in pIdx. "N" */ - int addrRewind; /* Address of "OP_Rewind iIdxCur" */ + int addrGotoEnd; /* Address of "OP_Rewind iIdxCur" */ int addrNextRow; /* Address of "next_row:" */ const char *zIdxName; /* Name of the index */ int nColTest; /* Number of columns to test for changes */ @@ -118932,9 +119545,14 @@ static void analyzeOneTable( /* ** Pseudo-code for loop that calls stat_push(): ** - ** Rewind csr - ** if eof(csr) goto end_of_scan; ** regChng = 0 + ** Rewind csr + ** if eof(csr){ + ** stat_init() with count = 0; + ** goto end_of_scan; + ** } + ** count() + ** stat_init() ** goto chng_addr_0; ** ** next_row: @@ -118973,41 +119591,36 @@ static void analyzeOneTable( sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "%s", pIdx->zName)); - /* Invoke the stat_init() function. The arguments are: + /* Implementation of the following: ** + ** regChng = 0 + ** Rewind csr + ** if eof(csr){ + ** stat_init() with count = 0; + ** goto end_of_scan; + ** } + ** count() + ** stat_init() + ** goto chng_addr_0; + */ + assert( regTemp2==regStat+4 ); + sqlite3VdbeAddOp2(v, OP_Integer, db->nAnalysisLimit, regTemp2); + + /* Arguments to stat_init(): ** (1) the number of columns in the index including the rowid ** (or for a WITHOUT ROWID table, the number of PK columns), ** (2) the number of columns in the key without the rowid/pk - ** (3) estimated number of rows in the index, - */ + ** (3) estimated number of rows in the index. 
*/ sqlite3VdbeAddOp2(v, OP_Integer, nCol, regStat+1); assert( regRowid==regStat+2 ); sqlite3VdbeAddOp2(v, OP_Integer, pIdx->nKeyCol, regRowid); -#ifdef SQLITE_ENABLE_STAT4 - if( OptimizationEnabled(db, SQLITE_Stat4) ){ - sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regTemp); - addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); - VdbeCoverage(v); - }else -#endif - { - addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); - VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_Count, iIdxCur, regTemp, 1); - } - assert( regTemp2==regStat+4 ); - sqlite3VdbeAddOp2(v, OP_Integer, db->nAnalysisLimit, regTemp2); + sqlite3VdbeAddOp3(v, OP_Count, iIdxCur, regTemp, + OptimizationDisabled(db, SQLITE_Stat4)); sqlite3VdbeAddFunctionCall(pParse, 0, regStat+1, regStat, 4, &statInitFuncdef, 0); + addrGotoEnd = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); + VdbeCoverage(v); - /* Implementation of the following: - ** - ** Rewind csr - ** if eof(csr) goto end_of_scan; - ** regChng = 0 - ** goto next_push_0; - ** - */ sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng); addrNextRow = sqlite3VdbeCurrentAddr(v); @@ -119114,6 +119727,12 @@ static void analyzeOneTable( } /* Add the entry to the stat1 table. */ + if( pIdx->pPartIdxWhere ){ + /* Partial indexes might get a zero-entry in sqlite_stat1. But + ** an empty table is omitted from sqlite_stat1. */ + sqlite3VdbeJumpHere(v, addrGotoEnd); + addrGotoEnd = 0; + } callStatGet(pParse, regStat, STAT_GET_STAT1, regStat1); assert( "BBB"[0]==SQLITE_AFF_TEXT ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); @@ -119137,6 +119756,13 @@ static void analyzeOneTable( int addrIsNull; u8 seekOp = HasRowid(pTab) ? 
OP_NotExists : OP_NotFound; + /* No STAT4 data is generated if the number of rows is zero */ + if( addrGotoEnd==0 ){ + sqlite3VdbeAddOp2(v, OP_Cast, regStat1, SQLITE_AFF_INTEGER); + addrGotoEnd = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); + VdbeCoverage(v); + } + if( doOnce ){ int mxCol = nCol; Index *pX; @@ -119189,7 +119815,7 @@ static void analyzeOneTable( #endif /* SQLITE_ENABLE_STAT4 */ /* End of analysis */ - sqlite3VdbeJumpHere(v, addrRewind); + if( addrGotoEnd ) sqlite3VdbeJumpHere(v, addrGotoEnd); } @@ -120938,7 +121564,7 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ } sqlite3VdbeAddOp0(v, OP_Halt); -#if SQLITE_USER_AUTHENTICATION +#if SQLITE_USER_AUTHENTICATION && !defined(SQLITE_OMIT_SHARED_CACHE) if( pParse->nTableLock>0 && db->init.busy==0 ){ sqlite3UserAuthInit(db); if( db->auth.authLevelrc = SQLITE_ERROR; pParse->nErr++; return; } + iCsr = pParse->nTab++; regYield = ++pParse->nMem; regRec = ++pParse->nMem; regRowid = ++pParse->nMem; - assert(pParse->nTab==1); sqlite3MayAbort(pParse); - sqlite3VdbeAddOp3(v, OP_OpenWrite, 1, pParse->regRoot, iDb); + sqlite3VdbeAddOp3(v, OP_OpenWrite, iCsr, pParse->regRoot, iDb); sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG); - pParse->nTab = 2; addrTop = sqlite3VdbeCurrentAddr(v) + 1; sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); if( pParse->nErr ) return; @@ -123611,11 +124237,11 @@ SQLITE_PRIVATE void sqlite3EndTable( VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_MakeRecord, dest.iSdst, dest.nSdst, regRec); sqlite3TableAffinity(v, p, 0); - sqlite3VdbeAddOp2(v, OP_NewRowid, 1, regRowid); - sqlite3VdbeAddOp3(v, OP_Insert, 1, regRec, regRowid); + sqlite3VdbeAddOp2(v, OP_NewRowid, iCsr, regRowid); + sqlite3VdbeAddOp3(v, OP_Insert, iCsr, regRec, regRowid); sqlite3VdbeGoto(v, addrInsLoop); sqlite3VdbeJumpHere(v, addrInsLoop); - sqlite3VdbeAddOp1(v, OP_Close, 1); + sqlite3VdbeAddOp1(v, OP_Close, iCsr); } /* Compute the complete text of the CREATE statement */ @@ -123672,13 +124298,10 @@ SQLITE_PRIVATE void 
sqlite3EndTable( /* Test for cycles in generated columns and illegal expressions ** in CHECK constraints and in DEFAULT clauses. */ if( p->tabFlags & TF_HasGenerated ){ - sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0, + sqlite3VdbeAddOp4(v, OP_SqlExec, 0x0001, 0, 0, sqlite3MPrintf(db, "SELECT*FROM\"%w\".\"%w\"", db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC); } - sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0, - sqlite3MPrintf(db, "PRAGMA \"%w\".integrity_check(%Q)", - db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC); } /* Add the table to the in-memory representation of the database. @@ -123816,8 +124439,9 @@ SQLITE_PRIVATE void sqlite3CreateView( #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) /* ** The Table structure pTable is really a VIEW. Fill in the names of -** the columns of the view in the pTable structure. Return the number -** of errors. If an error is seen leave an error message in pParse->zErrMsg. +** the columns of the view in the pTable structure. Return non-zero if +** there are errors. If an error is seen an error message is left +** in pParse->zErrMsg. 
*/ static SQLITE_NOINLINE int viewGetColumnNames(Parse *pParse, Table *pTable){ Table *pSelTab; /* A fake table from which we get the result set */ @@ -123940,7 +124564,7 @@ static SQLITE_NOINLINE int viewGetColumnNames(Parse *pParse, Table *pTable){ sqlite3DeleteColumnNames(db, pTable); } #endif /* SQLITE_OMIT_VIEW */ - return nErr; + return nErr + pParse->nErr; } SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ assert( pTable!=0 ); @@ -130238,6 +130862,8 @@ static void groupConcatValue(sqlite3_context *context){ sqlite3_result_error_toobig(context); }else if( pAccum->accError==SQLITE_NOMEM ){ sqlite3_result_error_nomem(context); + }else if( pGCC->nAccum>0 && pAccum->nChar==0 ){ + sqlite3_result_text(context, "", 1, SQLITE_STATIC); }else{ const char *zText = sqlite3_str_value(pAccum); sqlite3_result_text(context, zText, pAccum->nChar, SQLITE_TRANSIENT); @@ -132852,6 +133478,196 @@ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){ # define autoIncStep(A,B,C) #endif /* SQLITE_OMIT_AUTOINCREMENT */ +/* +** If argument pVal is a Select object returned by an sqlite3MultiValues() +** that was able to use the co-routine optimization, finish coding the +** co-routine. +*/ +SQLITE_PRIVATE void sqlite3MultiValuesEnd(Parse *pParse, Select *pVal){ + if( ALWAYS(pVal) && pVal->pSrc->nSrc>0 ){ + SrcItem *pItem = &pVal->pSrc->a[0]; + sqlite3VdbeEndCoroutine(pParse->pVdbe, pItem->regReturn); + sqlite3VdbeJumpHere(pParse->pVdbe, pItem->addrFillSub - 1); + } +} + +/* +** Return true if all expressions in the expression-list passed as the +** only argument are constant. +*/ +static int exprListIsConstant(Parse *pParse, ExprList *pRow){ + int ii; + for(ii=0; iinExpr; ii++){ + if( 0==sqlite3ExprIsConstant(pParse, pRow->a[ii].pExpr) ) return 0; + } + return 1; +} + +/* +** Return true if all expressions in the expression-list passed as the +** only argument are both constant and have no affinity. 
+*/ +static int exprListIsNoAffinity(Parse *pParse, ExprList *pRow){ + int ii; + if( exprListIsConstant(pParse,pRow)==0 ) return 0; + for(ii=0; iinExpr; ii++){ + Expr *pExpr = pRow->a[ii].pExpr; + assert( pExpr->op!=TK_RAISE ); + assert( pExpr->affExpr==0 ); + if( 0!=sqlite3ExprAffinity(pExpr) ) return 0; + } + return 1; + +} + +/* +** This function is called by the parser for the second and subsequent +** rows of a multi-row VALUES clause. Argument pLeft is the part of +** the VALUES clause already parsed, argument pRow is the vector of values +** for the new row. The Select object returned represents the complete +** VALUES clause, including the new row. +** +** There are two ways in which this may be achieved - by incremental +** coding of a co-routine (the "co-routine" method) or by returning a +** Select object equivalent to the following (the "UNION ALL" method): +** +** "pLeft UNION ALL SELECT pRow" +** +** If the VALUES clause contains a lot of rows, this compound Select +** object may consume a lot of memory. +** +** When the co-routine method is used, each row that will be returned +** by the VALUES clause is coded into part of a co-routine as it is +** passed to this function. The returned Select object is equivalent to: +** +** SELECT * FROM ( +** Select object to read co-routine +** ) +** +** The co-routine method is used in most cases. Exceptions are: +** +** a) If the current statement has a WITH clause. This is to avoid +** statements like: +** +** WITH cte AS ( VALUES('x'), ('y') ... ) +** SELECT * FROM cte AS a, cte AS b; +** +** This will not work, as the co-routine uses a hard-coded register +** for its OP_Yield instructions, and so it is not possible for two +** cursors to iterate through it concurrently. +** +** b) The schema is currently being parsed (i.e. the VALUES clause is part +** of a schema item like a VIEW or TRIGGER). 
In this case there is no VM +** being generated when parsing is taking place, and so generating +** a co-routine is not possible. +** +** c) There are non-constant expressions in the VALUES clause (e.g. +** the VALUES clause is part of a correlated sub-query). +** +** d) One or more of the values in the first row of the VALUES clause +** has an affinity (i.e. is a CAST expression). This causes problems +** because the complex rules SQLite uses (see function +** sqlite3SubqueryColumnTypes() in select.c) to determine the effective +** affinity of such a column for all rows require access to all values in +** the column simultaneously. +*/ +SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList *pRow){ + + if( pParse->bHasWith /* condition (a) above */ + || pParse->db->init.busy /* condition (b) above */ + || exprListIsConstant(pParse,pRow)==0 /* condition (c) above */ + || (pLeft->pSrc->nSrc==0 && + exprListIsNoAffinity(pParse,pLeft->pEList)==0) /* condition (d) above */ + || IN_SPECIAL_PARSE + ){ + /* The co-routine method cannot be used. Fall back to UNION ALL. */ + Select *pSelect = 0; + int f = SF_Values | SF_MultiValue; + if( pLeft->pSrc->nSrc ){ + sqlite3MultiValuesEnd(pParse, pLeft); + f = SF_Values; + }else if( pLeft->pPrior ){ + /* In this case set the SF_MultiValue flag only if it was set on pLeft */ + f = (f & pLeft->selFlags); + } + pSelect = sqlite3SelectNew(pParse, pRow, 0, 0, 0, 0, 0, f, 0); + pLeft->selFlags &= ~SF_MultiValue; + if( pSelect ){ + pSelect->op = TK_ALL; + pSelect->pPrior = pLeft; + pLeft = pSelect; + } + }else{ + SrcItem *p = 0; /* SrcItem that reads from co-routine */ + + if( pLeft->pSrc->nSrc==0 ){ + /* Co-routine has not yet been started and the special Select object + ** that accesses the co-routine has not yet been created. This block + ** does both those things. 
*/ + Vdbe *v = sqlite3GetVdbe(pParse); + Select *pRet = sqlite3SelectNew(pParse, 0, 0, 0, 0, 0, 0, 0, 0); + + /* Ensure the database schema has been read. This is to ensure we have + ** the correct text encoding. */ + if( (pParse->db->mDbFlags & DBFLAG_SchemaKnownOk)==0 ){ + sqlite3ReadSchema(pParse); + } + + if( pRet ){ + SelectDest dest; + pRet->pSrc->nSrc = 1; + pRet->pPrior = pLeft->pPrior; + pRet->op = pLeft->op; + if( pRet->pPrior ) pRet->selFlags |= SF_Values; + pLeft->pPrior = 0; + pLeft->op = TK_SELECT; + assert( pLeft->pNext==0 ); + assert( pRet->pNext==0 ); + p = &pRet->pSrc->a[0]; + p->pSelect = pLeft; + p->fg.viaCoroutine = 1; + p->addrFillSub = sqlite3VdbeCurrentAddr(v) + 1; + p->regReturn = ++pParse->nMem; + p->iCursor = -1; + p->u1.nRow = 2; + sqlite3VdbeAddOp3(v,OP_InitCoroutine,p->regReturn,0,p->addrFillSub); + sqlite3SelectDestInit(&dest, SRT_Coroutine, p->regReturn); + + /* Allocate registers for the output of the co-routine. Do so so + ** that there are two unused registers immediately before those + ** used by the co-routine. This allows the code in sqlite3Insert() + ** to use these registers directly, instead of copying the output + ** of the co-routine to a separate array for processing. 
*/ + dest.iSdst = pParse->nMem + 3; + dest.nSdst = pLeft->pEList->nExpr; + pParse->nMem += 2 + dest.nSdst; + + pLeft->selFlags |= SF_MultiValue; + sqlite3Select(pParse, pLeft, &dest); + p->regResult = dest.iSdst; + assert( pParse->nErr || dest.iSdst>0 ); + pLeft = pRet; + } + }else{ + p = &pLeft->pSrc->a[0]; + assert( !p->fg.isTabFunc && !p->fg.isIndexedBy ); + p->u1.nRow++; + } + + if( pParse->nErr==0 ){ + assert( p!=0 ); + if( p->pSelect->pEList->nExpr!=pRow->nExpr ){ + sqlite3SelectWrongNumTermsError(pParse, p->pSelect); + }else{ + sqlite3ExprCodeExprList(pParse, pRow, p->regResult, 0, 0); + sqlite3VdbeAddOp1(pParse->pVdbe, OP_Yield, p->regReturn); + } + } + sqlite3ExprListDelete(pParse->db, pRow); + } + + return pLeft; +} /* Forward declaration */ static int xferOptimization( @@ -133188,25 +134004,40 @@ SQLITE_PRIVATE void sqlite3Insert( if( pSelect ){ /* Data is coming from a SELECT or from a multi-row VALUES clause. ** Generate a co-routine to run the SELECT. */ - int regYield; /* Register holding co-routine entry-point */ - int addrTop; /* Top of the co-routine */ int rc; /* Result code */ - regYield = ++pParse->nMem; - addrTop = sqlite3VdbeCurrentAddr(v) + 1; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); - sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield); - dest.iSdst = bIdListInOrder ? 
regData : 0; - dest.nSdst = pTab->nCol; - rc = sqlite3Select(pParse, pSelect, &dest); - regFromSelect = dest.iSdst; - assert( db->pParse==pParse ); - if( rc || pParse->nErr ) goto insert_cleanup; - assert( db->mallocFailed==0 ); - sqlite3VdbeEndCoroutine(v, regYield); - sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */ - assert( pSelect->pEList ); - nColumn = pSelect->pEList->nExpr; + if( pSelect->pSrc->nSrc==1 + && pSelect->pSrc->a[0].fg.viaCoroutine + && pSelect->pPrior==0 + ){ + SrcItem *pItem = &pSelect->pSrc->a[0]; + dest.iSDParm = pItem->regReturn; + regFromSelect = pItem->regResult; + nColumn = pItem->pSelect->pEList->nExpr; + ExplainQueryPlan((pParse, 0, "SCAN %S", pItem)); + if( bIdListInOrder && nColumn==pTab->nCol ){ + regData = regFromSelect; + regRowid = regData - 1; + regIns = regRowid - (IsVirtual(pTab) ? 1 : 0); + } + }else{ + int addrTop; /* Top of the co-routine */ + int regYield = ++pParse->nMem; + addrTop = sqlite3VdbeCurrentAddr(v) + 1; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); + sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield); + dest.iSdst = bIdListInOrder ? regData : 0; + dest.nSdst = pTab->nCol; + rc = sqlite3Select(pParse, pSelect, &dest); + regFromSelect = dest.iSdst; + assert( db->pParse==pParse ); + if( rc || pParse->nErr ) goto insert_cleanup; + assert( db->mallocFailed==0 ); + sqlite3VdbeEndCoroutine(v, regYield); + sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */ + assert( pSelect->pEList ); + nColumn = pSelect->pEList->nExpr; + } /* Set useTempTable to TRUE if the result of the SELECT statement ** should be written into a temporary table (template 4). 
Set to @@ -137931,6 +138762,34 @@ static const PragmaName aPragmaName[] = { /************** End of pragma.h **********************************************/ /************** Continuing where we left off in pragma.c *********************/ +/* +** When the 0x10 bit of PRAGMA optimize is set, any ANALYZE commands +** will be run with an analysis_limit set to the lessor of the value of +** the following macro or to the actual analysis_limit if it is non-zero, +** in order to prevent PRAGMA optimize from running for too long. +** +** The value of 2000 is chosen emperically so that the worst-case run-time +** for PRAGMA optimize does not exceed 100 milliseconds against a variety +** of test databases on a RaspberryPI-4 compiled using -Os and without +** -DSQLITE_DEBUG. Of course, your mileage may vary. For the purpose of +** this paragraph, "worst-case" means that ANALYZE ends up being +** run on every table in the database. The worst case typically only +** happens if PRAGMA optimize is run on a database file for which ANALYZE +** has not been previously run and the 0x10000 flag is included so that +** all tables are analyzed. The usual case for PRAGMA optimize is that +** no ANALYZE commands will be run at all, or if any ANALYZE happens it +** will be against a single table, so that expected timing for PRAGMA +** optimize on a PI-4 is more like 1 millisecond or less with the 0x10000 +** flag or less than 100 microseconds without the 0x10000 flag. +** +** An analysis limit of 2000 is almost always sufficient for the query +** planner to fully characterize an index. The additional accuracy from +** a larger analysis is not usually helpful. +*/ +#ifndef SQLITE_DEFAULT_OPTIMIZE_LIMIT +# define SQLITE_DEFAULT_OPTIMIZE_LIMIT 2000 +#endif + /* ** Interpret the given string as a safety level. Return 0 for OFF, ** 1 for ON or NORMAL, 2 for FULL, and 3 for EXTRA. 
Return 1 for an empty or @@ -139576,7 +140435,7 @@ SQLITE_PRIVATE void sqlite3Pragma( /* Set the maximum error count */ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; if( zRight ){ - if( sqlite3GetInt32(zRight, &mxErr) ){ + if( sqlite3GetInt32(pValue->z, &mxErr) ){ if( mxErr<=0 ){ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; } @@ -139593,7 +140452,6 @@ SQLITE_PRIVATE void sqlite3Pragma( Hash *pTbls; /* Set of all tables in the schema */ int *aRoot; /* Array of root page numbers of all btrees */ int cnt = 0; /* Number of entries in aRoot[] */ - int mxIdx = 0; /* Maximum number of indexes for any table */ if( OMIT_TEMPDB && i==1 ) continue; if( iDb>=0 && i!=iDb ) continue; @@ -139615,7 +140473,6 @@ SQLITE_PRIVATE void sqlite3Pragma( if( pObjTab && pObjTab!=pTab ) continue; if( HasRowid(pTab) ) cnt++; for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ cnt++; } - if( nIdx>mxIdx ) mxIdx = nIdx; } if( cnt==0 ) continue; if( pObjTab ) cnt++; @@ -139635,11 +140492,11 @@ SQLITE_PRIVATE void sqlite3Pragma( aRoot[0] = cnt; /* Make sure sufficient number of registers have been allocated */ - sqlite3TouchRegister(pParse, 8+mxIdx); + sqlite3TouchRegister(pParse, 8+cnt); sqlite3ClearTempRegCache(pParse); /* Do the b-tree integrity checks */ - sqlite3VdbeAddOp4(v, OP_IntegrityCk, 2, cnt, 1, (char*)aRoot,P4_INTARRAY); + sqlite3VdbeAddOp4(v, OP_IntegrityCk, 1, cnt, 8, (char*)aRoot,P4_INTARRAY); sqlite3VdbeChangeP5(v, (u8)i); addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, @@ -139649,6 +140506,36 @@ SQLITE_PRIVATE void sqlite3Pragma( integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, addr); + /* Check that the indexes all have the right number of rows */ + cnt = pObjTab ? 
1 : 0; + sqlite3VdbeLoadString(v, 2, "wrong # of entries in index "); + for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ + int iTab = 0; + Table *pTab = sqliteHashData(x); + Index *pIdx; + if( pObjTab && pObjTab!=pTab ) continue; + if( HasRowid(pTab) ){ + iTab = cnt++; + }else{ + iTab = cnt; + for(pIdx=pTab->pIndex; ALWAYS(pIdx); pIdx=pIdx->pNext){ + if( IsPrimaryKeyIndex(pIdx) ) break; + iTab++; + } + } + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + if( pIdx->pPartIdxWhere==0 ){ + addr = sqlite3VdbeAddOp3(v, OP_Eq, 8+cnt, 0, 8+iTab); + VdbeCoverageNeverNull(v); + sqlite3VdbeLoadString(v, 4, pIdx->zName); + sqlite3VdbeAddOp3(v, OP_Concat, 4, 2, 3); + integrityCheckResultRow(v); + sqlite3VdbeJumpHere(v, addr); + } + cnt++; + } + } + /* Make sure all the indices are constructed correctly. */ for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ @@ -139972,21 +140859,9 @@ SQLITE_PRIVATE void sqlite3Pragma( } sqlite3VdbeAddOp2(v, OP_Next, iDataCur, loopTop); VdbeCoverage(v); sqlite3VdbeJumpHere(v, loopTop-1); - if( !isQuick ){ - sqlite3VdbeLoadString(v, 2, "wrong # of entries in index "); - for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ - if( pPk==pIdx ) continue; - sqlite3VdbeAddOp2(v, OP_Count, iIdxCur+j, 3); - addr = sqlite3VdbeAddOp3(v, OP_Eq, 8+j, 0, 3); VdbeCoverage(v); - sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); - sqlite3VdbeLoadString(v, 4, pIdx->zName); - sqlite3VdbeAddOp3(v, OP_Concat, 4, 2, 3); - integrityCheckResultRow(v); - sqlite3VdbeJumpHere(v, addr); - } - if( pPk ){ - sqlite3ReleaseTempRange(pParse, r2, pPk->nKeyCol); - } + if( pPk ){ + assert( !isQuick ); + sqlite3ReleaseTempRange(pParse, r2, pPk->nKeyCol); } } @@ -140284,44 +141159,63 @@ SQLITE_PRIVATE void sqlite3Pragma( ** ** The optional argument is a bitmask of optimizations to perform: ** - ** 0x0001 Debugging mode. Do not actually perform any optimizations - ** but instead return one line of text for each optimization - ** that would have been done. Off by default. 
+ ** 0x00001 Debugging mode. Do not actually perform any optimizations + ** but instead return one line of text for each optimization + ** that would have been done. Off by default. ** - ** 0x0002 Run ANALYZE on tables that might benefit. On by default. - ** See below for additional information. + ** 0x00002 Run ANALYZE on tables that might benefit. On by default. + ** See below for additional information. ** - ** 0x0004 (Not yet implemented) Record usage and performance - ** information from the current session in the - ** database file so that it will be available to "optimize" - ** pragmas run by future database connections. + ** 0x00010 Run all ANALYZE operations using an analysis_limit that + ** is the lessor of the current analysis_limit and the + ** SQLITE_DEFAULT_OPTIMIZE_LIMIT compile-time option. + ** The default value of SQLITE_DEFAULT_OPTIMIZE_LIMIT is + ** currently (2024-02-19) set to 2000, which is such that + ** the worst case run-time for PRAGMA optimize on a 100MB + ** database will usually be less than 100 milliseconds on + ** a RaspberryPI-4 class machine. On by default. ** - ** 0x0008 (Not yet implemented) Create indexes that might have - ** been helpful to recent queries + ** 0x10000 Look at tables to see if they need to be reanalyzed + ** due to growth or shrinkage even if they have not been + ** queried during the current connection. Off by default. ** - ** The default MASK is and always shall be 0xfffe. 0xfffe means perform all - ** of the optimizations listed above except Debug Mode, including new - ** optimizations that have not yet been invented. If new optimizations are - ** ever added that should be off by default, those off-by-default - ** optimizations will have bitmasks of 0x10000 or larger. + ** The default MASK is and always shall be 0x0fffe. In the current + ** implementation, the default mask only covers the 0x00002 optimization, + ** though additional optimizations that are covered by 0x0fffe might be + ** added in the future. 
Optimizations that are off by default and must + ** be explicitly requested have masks of 0x10000 or greater. ** ** DETERMINATION OF WHEN TO RUN ANALYZE ** ** In the current implementation, a table is analyzed if only if all of ** the following are true: ** - ** (1) MASK bit 0x02 is set. + ** (1) MASK bit 0x00002 is set. + ** + ** (2) The table is an ordinary table, not a virtual table or view. + ** + ** (3) The table name does not begin with "sqlite_". ** - ** (2) The query planner used sqlite_stat1-style statistics for one or - ** more indexes of the table at some point during the lifetime of - ** the current connection. + ** (4) One or more of the following is true: + ** (4a) The 0x10000 MASK bit is set. + ** (4b) One or more indexes on the table lacks an entry + ** in the sqlite_stat1 table. + ** (4c) The query planner used sqlite_stat1-style statistics for one + ** or more indexes of the table at some point during the lifetime + ** of the current connection. ** - ** (3) One or more indexes of the table are currently unanalyzed OR - ** the number of rows in the table has increased by 25 times or more - ** since the last time ANALYZE was run. + ** (5) One or more of the following is true: + ** (5a) One or more indexes on the table lacks an entry + ** in the sqlite_stat1 table. (Same as 4a) + ** (5b) The number of rows in the table has increased or decreased by + ** 10-fold. In other words, the current size of the table is + ** 10 times larger than the size in sqlite_stat1 or else the + ** current size is less than 1/10th the size in sqlite_stat1. ** ** The rules for when tables are analyzed are likely to change in - ** future releases. + ** future releases. Future versions of SQLite might accept a string + ** literal argument to this pragma that contains a mnemonic description + ** of the options rather than a bitmap. 
*/ case PragTyp_OPTIMIZE: { int iDbLast; /* Loop termination point for the schema loop */ @@ -140333,6 +141227,10 @@ SQLITE_PRIVATE void sqlite3Pragma( LogEst szThreshold; /* Size threshold above which reanalysis needed */ char *zSubSql; /* SQL statement for the OP_SqlExec opcode */ u32 opMask; /* Mask of operations to perform */ + int nLimit; /* Analysis limit to use */ + int nCheck = 0; /* Number of tables to be optimized */ + int nBtree = 0; /* Number of btrees to scan */ + int nIndex; /* Number of indexes on the current table */ if( zRight ){ opMask = (u32)sqlite3Atoi(zRight); @@ -140340,6 +141238,14 @@ SQLITE_PRIVATE void sqlite3Pragma( }else{ opMask = 0xfffe; } + if( (opMask & 0x10)==0 ){ + nLimit = 0; + }else if( db->nAnalysisLimit>0 + && db->nAnalysisLimitnTab++; for(iDbLast = zDb?iDb:db->nDb-1; iDb<=iDbLast; iDb++){ if( iDb==1 ) continue; @@ -140348,23 +141254,61 @@ SQLITE_PRIVATE void sqlite3Pragma( for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ pTab = (Table*)sqliteHashData(k); - /* If table pTab has not been used in a way that would benefit from - ** having analysis statistics during the current session, then skip it. - ** This also has the effect of skipping virtual tables and views */ - if( (pTab->tabFlags & TF_StatsUsed)==0 ) continue; + /* This only works for ordinary tables */ + if( !IsOrdinaryTable(pTab) ) continue; - /* Reanalyze if the table is 25 times larger than the last analysis */ - szThreshold = pTab->nRowLogEst + 46; assert( sqlite3LogEst(25)==46 ); + /* Do not scan system tables */ + if( 0==sqlite3StrNICmp(pTab->zName, "sqlite_", 7) ) continue; + + /* Find the size of the table as last recorded in sqlite_stat1. 
+ ** If any index is unanalyzed, then the threshold is -1 to + ** indicate a new, unanalyzed index + */ + szThreshold = pTab->nRowLogEst; + nIndex = 0; for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + nIndex++; if( !pIdx->hasStat1 ){ - szThreshold = 0; /* Always analyze if any index lacks statistics */ - break; + szThreshold = -1; /* Always analyze if any index lacks statistics */ } } - if( szThreshold ){ - sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); - sqlite3VdbeAddOp3(v, OP_IfSmaller, iTabCur, - sqlite3VdbeCurrentAddr(v)+2+(opMask&1), szThreshold); + + /* If table pTab has not been used in a way that would benefit from + ** having analysis statistics during the current session, then skip it, + ** unless the 0x10000 MASK bit is set. */ + if( (pTab->tabFlags & TF_MaybeReanalyze)!=0 ){ + /* Check for size change if stat1 has been used for a query */ + }else if( opMask & 0x10000 ){ + /* Check for size change if 0x10000 is set */ + }else if( pTab->pIndex!=0 && szThreshold<0 ){ + /* Do analysis if unanalyzed indexes exists */ + }else{ + /* Otherwise, we can skip this table */ + continue; + } + + nCheck++; + if( nCheck==2 ){ + /* If ANALYZE might be invoked two or more times, hold a write + ** transaction for efficiency */ + sqlite3BeginWriteOperation(pParse, 0, iDb); + } + nBtree += nIndex+1; + + /* Reanalyze if the table is 10 times larger or smaller than + ** the last analysis. Unconditional reanalysis if there are + ** unanalyzed indexes. */ + sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); + if( szThreshold>=0 ){ + const LogEst iRange = 33; /* 10x size change */ + sqlite3VdbeAddOp4Int(v, OP_IfSizeBetween, iTabCur, + sqlite3VdbeCurrentAddr(v)+2+(opMask&1), + szThreshold>=iRange ? 
szThreshold-iRange : -1, + szThreshold+iRange); + VdbeCoverage(v); + }else{ + sqlite3VdbeAddOp2(v, OP_Rewind, iTabCur, + sqlite3VdbeCurrentAddr(v)+2+(opMask&1)); VdbeCoverage(v); } zSubSql = sqlite3MPrintf(db, "ANALYZE \"%w\".\"%w\"", @@ -140374,11 +141318,27 @@ SQLITE_PRIVATE void sqlite3Pragma( sqlite3VdbeAddOp4(v, OP_String8, 0, r1, 0, zSubSql, P4_DYNAMIC); sqlite3VdbeAddOp2(v, OP_ResultRow, r1, 1); }else{ - sqlite3VdbeAddOp4(v, OP_SqlExec, 0, 0, 0, zSubSql, P4_DYNAMIC); + sqlite3VdbeAddOp4(v, OP_SqlExec, nLimit ? 0x02 : 00, nLimit, 0, + zSubSql, P4_DYNAMIC); } } } sqlite3VdbeAddOp0(v, OP_Expire); + + /* In a schema with a large number of tables and indexes, scale back + ** the analysis_limit to avoid excess run-time in the worst case. + */ + if( !db->mallocFailed && nLimit>0 && nBtree>100 ){ + int iAddr, iEnd; + VdbeOp *aOp; + nLimit = 100*nLimit/nBtree; + if( nLimit<100 ) nLimit = 100; + aOp = sqlite3VdbeGetOp(v, 0); + iEnd = sqlite3VdbeCurrentAddr(v); + for(iAddr=0; iAddrnConstraint; i++, pConstraint++){ - if( pConstraint->usable==0 ) continue; - if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue; if( pConstraint->iColumn < pTab->iHidden ) continue; + if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue; + if( pConstraint->usable==0 ) return SQLITE_CONSTRAINT; j = pConstraint->iColumn - pTab->iHidden; assert( j < 2 ); seen[j] = i+1; @@ -140657,16 +141617,13 @@ static int pragmaVtabBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ j = seen[0]-1; pIdxInfo->aConstraintUsage[j].argvIndex = 1; pIdxInfo->aConstraintUsage[j].omit = 1; - if( seen[1]==0 ){ - pIdxInfo->estimatedCost = (double)1000; - pIdxInfo->estimatedRows = 1000; - return SQLITE_OK; - } pIdxInfo->estimatedCost = (double)20; pIdxInfo->estimatedRows = 20; - j = seen[1]-1; - pIdxInfo->aConstraintUsage[j].argvIndex = 2; - pIdxInfo->aConstraintUsage[j].omit = 1; + if( seen[1] ){ + j = seen[1]-1; + pIdxInfo->aConstraintUsage[j].argvIndex = 2; + pIdxInfo->aConstraintUsage[j].omit = 
1; + } return SQLITE_OK; } @@ -140686,6 +141643,7 @@ static void pragmaVtabCursorClear(PragmaVtabCursor *pCsr){ int i; sqlite3_finalize(pCsr->pPragma); pCsr->pPragma = 0; + pCsr->iRowid = 0; for(i=0; iazArg); i++){ sqlite3_free(pCsr->azArg[i]); pCsr->azArg[i] = 0; @@ -141486,7 +142444,13 @@ SQLITE_PRIVATE void *sqlite3ParserAddCleanup( void (*xCleanup)(sqlite3*,void*), /* The cleanup routine */ void *pPtr /* Pointer to object to be cleaned up */ ){ - ParseCleanup *pCleanup = sqlite3DbMallocRaw(pParse->db, sizeof(*pCleanup)); + ParseCleanup *pCleanup; + if( sqlite3FaultSim(300) ){ + pCleanup = 0; + sqlite3OomFault(pParse->db); + }else{ + pCleanup = sqlite3DbMallocRaw(pParse->db, sizeof(*pCleanup)); + } if( pCleanup ){ pCleanup->pNext = pParse->pCleanup; pParse->pCleanup = pCleanup; @@ -143608,9 +144572,16 @@ static void generateSortTail( int addrExplain; /* Address of OP_Explain instruction */ #endif - ExplainQueryPlan2(addrExplain, (pParse, 0, - "USE TEMP B-TREE FOR %sORDER BY", pSort->nOBSat>0?"RIGHT PART OF ":"") - ); + nKey = pOrderBy->nExpr - pSort->nOBSat; + if( pSort->nOBSat==0 || nKey==1 ){ + ExplainQueryPlan2(addrExplain, (pParse, 0, + "USE TEMP B-TREE FOR %sORDER BY", pSort->nOBSat?"LAST TERM OF ":"" + )); + }else{ + ExplainQueryPlan2(addrExplain, (pParse, 0, + "USE TEMP B-TREE FOR LAST %d TERMS OF ORDER BY", nKey + )); + } sqlite3VdbeScanStatusRange(v, addrExplain,pSort->addrPush,pSort->addrPushEnd); sqlite3VdbeScanStatusCounters(v, addrExplain, addrExplain, pSort->addrPush); @@ -143648,7 +144619,6 @@ static void generateSortTail( regRow = sqlite3GetTempRange(pParse, nColumn); } } - nKey = pOrderBy->nExpr - pSort->nOBSat; if( pSort->sortFlags & SORTFLAG_UseSorter ){ int regSortOut = ++pParse->nMem; iSortTab = pParse->nTab++; @@ -144253,8 +145223,7 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( NameContext sNC; assert( pSelect!=0 ); - testcase( (pSelect->selFlags & SF_Resolved)==0 ); - assert( (pSelect->selFlags & SF_Resolved)!=0 || IN_RENAME_OBJECT ); 
+ assert( (pSelect->selFlags & SF_Resolved)!=0 ); assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 ); assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB ); if( db->mallocFailed || IN_RENAME_OBJECT ) return; @@ -144265,17 +145234,22 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ const char *zType; i64 n; + int m = 0; + Select *pS2 = pSelect; pTab->tabFlags |= (pCol->colFlags & COLFLAG_NOINSERT); p = a[i].pExpr; /* pCol->szEst = ... // Column size est for SELECT tables never used */ pCol->affinity = sqlite3ExprAffinity(p); + while( pCol->affinity<=SQLITE_AFF_NONE && pS2->pNext!=0 ){ + m |= sqlite3ExprDataType(pS2->pEList->a[i].pExpr); + pS2 = pS2->pNext; + pCol->affinity = sqlite3ExprAffinity(pS2->pEList->a[i].pExpr); + } if( pCol->affinity<=SQLITE_AFF_NONE ){ pCol->affinity = aff; } - if( pCol->affinity>=SQLITE_AFF_TEXT && pSelect->pNext ){ - int m = 0; - Select *pS2; - for(m=0, pS2=pSelect->pNext; pS2; pS2=pS2->pNext){ + if( pCol->affinity>=SQLITE_AFF_TEXT && (pS2->pNext || pS2!=pSelect) ){ + for(pS2=pS2->pNext; pS2; pS2=pS2->pNext){ m |= sqlite3ExprDataType(pS2->pEList->a[i].pExpr); } if( pCol->affinity==SQLITE_AFF_TEXT && (m&0x01)!=0 ){ @@ -144305,12 +145279,12 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( } } if( zType ){ - i64 m = sqlite3Strlen30(zType); + const i64 k = sqlite3Strlen30(zType); n = sqlite3Strlen30(pCol->zCnName); - pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+m+2); + pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+k+2); pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL); if( pCol->zCnName ){ - memcpy(&pCol->zCnName[n+1], zType, m+1); + memcpy(&pCol->zCnName[n+1], zType, k+1); pCol->colFlags |= COLFLAG_HASTYPE; } } @@ -146707,7 +147681,7 @@ static void constInsert( ){ int i; assert( pColumn->op==TK_COLUMN ); - assert( sqlite3ExprIsConstant(pValue) ); + assert( sqlite3ExprIsConstant(pConst->pParse, pValue) ); if( ExprHasProperty(pColumn, EP_FixedCol) 
) return; if( sqlite3ExprAffinity(pValue)!=0 ) return; @@ -146765,10 +147739,10 @@ static void findConstInWhere(WhereConst *pConst, Expr *pExpr){ pLeft = pExpr->pLeft; assert( pRight!=0 ); assert( pLeft!=0 ); - if( pRight->op==TK_COLUMN && sqlite3ExprIsConstant(pLeft) ){ + if( pRight->op==TK_COLUMN && sqlite3ExprIsConstant(pConst->pParse, pLeft) ){ constInsert(pConst,pRight,pLeft,pExpr); } - if( pLeft->op==TK_COLUMN && sqlite3ExprIsConstant(pRight) ){ + if( pLeft->op==TK_COLUMN && sqlite3ExprIsConstant(pConst->pParse, pRight) ){ constInsert(pConst,pLeft,pRight,pExpr); } } @@ -146989,6 +147963,18 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ ** The hope is that the terms added to the inner query will make it more ** efficient. ** +** NAME AMBIGUITY +** +** This optimization is called the "WHERE-clause push-down optimization". +** +** Do not confuse this optimization with another unrelated optimization +** with a similar name: The "MySQL push-down optimization" causes WHERE +** clause terms that can be evaluated using only the index and without +** reference to the table are run first, so that if they are false, +** unnecessary table seeks are avoided. +** +** RULES +** ** Do not attempt this optimization if: ** ** (1) (** This restriction was removed on 2017-09-29. We used to @@ -147054,10 +148040,10 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ ** (9c) There is a RIGHT JOIN (or FULL JOIN) in between the ON/USING ** clause and the subquery. ** -** Without this restriction, the push-down optimization might move -** the ON/USING filter expression from the left side of a RIGHT JOIN -** over to the right side, which leads to incorrect answers. See -** also restriction (6) in sqlite3ExprIsSingleTableConstraint(). 
+** Without this restriction, the WHERE-clause push-down optimization +** might move the ON/USING filter expression from the left side of a +** RIGHT JOIN over to the right side, which leads to incorrect answers. +** See also restriction (6) in sqlite3ExprIsSingleTableConstraint(). ** ** (10) The inner query is not the right-hand table of a RIGHT JOIN. ** @@ -147189,7 +148175,7 @@ static int pushDownWhereTerms( } #endif - if( sqlite3ExprIsSingleTableConstraint(pWhere, pSrcList, iSrc) ){ + if( sqlite3ExprIsSingleTableConstraint(pWhere, pSrcList, iSrc, 1) ){ nChng++; pSubq->selFlags |= SF_PushDown; while( pSubq ){ @@ -148324,8 +149310,7 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ if( p->selFlags & SF_HasTypeInfo ) return; p->selFlags |= SF_HasTypeInfo; pParse = pWalker->pParse; - testcase( (p->selFlags & SF_Resolved)==0 ); - assert( (p->selFlags & SF_Resolved) || IN_RENAME_OBJECT ); + assert( (p->selFlags & SF_Resolved) ); pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ Table *pTab = pFrom->pTab; @@ -148395,6 +149380,8 @@ SQLITE_PRIVATE void sqlite3SelectPrep( */ static void printAggInfo(AggInfo *pAggInfo){ int ii; + sqlite3DebugPrintf("AggInfo %d/%p:\n", + pAggInfo->selId, pAggInfo); for(ii=0; iinColumn; ii++){ struct AggInfo_col *pCol = &pAggInfo->aCol[ii]; sqlite3DebugPrintf( @@ -149585,7 +150572,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Generate code for all sub-queries in the FROM clause */ pSub = pItem->pSelect; - if( pSub==0 ) continue; + if( pSub==0 || pItem->addrFillSub!=0 ) continue; /* The code for a subquery should only be generated once. 
*/ assert( pItem->addrFillSub==0 ); @@ -149616,7 +150603,7 @@ SQLITE_PRIVATE int sqlite3Select( #endif assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 ); }else{ - TREETRACE(0x4000,pParse,p,("Push-down not possible\n")); + TREETRACE(0x4000,pParse,p,("WHERE-lcause push-down not possible\n")); } /* Convert unused result columns of the subquery into simple NULL @@ -150497,6 +151484,12 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3ExprListDelete(db, pMinMaxOrderBy); #ifdef SQLITE_DEBUG if( pAggInfo && !db->mallocFailed ){ +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x20 ){ + TREETRACE(0x20,pParse,p,("Finished with AggInfo\n")); + printAggInfo(pAggInfo); + } +#endif for(i=0; inColumn; i++){ Expr *pExpr = pAggInfo->aCol[i].pCExpr; if( pExpr==0 ) continue; @@ -151678,6 +152671,72 @@ static ExprList *sqlite3ExpandReturning( return pNew; } +/* If the Expr node is a subquery or an EXISTS operator or an IN operator that +** uses a subquery, and if the subquery is SF_Correlated, then mark the +** expression as EP_VarSelect. +*/ +static int sqlite3ReturningSubqueryVarSelect(Walker *NotUsed, Expr *pExpr){ + UNUSED_PARAMETER(NotUsed); + if( ExprUseXSelect(pExpr) + && (pExpr->x.pSelect->selFlags & SF_Correlated)!=0 + ){ + testcase( ExprHasProperty(pExpr, EP_VarSelect) ); + ExprSetProperty(pExpr, EP_VarSelect); + } + return WRC_Continue; +} + + +/* +** If the SELECT references the table pWalker->u.pTab, then do two things: +** +** (1) Mark the SELECT as as SF_Correlated. +** (2) Set pWalker->eCode to non-zero so that the caller will know +** that (1) has happened. 
+*/ +static int sqlite3ReturningSubqueryCorrelated(Walker *pWalker, Select *pSelect){ + int i; + SrcList *pSrc; + assert( pSelect!=0 ); + pSrc = pSelect->pSrc; + assert( pSrc!=0 ); + for(i=0; inSrc; i++){ + if( pSrc->a[i].pTab==pWalker->u.pTab ){ + testcase( pSelect->selFlags & SF_Correlated ); + pSelect->selFlags |= SF_Correlated; + pWalker->eCode = 1; + break; + } + } + return WRC_Continue; +} + +/* +** Scan the expression list that is the argument to RETURNING looking +** for subqueries that depend on the table which is being modified in the +** statement that is hosting the RETURNING clause (pTab). Mark all such +** subqueries as SF_Correlated. If the subqueries are part of an +** expression, mark the expression as EP_VarSelect. +** +** https://sqlite.org/forum/forumpost/2c83569ce8945d39 +*/ +static void sqlite3ProcessReturningSubqueries( + ExprList *pEList, + Table *pTab +){ + Walker w; + memset(&w, 0, sizeof(w)); + w.xExprCallback = sqlite3ExprWalkNoop; + w.xSelectCallback = sqlite3ReturningSubqueryCorrelated; + w.u.pTab = pTab; + sqlite3WalkExprList(&w, pEList); + if( w.eCode ){ + w.xExprCallback = sqlite3ReturningSubqueryVarSelect; + w.xSelectCallback = sqlite3SelectWalkNoop; + sqlite3WalkExprList(&w, pEList); + } +} + /* ** Generate code for the RETURNING trigger. 
Unlike other triggers ** that invoke a subprogram in the bytecode, the code for RETURNING @@ -151714,6 +152773,7 @@ static void codeReturningTrigger( sSelect.pSrc = &sFrom; sFrom.nSrc = 1; sFrom.a[0].pTab = pTab; + sFrom.a[0].zName = pTab->zName; /* tag-20240424-1 */ sFrom.a[0].iCursor = -1; sqlite3SelectPrep(pParse, &sSelect, 0); if( pParse->nErr==0 ){ @@ -151740,6 +152800,7 @@ static void codeReturningTrigger( int i; int nCol = pNew->nExpr; int reg = pParse->nMem+1; + sqlite3ProcessReturningSubqueries(pNew, pTab); pParse->nMem += nCol+2; pReturning->iRetReg = reg; for(i=0; ipVtabCtx = &sCtx; pTab->nTabRef++; rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr); + assert( pTab!=0 ); + assert( pTab->nTabRef>1 || rc!=SQLITE_OK ); sqlite3DeleteTable(db, pTab); db->pVtabCtx = sCtx.pPrior; if( rc==SQLITE_NOMEM ) sqlite3OomFault(db); @@ -154972,7 +156035,7 @@ static int vtabCallConstructor( pVTable->nRef = 1; if( sCtx.bDeclared==0 ){ const char *zFormat = "vtable constructor did not declare schema: %s"; - *pzErr = sqlite3MPrintf(db, zFormat, pTab->zName); + *pzErr = sqlite3MPrintf(db, zFormat, zModuleName); sqlite3VtabUnlock(pVTable); rc = SQLITE_ERROR; }else{ @@ -155150,12 +156213,30 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ Table *pTab; Parse sParse; int initBusy; + int i; + const unsigned char *z; + static const u8 aKeyword[] = { TK_CREATE, TK_TABLE, 0 }; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zCreateTable==0 ){ return SQLITE_MISUSE_BKPT; } #endif + + /* Verify that the first two keywords in the CREATE TABLE statement + ** really are "CREATE" and "TABLE". If this is not the case, then + ** sqlite3_declare_vtab() is being misused. 
+ */ + z = (const unsigned char*)zCreateTable; + for(i=0; aKeyword[i]; i++){ + int tokenType = 0; + do{ z += sqlite3GetToken(z, &tokenType); }while( tokenType==TK_SPACE ); + if( tokenType!=aKeyword[i] ){ + sqlite3ErrorWithMsg(db, SQLITE_ERROR, "syntax error"); + return SQLITE_ERROR; + } + } + sqlite3_mutex_enter(db->mutex); pCtx = db->pVtabCtx; if( !pCtx || pCtx->bDeclared ){ @@ -155163,6 +156244,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ sqlite3_mutex_leave(db->mutex); return SQLITE_MISUSE_BKPT; } + pTab = pCtx->pTab; assert( IsVirtual(pTab) ); @@ -155176,11 +156258,10 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ initBusy = db->init.busy; db->init.busy = 0; sParse.nQueryLoop = 1; - if( SQLITE_OK==sqlite3RunParser(&sParse, zCreateTable) - && ALWAYS(sParse.pNewTable!=0) - && ALWAYS(!db->mallocFailed) - && IsOrdinaryTable(sParse.pNewTable) - ){ + if( SQLITE_OK==sqlite3RunParser(&sParse, zCreateTable) ){ + assert( sParse.pNewTable!=0 ); + assert( !db->mallocFailed ); + assert( IsOrdinaryTable(sParse.pNewTable) ); assert( sParse.zErrMsg==0 ); if( !pTab->aCol ){ Table *pNew = sParse.pNewTable; @@ -157675,6 +158756,27 @@ static SQLITE_NOINLINE void filterPullDown( } } +/* +** Loop pLoop is a WHERE_INDEXED level that uses at least one IN(...) +** operator. Return true if level pLoop is guaranteed to visit only one +** row for each key generated for the index. +*/ +static int whereLoopIsOneRow(WhereLoop *pLoop){ + if( pLoop->u.btree.pIndex->onError + && pLoop->nSkip==0 + && pLoop->u.btree.nEq==pLoop->u.btree.pIndex->nKeyCol + ){ + int ii; + for(ii=0; iiu.btree.nEq; ii++){ + if( pLoop->aLTerm[ii]->eOperator & (WO_IS|WO_ISNULL) ){ + return 0; + } + } + return 1; + } + return 0; +} + /* ** Generate code for the start of the iLevel-th loop in the WHERE clause ** implementation described by pWInfo. 
@@ -157753,7 +158855,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( pLevel->iFrom>0 && (pTabItem[0].fg.jointype & JT_LEFT)!=0 ){ pLevel->iLeftJoin = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin); - VdbeComment((v, "init LEFT JOIN no-match flag")); + VdbeComment((v, "init LEFT JOIN match flag")); } /* Compute a safe address to jump to if we discover that the table for @@ -158422,7 +159524,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } /* Record the instruction used to terminate the loop. */ - if( pLoop->wsFlags & WHERE_ONEROW ){ + if( (pLoop->wsFlags & WHERE_ONEROW) + || (pLevel->u.in.nIn && regBignull==0 && whereLoopIsOneRow(pLoop)) + ){ pLevel->op = OP_Noop; }else if( bRev ){ pLevel->op = OP_Prev; @@ -158812,6 +159916,12 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** iLoop==3: Code all remaining expressions. ** ** An effort is made to skip unnecessary iterations of the loop. + ** + ** This optimization of causing simple query restrictions to occur before + ** more complex one is call the "push-down" optimization in MySQL. Here + ** in SQLite, the name is "MySQL push-down", since there is also another + ** totally unrelated optimization called "WHERE-clause push-down". + ** Sometimes the qualifier is omitted, resulting in an ambiguity, so beware. */ iLoop = (pIdx ? 
1 : 2); do{ @@ -159062,7 +160172,16 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( pRJ->regReturn); for(k=0; ka[k].pWLoop->iTab == pWInfo->a[k].iFrom ); + pRight = &pWInfo->pTabList->a[pWInfo->a[k].iFrom]; mAll |= pWInfo->a[k].pWLoop->maskSelf; + if( pRight->fg.viaCoroutine ){ + sqlite3VdbeAddOp3( + v, OP_Null, 0, pRight->regResult, + pRight->regResult + pRight->pSelect->pEList->nExpr-1 + ); + } sqlite3VdbeAddOp1(v, OP_NullRow, pWInfo->a[k].iTabCur); iIdxCur = pWInfo->a[k].iIdxCur; if( iIdxCur ){ @@ -160119,7 +161238,7 @@ static SQLITE_NOINLINE int exprMightBeIndexed2( if( pIdx->aiColumn[i]!=XN_EXPR ) continue; assert( pIdx->bHasExpr ); if( sqlite3ExprCompareSkip(pExpr,pIdx->aColExpr->a[i].pExpr,iCur)==0 - && pExpr->op!=TK_STRING + && !sqlite3ExprIsConstant(0,pIdx->aColExpr->a[i].pExpr) ){ aiCurCol[0] = iCur; aiCurCol[1] = XN_EXPR; @@ -160768,6 +161887,7 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec continue; } if( pWC->a[ii].leftCursor!=iCsr ) return; + if( pWC->a[ii].prereqRight!=0 ) return; } /* Check condition (5). Return early if it is not met. */ @@ -160782,12 +161902,14 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec /* All conditions are met. Add the terms to the where-clause object. */ assert( p->pLimit->op==TK_LIMIT ); - whereAddLimitExpr(pWC, p->iLimit, p->pLimit->pLeft, - iCsr, SQLITE_INDEX_CONSTRAINT_LIMIT); - if( p->iOffset>0 ){ + if( p->iOffset!=0 && (p->selFlags & SF_Compound)==0 ){ whereAddLimitExpr(pWC, p->iOffset, p->pLimit->pRight, iCsr, SQLITE_INDEX_CONSTRAINT_OFFSET); } + if( p->iOffset==0 || (p->selFlags & SF_Compound)==0 ){ + whereAddLimitExpr(pWC, p->iLimit, p->pLimit->pLeft, + iCsr, SQLITE_INDEX_CONSTRAINT_LIMIT); + } } } @@ -161305,6 +162427,42 @@ static Expr *whereRightSubexprIsColumn(Expr *p){ return 0; } +/* +** Term pTerm is guaranteed to be a WO_IN term. It may be a component term +** of a vector IN expression of the form "(x, y, ...) 
IN (SELECT ...)". +** This function checks to see if the term is compatible with an index +** column with affinity idxaff (one of the SQLITE_AFF_XYZ values). If so, +** it returns a pointer to the name of the collation sequence (e.g. "BINARY" +** or "NOCASE") used by the comparison in pTerm. If it is not compatible +** with affinity idxaff, NULL is returned. +*/ +static SQLITE_NOINLINE const char *indexInAffinityOk( + Parse *pParse, + WhereTerm *pTerm, + u8 idxaff +){ + Expr *pX = pTerm->pExpr; + Expr inexpr; + + assert( pTerm->eOperator & WO_IN ); + + if( sqlite3ExprIsVector(pX->pLeft) ){ + int iField = pTerm->u.x.iField - 1; + inexpr.flags = 0; + inexpr.op = TK_EQ; + inexpr.pLeft = pX->pLeft->x.pList->a[iField].pExpr; + assert( ExprUseXSelect(pX) ); + inexpr.pRight = pX->x.pSelect->pEList->a[iField].pExpr; + pX = &inexpr; + } + + if( sqlite3IndexAffinityOk(pX, idxaff) ){ + CollSeq *pRet = sqlite3ExprCompareCollSeq(pParse, pX); + return pRet ? pRet->zName : sqlite3StrBINARY; + } + return 0; +} + /* ** Advance to the next WhereTerm that matches according to the criteria ** established when the pScan object was initialized by whereScanInit(). @@ -161355,16 +162513,24 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ if( (pTerm->eOperator & pScan->opMask)!=0 ){ /* Verify the affinity and collating sequence match */ if( pScan->zCollName && (pTerm->eOperator & WO_ISNULL)==0 ){ - CollSeq *pColl; + const char *zCollName; Parse *pParse = pWC->pWInfo->pParse; pX = pTerm->pExpr; - if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){ - continue; + + if( (pTerm->eOperator & WO_IN) ){ + zCollName = indexInAffinityOk(pParse, pTerm, pScan->idxaff); + if( !zCollName ) continue; + }else{ + CollSeq *pColl; + if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){ + continue; + } + assert(pX->pLeft); + pColl = sqlite3ExprCompareCollSeq(pParse, pX); + zCollName = pColl ? 
pColl->zName : sqlite3StrBINARY; } - assert(pX->pLeft); - pColl = sqlite3ExprCompareCollSeq(pParse, pX); - if( pColl==0 ) pColl = pParse->db->pDfltColl; - if( sqlite3StrICmp(pColl->zName, pScan->zCollName) ){ + + if( sqlite3StrICmp(zCollName, pScan->zCollName) ){ continue; } } @@ -161716,9 +162882,13 @@ static void translateColumnToCopy( ** are no-ops. */ #if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(WHERETRACE_ENABLED) -static void whereTraceIndexInfoInputs(sqlite3_index_info *p){ +static void whereTraceIndexInfoInputs( + sqlite3_index_info *p, /* The IndexInfo object */ + Table *pTab /* The TABLE that is the virtual table */ +){ int i; if( (sqlite3WhereTrace & 0x10)==0 ) return; + sqlite3DebugPrintf("sqlite3_index_info inputs for %s:\n", pTab->zName); for(i=0; inConstraint; i++){ sqlite3DebugPrintf( " constraint[%d]: col=%d termid=%d op=%d usabled=%d collseq=%s\n", @@ -161736,9 +162906,13 @@ static void whereTraceIndexInfoInputs(sqlite3_index_info *p){ p->aOrderBy[i].desc); } } -static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ +static void whereTraceIndexInfoOutputs( + sqlite3_index_info *p, /* The IndexInfo object */ + Table *pTab /* The TABLE that is the virtual table */ +){ int i; if( (sqlite3WhereTrace & 0x10)==0 ) return; + sqlite3DebugPrintf("sqlite3_index_info outputs for %s:\n", pTab->zName); for(i=0; inConstraint; i++){ sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n", i, @@ -161752,8 +162926,8 @@ static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ sqlite3DebugPrintf(" estimatedRows=%lld\n", p->estimatedRows); } #else -#define whereTraceIndexInfoInputs(A) -#define whereTraceIndexInfoOutputs(A) +#define whereTraceIndexInfoInputs(A,B) +#define whereTraceIndexInfoOutputs(A,B) #endif /* @@ -161937,7 +163111,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( ** WHERE clause (or the ON clause of a LEFT join) that constrain which ** rows of the target table (pSrc) that can be used. 
*/ if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, pLevel->iFrom) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, pLevel->iFrom, 0) ){ pPartial = sqlite3ExprAnd(pParse, pPartial, sqlite3ExprDup(pParse->db, pExpr, 0)); @@ -161979,7 +163153,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( ** if they go out of sync. */ if( IsView(pTable) ){ - extraCols = ALLBITS; + extraCols = ALLBITS & ~idxCols; }else{ extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); } @@ -162206,7 +163380,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( for(pTerm=pWInfo->sWC.a; pTermpExpr; if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, iSrc) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, iSrc, 0) ){ sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL); } @@ -162332,7 +163506,7 @@ static sqlite3_index_info *allocateIndexInfo( Expr *pE2; /* Skip over constant terms in the ORDER BY clause */ - if( sqlite3ExprIsConstant(pExpr) ){ + if( sqlite3ExprIsConstant(0, pExpr) ){ continue; } @@ -162367,7 +163541,7 @@ static sqlite3_index_info *allocateIndexInfo( } if( i==n ){ nOrderBy = n; - if( (pWInfo->wctrlFlags & WHERE_DISTINCTBY) ){ + if( (pWInfo->wctrlFlags & WHERE_DISTINCTBY) && !pSrc->fg.rowidUsed ){ eDistinct = 2 + ((pWInfo->wctrlFlags & WHERE_SORTBYGROUP)!=0); }else if( pWInfo->wctrlFlags & WHERE_GROUPBY ){ eDistinct = 1; @@ -162444,7 +163618,7 @@ static sqlite3_index_info *allocateIndexInfo( pIdxInfo->nConstraint = j; for(i=j=0; ia[i].pExpr; - if( sqlite3ExprIsConstant(pExpr) ) continue; + if( sqlite3ExprIsConstant(0, pExpr) ) continue; assert( pExpr->op==TK_COLUMN || (pExpr->op==TK_COLLATE && pExpr->pLeft->op==TK_COLUMN && pExpr->iColumn==pExpr->pLeft->iColumn) ); @@ -162496,11 +163670,11 @@ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; int rc; - 
whereTraceIndexInfoInputs(p); + whereTraceIndexInfoInputs(p, pTab); pParse->db->nSchemaLock++; rc = pVtab->pModule->xBestIndex(pVtab, p); pParse->db->nSchemaLock--; - whereTraceIndexInfoOutputs(p); + whereTraceIndexInfoOutputs(p, pTab); if( rc!=SQLITE_OK && rc!=SQLITE_CONSTRAINT ){ if( rc==SQLITE_NOMEM ){ @@ -163978,7 +165152,9 @@ static int whereLoopAddBtreeIndex( } if( pProbe->bUnordered || pProbe->bLowQual ){ if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); - if( pProbe->bLowQual ) opMask &= ~(WO_EQ|WO_IN|WO_IS); + if( pProbe->bLowQual && pSrc->fg.isIndexedBy==0 ){ + opMask &= ~(WO_EQ|WO_IN|WO_IS); + } } assert( pNew->u.btree.nEqnColumn ); @@ -164245,10 +165421,13 @@ static int whereLoopAddBtreeIndex( } } - /* Set rCostIdx to the cost of visiting selected rows in index. Add - ** it to pNew->rRun, which is currently set to the cost of the index - ** seek only. Then, if this is a non-covering index, add the cost of - ** visiting the rows in the main table. */ + /* Set rCostIdx to the estimated cost of visiting selected rows in the + ** index. The estimate is the sum of two values: + ** 1. The cost of doing one search-by-key to find the first matching + ** entry + ** 2. Stepping forward in the index pNew->nOut times to find all + ** additional matching entries. + */ assert( pSrc->pTab->szTabRow>0 ); if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){ /* The pProbe->szIdxRow is low for an IPK table since the interior @@ -164259,7 +165438,15 @@ static int whereLoopAddBtreeIndex( }else{ rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; } - pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx); + rCostIdx = sqlite3LogEstAdd(rLogSize, rCostIdx); + + /* Estimate the cost of running the loop. If all data is coming + ** from the index, then this is just the cost of doing the index + ** lookup and scan. 
But if some data is coming out of the main table, + ** we also have to add in the cost of doing pNew->nOut searches to + ** locate the row in the main table that corresponds to the index entry. + */ + pNew->rRun = rCostIdx; if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK|WHERE_EXPRIDX))==0 ){ pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16); } @@ -164365,7 +165552,9 @@ static int indexMightHelpWithOrderBy( for(ii=0; iinExpr; ii++){ Expr *pExpr = sqlite3ExprSkipCollateAndLikely(pOB->a[ii].pExpr); if( NEVER(pExpr==0) ) continue; - if( pExpr->op==TK_COLUMN && pExpr->iTable==iCursor ){ + if( (pExpr->op==TK_COLUMN || pExpr->op==TK_AGG_COLUMN) + && pExpr->iTable==iCursor + ){ if( pExpr->iColumn<0 ) return 1; for(jj=0; jjnKeyCol; jj++){ if( pExpr->iColumn==pIndex->aiColumn[jj] ) return 1; @@ -164622,7 +165811,7 @@ static void wherePartIdxExpr( u8 aff; if( pLeft->op!=TK_COLUMN ) return; - if( !sqlite3ExprIsConstant(pRight) ) return; + if( !sqlite3ExprIsConstant(0, pRight) ) return; if( !sqlite3IsBinary(sqlite3ExprCompareCollSeq(pParse, pPart)) ) return; if( pLeft->iColumn<0 ) return; aff = pIdx->pTable->aCol[pLeft->iColumn].affinity; @@ -164895,7 +166084,9 @@ static int whereLoopAddBtree( " according to whereIsCoveringIndex()\n", pProbe->zName)); } } - }else if( m==0 ){ + }else if( m==0 + && (HasRowid(pTab) || pWInfo->pSelect!=0 || sqlite3FaultSim(700)) + ){ WHERETRACE(0x200, ("-> %s a covering index according to bitmasks\n", pProbe->zName, m==0 ? 
"is" : "is not")); @@ -164971,7 +166162,7 @@ static int whereLoopAddBtree( ** unique index is used (making the index functionally non-unique) ** then the sqlite_stat1 data becomes important for scoring the ** plan */ - pTab->tabFlags |= TF_StatsUsed; + pTab->tabFlags |= TF_MaybeReanalyze; } #ifdef SQLITE_ENABLE_STAT4 sqlite3Stat4ProbeFree(pBuilder->pRec); @@ -164993,6 +166184,21 @@ static int isLimitTerm(WhereTerm *pTerm){ && pTerm->eMatchOp<=SQLITE_INDEX_CONSTRAINT_OFFSET; } +/* +** Return true if the first nCons constraints in the pUsage array are +** marked as in-use (have argvIndex>0). False otherwise. +*/ +static int allConstraintsUsed( + struct sqlite3_index_constraint_usage *aUsage, + int nCons +){ + int ii; + for(ii=0; iipNew->iTab. This @@ -165133,13 +166339,20 @@ static int whereLoopAddVirtualOne( *pbIn = 1; assert( (mExclude & WO_IN)==0 ); } + /* Unless pbRetryLimit is non-NULL, there should be no LIMIT/OFFSET + ** terms. And if there are any, they should follow all other terms. */ assert( pbRetryLimit || !isLimitTerm(pTerm) ); - if( isLimitTerm(pTerm) && *pbIn ){ + assert( !isLimitTerm(pTerm) || i>=nConstraint-2 ); + assert( !isLimitTerm(pTerm) || i==nConstraint-1 || isLimitTerm(pTerm+1) ); + + if( isLimitTerm(pTerm) && (*pbIn || !allConstraintsUsed(pUsage, i)) ){ /* If there is an IN(...) term handled as an == (separate call to ** xFilter for each value on the RHS of the IN) and a LIMIT or - ** OFFSET term handled as well, the plan is unusable. Set output - ** variable *pbRetryLimit to true to tell the caller to retry with - ** LIMIT and OFFSET disabled. */ + ** OFFSET term handled as well, the plan is unusable. Similarly, + ** if there is a LIMIT/OFFSET and there are other unused terms, + ** the plan cannot be used. In these cases set variable *pbRetryLimit + ** to true to tell the caller to retry with LIMIT and OFFSET + ** disabled. 
*/ if( pIdxInfo->needToFreeIdxStr ){ sqlite3_free(pIdxInfo->idxStr); pIdxInfo->idxStr = 0; @@ -165996,7 +167209,7 @@ static i8 wherePathSatisfiesOrderBy( if( MASKBIT(i) & obSat ) continue; p = pOrderBy->a[i].pExpr; mTerm = sqlite3WhereExprUsage(&pWInfo->sMaskSet,p); - if( mTerm==0 && !sqlite3ExprIsConstant(p) ) continue; + if( mTerm==0 && !sqlite3ExprIsConstant(0,p) ) continue; if( (mTerm&~orderDistinctMask)==0 ){ obSat |= MASKBIT(i); } @@ -166465,10 +167678,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; } - if( pWInfo->pSelect->pOrderBy - && pWInfo->nOBSat > pWInfo->pSelect->pOrderBy->nExpr ){ - pWInfo->nOBSat = pWInfo->pSelect->pOrderBy->nExpr; - } + /* vvv--- See check-in [12ad822d9b827777] on 2023-03-16 ---vvv */ + assert( pWInfo->pSelect->pOrderBy==0 + || pWInfo->nOBSat <= pWInfo->pSelect->pOrderBy->nExpr ); }else{ pWInfo->revMask = pFrom->revLoop; if( pWInfo->nOBSat<=0 ){ @@ -166511,7 +167723,6 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ } } - pWInfo->nRowOut = pFrom->nRow; /* Free temporary memory and return success */ @@ -166519,6 +167730,83 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ return SQLITE_OK; } +/* +** This routine implements a heuristic designed to improve query planning. +** This routine is called in between the first and second call to +** wherePathSolver(). Hence the name "Interstage" "Heuristic". +** +** The first call to wherePathSolver() (hereafter just "solver()") computes +** the best path without regard to the order of the outputs. The second call +** to the solver() builds upon the first call to try to find an alternative +** path that satisfies the ORDER BY clause. 
+** +** This routine looks at the results of the first solver() run, and for +** every FROM clause term in the resulting query plan that uses an equality +** constraint against an index, disable other WhereLoops for that same +** FROM clause term that would try to do a full-table scan. This prevents +** an index search from being converted into a full-table scan in order to +** satisfy an ORDER BY clause, since even though we might get slightly better +** performance using the full-scan without sorting if the output size +** estimates are very precise, we might also get severe performance +** degradation using the full-scan if the output size estimate is too large. +** It is better to err on the side of caution. +** +** Except, if the first solver() call generated a full-table scan in an outer +** loop then stop this analysis at the first full-scan, since the second +** solver() run might try to swap that full-scan for another in order to +** get the output into the correct order. In other words, we allow a +** rewrite like this: +** +** First Solver() Second Solver() +** |-- SCAN t1 |-- SCAN t2 +** |-- SEARCH t2 `-- SEARCH t1 +** `-- SORT USING B-TREE +** +** The purpose of this routine is to disallow rewrites such as: +** +** First Solver() Second Solver() +** |-- SEARCH t1 |-- SCAN t2 <--- bad! +** |-- SEARCH t2 `-- SEARCH t1 +** `-- SORT USING B-TREE +** +** See test cases in test/whereN.test for the real-world query that +** originally provoked this heuristic. 
+*/ +static SQLITE_NOINLINE void whereInterstageHeuristic(WhereInfo *pWInfo){ + int i; +#ifdef WHERETRACE_ENABLED + int once = 0; +#endif + for(i=0; inLevel; i++){ + WhereLoop *p = pWInfo->a[i].pWLoop; + if( p==0 ) break; + if( (p->wsFlags & WHERE_VIRTUALTABLE)!=0 ) continue; + if( (p->wsFlags & (WHERE_COLUMN_EQ|WHERE_COLUMN_NULL|WHERE_COLUMN_IN))!=0 ){ + u8 iTab = p->iTab; + WhereLoop *pLoop; + for(pLoop=pWInfo->pLoops; pLoop; pLoop=pLoop->pNextLoop){ + if( pLoop->iTab!=iTab ) continue; + if( (pLoop->wsFlags & (WHERE_CONSTRAINT|WHERE_AUTO_INDEX))!=0 ){ + /* Auto-index and index-constrained loops allowed to remain */ + continue; + } +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace & 0x80 ){ + if( once==0 ){ + sqlite3DebugPrintf("Loops disabled by interstage heuristic:\n"); + once = 1; + } + sqlite3WhereLoopPrint(pLoop, &pWInfo->sWC); + } +#endif /* WHERETRACE_ENABLED */ + pLoop->prereq = ALLBITS; /* Prevent 2nd solver() from using this one */ + } + }else{ + break; + } + } +} + /* ** Most queries use only a single table (they are not joins) and have ** simple == constraints against indexed fields. This routine attempts @@ -166687,6 +167975,10 @@ static void showAllWhereLoops(WhereInfo *pWInfo, WhereClause *pWC){ ** the right-most table of a subquery that was flattened into the ** main query and that subquery was the right-hand operand of an ** inner join that held an ON or USING clause. +** 6) The ORDER BY clause has 63 or fewer terms +** 7) The omit-noop-join optimization is enabled. +** +** Items (1), (6), and (7) are checked by the caller. 
** ** For example, given: ** @@ -166807,7 +168099,7 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab]; Table *pTab = pItem->pTab; if( (pTab->tabFlags & TF_HasStat1)==0 ) break; - pTab->tabFlags |= TF_StatsUsed; + pTab->tabFlags |= TF_MaybeReanalyze; if( i>=1 && (pLoop->wsFlags & reqFlags)==reqFlags /* vvvvvv--- Always the case if WHERE_COLUMN_EQ is defined */ @@ -166828,6 +168120,58 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( } } +/* +** Expression Node callback for sqlite3ExprCanReturnSubtype(). +** +** Only a function call is able to return a subtype. So if the node +** is not a function call, return WRC_Prune immediately. +** +** A function call is able to return a subtype if it has the +** SQLITE_RESULT_SUBTYPE property. +** +** Assume that every function is able to pass-through a subtype from +** one of its argument (using sqlite3_result_value()). Most functions +** are not this way, but we don't have a mechanism to distinguish those +** that are from those that are not, so assume they all work this way. +** That means that if one of its arguments is another function and that +** other function is able to return a subtype, then this function is +** able to return a subtype. +*/ +static int exprNodeCanReturnSubtype(Walker *pWalker, Expr *pExpr){ + int n; + FuncDef *pDef; + sqlite3 *db; + if( pExpr->op!=TK_FUNCTION ){ + return WRC_Prune; + } + assert( ExprUseXList(pExpr) ); + db = pWalker->pParse->db; + n = pExpr->x.pList ? pExpr->x.pList->nExpr : 0; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ + pWalker->eCode = 1; + return WRC_Prune; + } + return WRC_Continue; +} + +/* +** Return TRUE if expression pExpr is able to return a subtype. +** +** A TRUE return does not guarantee that a subtype will be returned. +** It only indicates that a subtype return is possible. 
False positives +** are acceptable as they only disable an optimization. False negatives, +** on the other hand, can lead to incorrect answers. +*/ +static int sqlite3ExprCanReturnSubtype(Parse *pParse, Expr *pExpr){ + Walker w; + memset(&w, 0, sizeof(w)); + w.pParse = pParse; + w.xExprCallback = exprNodeCanReturnSubtype; + sqlite3WalkExpr(&w, pExpr); + return w.eCode; +} + /* ** The index pIdx is used by a query and contains one or more expressions. ** In other words pIdx is an index on an expression. iIdxCur is the cursor @@ -166860,20 +168204,12 @@ static SQLITE_NOINLINE void whereAddIndexedExpr( }else{ continue; } - if( sqlite3ExprIsConstant(pExpr) ) continue; - if( pExpr->op==TK_FUNCTION ){ + if( sqlite3ExprIsConstant(0,pExpr) ) continue; + if( pExpr->op==TK_FUNCTION && sqlite3ExprCanReturnSubtype(pParse,pExpr) ){ /* Functions that might set a subtype should not be replaced by the ** value taken from an expression index since the index omits the ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */ - int n; - FuncDef *pDef; - sqlite3 *db = pParse->db; - assert( ExprUseXList(pExpr) ); - n = pExpr->x.pList ? 
pExpr->x.pList->nExpr : 0; - pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); - if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ - continue; - } + continue; } p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr)); if( p==0 ) break; @@ -167056,6 +168392,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( pOrderBy && pOrderBy->nExpr>=BMS ){ pOrderBy = 0; wctrlFlags &= ~WHERE_WANT_DISTINCT; + wctrlFlags |= WHERE_KEEP_ALL_JOINS; /* Disable omit-noop-join opt */ } /* The number of tables in the FROM clause is limited by the number of @@ -167138,7 +168475,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ){ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; } - ExplainQueryPlan((pParse, 0, "SCAN CONSTANT ROW")); + if( ALWAYS(pWInfo->pSelect) + && (pWInfo->pSelect->selFlags & SF_MultiValue)==0 + ){ + ExplainQueryPlan((pParse, 0, "SCAN CONSTANT ROW")); + } }else{ /* Assign a bit from the bitmask to every term in the FROM clause. ** @@ -167291,6 +168632,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( wherePathSolver(pWInfo, 0); if( db->mallocFailed ) goto whereBeginError; if( pWInfo->pOrderBy ){ + whereInterstageHeuristic(pWInfo); wherePathSolver(pWInfo, pWInfo->nRowOut+1); if( db->mallocFailed ) goto whereBeginError; } @@ -167351,10 +168693,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ** in-line sqlite3WhereCodeOneLoopStart() for performance reasons. 
*/ notReady = ~(Bitmask)0; - if( pWInfo->nLevel>=2 - && pResultSet!=0 /* these two combine to guarantee */ - && 0==(wctrlFlags & WHERE_AGG_DISTINCT) /* condition (1) above */ - && OptimizationEnabled(db, SQLITE_OmitNoopJoin) + if( pWInfo->nLevel>=2 /* Must be a join, or this opt8n is pointless */ + && pResultSet!=0 /* Condition (1) */ + && 0==(wctrlFlags & (WHERE_AGG_DISTINCT|WHERE_KEEP_ALL_JOINS)) /* (1),(6) */ + && OptimizationEnabled(db, SQLITE_OmitNoopJoin) /* (7) */ ){ notReady = whereOmitNoopJoin(pWInfo, notReady); nTabList = pWInfo->nLevel; @@ -167674,26 +169016,6 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( } #endif -#ifdef SQLITE_DEBUG -/* -** Return true if cursor iCur is opened by instruction k of the -** bytecode. Used inside of assert() only. -*/ -static int cursorIsOpen(Vdbe *v, int iCur, int k){ - while( k>=0 ){ - VdbeOp *pOp = sqlite3VdbeGetOp(v,k--); - if( pOp->p1!=iCur ) continue; - if( pOp->opcode==OP_Close ) return 0; - if( pOp->opcode==OP_OpenRead ) return 1; - if( pOp->opcode==OP_OpenWrite ) return 1; - if( pOp->opcode==OP_OpenDup ) return 1; - if( pOp->opcode==OP_OpenAutoindex ) return 1; - if( pOp->opcode==OP_OpenEphemeral ) return 1; - } - return 0; -} -#endif /* SQLITE_DEBUG */ - /* ** Generate the end of the WHERE loop. See comments on ** sqlite3WhereBegin() for additional information. 
@@ -167840,7 +169162,15 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); VdbeCoverage(v); assert( (ws & WHERE_IDX_ONLY)==0 || (ws & WHERE_INDEXED)!=0 ); if( (ws & WHERE_IDX_ONLY)==0 ){ - assert( pLevel->iTabCur==pTabList->a[pLevel->iFrom].iCursor ); + SrcItem *pSrc = &pTabList->a[pLevel->iFrom]; + assert( pLevel->iTabCur==pSrc->iCursor ); + if( pSrc->fg.viaCoroutine ){ + int m, n; + n = pSrc->regResult; + assert( pSrc->pTab!=0 ); + m = pSrc->pTab->nCol; + sqlite3VdbeAddOp3(v, OP_Null, 0, n, n+m-1); + } sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iTabCur); } if( (ws & WHERE_INDEXED) @@ -167890,6 +169220,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ */ if( pTabItem->fg.viaCoroutine ){ testcase( pParse->db->mallocFailed ); + assert( pTabItem->regResult>=0 ); translateColumnToCopy(pParse, pLevel->addrBody, pLevel->iTabCur, pTabItem->regResult, 0); continue; @@ -167984,16 +169315,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ ** reference. Verify that this is harmless - that the ** table being referenced really is open. */ -#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC - assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 - || cursorIsOpen(v,pOp->p1,k) - || pOp->opcode==OP_Offset - ); -#else - assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 - || cursorIsOpen(v,pOp->p1,k) - ); -#endif + if( pLoop->wsFlags & WHERE_IDX_ONLY ){ + sqlite3ErrorMsg(pParse, "internal query planner error"); + pParse->rc = SQLITE_INTERNAL; + } } }else if( pOp->opcode==OP_Rowid ){ pOp->p1 = pLevel->iIdxCur; @@ -169194,7 +170519,7 @@ SQLITE_PRIVATE void sqlite3WindowListDelete(sqlite3 *db, Window *p){ ** variable values in the expression tree. 
*/ static Expr *sqlite3WindowOffsetExpr(Parse *pParse, Expr *pExpr){ - if( 0==sqlite3ExprIsConstant(pExpr) ){ + if( 0==sqlite3ExprIsConstant(0,pExpr) ){ if( IN_RENAME_OBJECT ) sqlite3RenameExprUnmap(pParse, pExpr); sqlite3ExprDelete(pParse->db, pExpr); pExpr = sqlite3ExprAlloc(pParse->db, TK_NULL, 0, 0); @@ -171266,9 +172591,9 @@ static void updateDeleteLimitError( break; } } - if( (p->selFlags & SF_MultiValue)==0 && - (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 && - cnt>mxSelect + if( (p->selFlags & (SF_MultiValue|SF_Values))==0 + && (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 + && cnt>mxSelect ){ sqlite3ErrorMsg(pParse, "too many terms in compound SELECT"); } @@ -171288,6 +172613,14 @@ static void updateDeleteLimitError( return pSelect; } + /* Memory allocator for parser stack resizing. This is a thin wrapper around + ** sqlite3_realloc() that includes a call to sqlite3FaultSim() to facilitate + ** testing. + */ + static void *parserStackRealloc(void *pOld, sqlite3_uint64 newSize){ + return sqlite3FaultSim(700) ? 
0 : sqlite3_realloc(pOld, newSize); + } + /* Construct a new Expr object from a single token */ static Expr *tokenExpr(Parse *pParse, int op, Token t){ @@ -171537,8 +172870,8 @@ static void updateDeleteLimitError( #define TK_TRUEFALSE 170 #define TK_ISNOT 171 #define TK_FUNCTION 172 -#define TK_UMINUS 173 -#define TK_UPLUS 174 +#define TK_UPLUS 173 +#define TK_UMINUS 174 #define TK_TRUTH 175 #define TK_REGISTER 176 #define TK_VECTOR 177 @@ -171547,8 +172880,9 @@ static void updateDeleteLimitError( #define TK_ASTERISK 180 #define TK_SPAN 181 #define TK_ERROR 182 -#define TK_SPACE 183 -#define TK_ILLEGAL 184 +#define TK_QNUMBER 183 +#define TK_SPACE 184 +#define TK_ILLEGAL 185 #endif /**************** End token definitions ***************************************/ @@ -171589,6 +172923,9 @@ static void updateDeleteLimitError( ** sqlite3ParserARG_STORE Code to store %extra_argument into yypParser ** sqlite3ParserARG_FETCH Code to extract %extra_argument from yypParser ** sqlite3ParserCTX_* As sqlite3ParserARG_ except for %extra_context +** YYREALLOC Name of the realloc() function to use +** YYFREE Name of the free() function to use +** YYDYNSTACK True if stack space should be extended on heap ** YYERRORSYMBOL is the code number of the error symbol. If not ** defined, then do no error processing. ** YYNSTATE the combined number of states. 
@@ -171602,37 +172939,39 @@ static void updateDeleteLimitError( ** YY_NO_ACTION The yy_action[] code for no-op ** YY_MIN_REDUCE Minimum value for reduce actions ** YY_MAX_REDUCE Maximum value for reduce actions +** YY_MIN_DSTRCTR Minimum symbol value that has a destructor +** YY_MAX_DSTRCTR Maximum symbol value that has a destructor */ #ifndef INTERFACE # define INTERFACE 1 #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 319 +#define YYNOCODE 322 #define YYACTIONTYPE unsigned short int #define YYWILDCARD 101 #define sqlite3ParserTOKENTYPE Token typedef union { int yyinit; sqlite3ParserTOKENTYPE yy0; - TriggerStep* yy33; - Window* yy41; - Select* yy47; - SrcList* yy131; - struct TrigEvent yy180; - struct {int value; int mask;} yy231; - IdList* yy254; - u32 yy285; - ExprList* yy322; - Cte* yy385; - int yy394; - Upsert* yy444; - u8 yy516; - With* yy521; - const char* yy522; - Expr* yy528; - OnOrUsing yy561; - struct FrameBound yy595; + ExprList* yy14; + With* yy59; + Cte* yy67; + Upsert* yy122; + IdList* yy132; + int yy144; + const char* yy168; + SrcList* yy203; + Window* yy211; + OnOrUsing yy269; + struct TrigEvent yy286; + struct {int value; int mask;} yy383; + u32 yy391; + TriggerStep* yy427; + Expr* yy454; + u8 yy462; + struct FrameBound yy509; + Select* yy555; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -171642,24 +172981,29 @@ typedef union { #define sqlite3ParserARG_PARAM #define sqlite3ParserARG_FETCH #define sqlite3ParserARG_STORE +#define YYREALLOC parserStackRealloc +#define YYFREE sqlite3_free +#define YYDYNSTACK 1 #define sqlite3ParserCTX_SDECL Parse *pParse; #define sqlite3ParserCTX_PDECL ,Parse *pParse #define sqlite3ParserCTX_PARAM ,pParse #define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse; #define sqlite3ParserCTX_STORE yypParser->pParse=pParse; #define YYFALLBACK 1 -#define YYNSTATE 583 -#define YYNRULE 405 -#define 
YYNRULE_WITH_ACTION 340 -#define YYNTOKEN 185 -#define YY_MAX_SHIFT 582 -#define YY_MIN_SHIFTREDUCE 842 -#define YY_MAX_SHIFTREDUCE 1246 -#define YY_ERROR_ACTION 1247 -#define YY_ACCEPT_ACTION 1248 -#define YY_NO_ACTION 1249 -#define YY_MIN_REDUCE 1250 -#define YY_MAX_REDUCE 1654 +#define YYNSTATE 587 +#define YYNRULE 409 +#define YYNRULE_WITH_ACTION 344 +#define YYNTOKEN 186 +#define YY_MAX_SHIFT 586 +#define YY_MIN_SHIFTREDUCE 849 +#define YY_MAX_SHIFTREDUCE 1257 +#define YY_ERROR_ACTION 1258 +#define YY_ACCEPT_ACTION 1259 +#define YY_NO_ACTION 1260 +#define YY_MIN_REDUCE 1261 +#define YY_MAX_REDUCE 1669 +#define YY_MIN_DSTRCTR 205 +#define YY_MAX_DSTRCTR 319 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -171675,6 +173019,22 @@ typedef union { # define yytestcase(X) #endif +/* Macro to determine if stack space has the ability to grow using +** heap memory. +*/ +#if YYSTACKDEPTH<=0 || YYDYNSTACK +# define YYGROWABLESTACK 1 +#else +# define YYGROWABLESTACK 0 +#endif + +/* Guarantee a minimum number of initial stack slots. +*/ +#if YYSTACKDEPTH<=0 +# undef YYSTACKDEPTH +# define YYSTACKDEPTH 2 /* Need a minimum stack size */ +#endif + /* Next are the tables used to determine what action to take based on the ** current state and lookahead token. These tables are used to implement @@ -171726,624 +173086,629 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2112) +#define YY_ACTTAB_COUNT (2138) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 576, 210, 576, 119, 116, 231, 576, 119, 116, 231, - /* 10 */ 576, 1321, 383, 1300, 414, 570, 570, 570, 576, 415, - /* 20 */ 384, 1321, 1283, 42, 42, 42, 42, 210, 1533, 72, - /* 30 */ 72, 978, 425, 42, 42, 499, 305, 281, 305, 979, - /* 40 */ 403, 72, 72, 126, 127, 81, 1221, 1221, 1058, 1061, - /* 50 */ 1048, 1048, 124, 124, 125, 125, 125, 125, 484, 415, - /* 60 */ 1248, 1, 1, 582, 2, 1252, 558, 119, 116, 231, - /* 70 */ 319, 488, 147, 488, 532, 119, 116, 231, 537, 1334, - /* 80 */ 423, 531, 143, 126, 127, 81, 1221, 1221, 1058, 1061, - /* 90 */ 1048, 1048, 124, 124, 125, 125, 125, 125, 119, 116, - /* 100 */ 231, 329, 123, 123, 123, 123, 122, 122, 121, 121, - /* 110 */ 121, 120, 117, 452, 286, 286, 286, 286, 450, 450, - /* 120 */ 450, 1572, 382, 1574, 1197, 381, 1168, 573, 1168, 573, - /* 130 */ 415, 1572, 545, 261, 228, 452, 102, 146, 457, 318, - /* 140 */ 567, 242, 123, 123, 123, 123, 122, 122, 121, 121, - /* 150 */ 121, 120, 117, 452, 126, 127, 81, 1221, 1221, 1058, - /* 160 */ 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, 143, - /* 170 */ 296, 1197, 345, 456, 121, 121, 121, 120, 117, 452, - /* 180 */ 128, 1197, 1198, 1197, 149, 449, 448, 576, 120, 117, - /* 190 */ 452, 125, 125, 125, 125, 118, 123, 123, 123, 123, - /* 200 */ 122, 122, 121, 121, 121, 120, 117, 452, 462, 114, - /* 210 */ 13, 13, 554, 123, 123, 123, 123, 122, 122, 121, - /* 220 */ 121, 121, 120, 117, 452, 428, 318, 567, 1197, 1198, - /* 230 */ 1197, 150, 1229, 415, 1229, 125, 125, 125, 125, 123, - /* 240 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117, - /* 250 */ 452, 473, 348, 1045, 1045, 1059, 1062, 126, 127, 81, - /* 260 */ 1221, 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, - /* 270 */ 125, 125, 1286, 530, 224, 1197, 576, 415, 226, 523, - /* 280 */ 177, 83, 84, 123, 123, 123, 123, 122, 
122, 121, - /* 290 */ 121, 121, 120, 117, 452, 1014, 16, 16, 1197, 134, - /* 300 */ 134, 126, 127, 81, 1221, 1221, 1058, 1061, 1048, 1048, - /* 310 */ 124, 124, 125, 125, 125, 125, 123, 123, 123, 123, - /* 320 */ 122, 122, 121, 121, 121, 120, 117, 452, 1049, 554, - /* 330 */ 1197, 379, 1197, 1198, 1197, 254, 1442, 405, 512, 509, - /* 340 */ 508, 112, 568, 574, 4, 933, 933, 439, 507, 346, - /* 350 */ 468, 332, 366, 400, 1242, 1197, 1198, 1197, 571, 576, - /* 360 */ 123, 123, 123, 123, 122, 122, 121, 121, 121, 120, - /* 370 */ 117, 452, 286, 286, 375, 1585, 1611, 449, 448, 155, - /* 380 */ 415, 453, 72, 72, 1293, 573, 1226, 1197, 1198, 1197, - /* 390 */ 86, 1228, 273, 565, 551, 524, 524, 576, 99, 1227, - /* 400 */ 6, 1285, 480, 143, 126, 127, 81, 1221, 1221, 1058, - /* 410 */ 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, 558, - /* 420 */ 13, 13, 1035, 515, 1229, 1197, 1229, 557, 110, 110, - /* 430 */ 224, 576, 1243, 177, 576, 433, 111, 199, 453, 577, - /* 440 */ 453, 436, 1559, 1023, 327, 559, 1197, 272, 289, 374, - /* 450 */ 518, 369, 517, 259, 72, 72, 551, 72, 72, 365, - /* 460 */ 318, 567, 1617, 123, 123, 123, 123, 122, 122, 121, - /* 470 */ 121, 121, 120, 117, 452, 1023, 1023, 1025, 1026, 28, - /* 480 */ 286, 286, 1197, 1198, 1197, 1163, 576, 1616, 415, 908, - /* 490 */ 192, 558, 362, 573, 558, 944, 541, 525, 1163, 441, - /* 500 */ 419, 1163, 560, 1197, 1198, 1197, 576, 552, 552, 52, - /* 510 */ 52, 216, 126, 127, 81, 1221, 1221, 1058, 1061, 1048, - /* 520 */ 1048, 124, 124, 125, 125, 125, 125, 1197, 482, 136, - /* 530 */ 136, 415, 286, 286, 1497, 513, 122, 122, 121, 121, - /* 540 */ 121, 120, 117, 452, 1014, 573, 526, 219, 549, 549, - /* 550 */ 318, 567, 143, 6, 540, 126, 127, 81, 1221, 1221, - /* 560 */ 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, - /* 570 */ 1561, 123, 123, 123, 123, 122, 122, 121, 121, 121, - /* 580 */ 120, 117, 452, 493, 1197, 1198, 1197, 490, 283, 1274, - /* 590 */ 964, 254, 1197, 379, 512, 509, 508, 1197, 346, 578, - /* 600 */ 
1197, 578, 415, 294, 507, 964, 883, 193, 488, 318, - /* 610 */ 567, 390, 292, 386, 123, 123, 123, 123, 122, 122, - /* 620 */ 121, 121, 121, 120, 117, 452, 126, 127, 81, 1221, - /* 630 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 640 */ 125, 415, 400, 1143, 1197, 876, 101, 286, 286, 1197, - /* 650 */ 1198, 1197, 379, 1100, 1197, 1198, 1197, 1197, 1198, 1197, - /* 660 */ 573, 463, 33, 379, 235, 126, 127, 81, 1221, 1221, - /* 670 */ 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, - /* 680 */ 1441, 966, 576, 230, 965, 123, 123, 123, 123, 122, - /* 690 */ 122, 121, 121, 121, 120, 117, 452, 1163, 230, 1197, - /* 700 */ 158, 1197, 1198, 1197, 1560, 13, 13, 303, 964, 1237, - /* 710 */ 1163, 154, 415, 1163, 379, 1588, 1181, 5, 375, 1585, - /* 720 */ 435, 1243, 3, 964, 123, 123, 123, 123, 122, 122, - /* 730 */ 121, 121, 121, 120, 117, 452, 126, 127, 81, 1221, - /* 740 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 750 */ 125, 415, 210, 575, 1197, 1036, 1197, 1198, 1197, 1197, - /* 760 */ 394, 859, 156, 1559, 380, 408, 1105, 1105, 496, 576, - /* 770 */ 473, 348, 1326, 1326, 1559, 126, 127, 81, 1221, 1221, - /* 780 */ 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, - /* 790 */ 130, 576, 13, 13, 536, 123, 123, 123, 123, 122, - /* 800 */ 122, 121, 121, 121, 120, 117, 452, 304, 576, 461, - /* 810 */ 229, 1197, 1198, 1197, 13, 13, 1197, 1198, 1197, 1304, - /* 820 */ 471, 1274, 415, 1324, 1324, 1559, 1019, 461, 460, 440, - /* 830 */ 301, 72, 72, 1272, 123, 123, 123, 123, 122, 122, - /* 840 */ 121, 121, 121, 120, 117, 452, 126, 127, 81, 1221, - /* 850 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 860 */ 125, 415, 388, 1080, 1163, 286, 286, 425, 314, 280, - /* 870 */ 280, 287, 287, 465, 412, 411, 1543, 1163, 573, 576, - /* 880 */ 1163, 1200, 573, 413, 573, 126, 127, 81, 1221, 1221, - /* 890 */ 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, 125, - /* 900 */ 461, 1489, 13, 13, 1545, 123, 123, 123, 123, 122, - /* 910 */ 
122, 121, 121, 121, 120, 117, 452, 202, 576, 466, - /* 920 */ 1591, 582, 2, 1252, 847, 848, 849, 1567, 319, 413, - /* 930 */ 147, 6, 415, 257, 256, 255, 208, 1334, 9, 1200, - /* 940 */ 264, 72, 72, 1440, 123, 123, 123, 123, 122, 122, - /* 950 */ 121, 121, 121, 120, 117, 452, 126, 127, 81, 1221, - /* 960 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 970 */ 125, 576, 286, 286, 576, 1217, 415, 581, 315, 1252, - /* 980 */ 425, 375, 1585, 360, 319, 573, 147, 499, 533, 1648, - /* 990 */ 401, 939, 499, 1334, 71, 71, 938, 72, 72, 242, - /* 1000 */ 1332, 105, 81, 1221, 1221, 1058, 1061, 1048, 1048, 124, - /* 1010 */ 124, 125, 125, 125, 125, 123, 123, 123, 123, 122, - /* 1020 */ 122, 121, 121, 121, 120, 117, 452, 1121, 286, 286, - /* 1030 */ 1426, 456, 1532, 1217, 447, 286, 286, 1496, 1359, 313, - /* 1040 */ 482, 573, 1122, 458, 355, 499, 358, 1270, 573, 209, - /* 1050 */ 576, 422, 179, 576, 1035, 242, 389, 1123, 527, 123, - /* 1060 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117, - /* 1070 */ 452, 1024, 108, 72, 72, 1023, 13, 13, 919, 576, - /* 1080 */ 1502, 576, 286, 286, 98, 534, 1541, 456, 920, 1338, - /* 1090 */ 1333, 203, 415, 286, 286, 573, 152, 211, 1502, 1504, - /* 1100 */ 430, 573, 56, 56, 57, 57, 573, 1023, 1023, 1025, - /* 1110 */ 451, 576, 415, 535, 12, 297, 126, 127, 81, 1221, - /* 1120 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 1130 */ 125, 576, 415, 871, 15, 15, 126, 127, 81, 1221, - /* 1140 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 1150 */ 125, 377, 533, 264, 44, 44, 126, 115, 81, 1221, - /* 1160 */ 1221, 1058, 1061, 1048, 1048, 124, 124, 125, 125, 125, - /* 1170 */ 125, 1502, 482, 1275, 421, 123, 123, 123, 123, 122, - /* 1180 */ 122, 121, 121, 121, 120, 117, 452, 205, 1217, 499, - /* 1190 */ 434, 871, 472, 322, 499, 123, 123, 123, 123, 122, - /* 1200 */ 122, 121, 121, 121, 120, 117, 452, 576, 561, 1144, - /* 1210 */ 1646, 1426, 1646, 547, 576, 123, 123, 123, 123, 122, - /* 1220 */ 122, 121, 121, 
121, 120, 117, 452, 576, 1426, 576, - /* 1230 */ 13, 13, 546, 323, 1329, 415, 338, 58, 58, 353, - /* 1240 */ 1426, 1174, 326, 286, 286, 553, 1217, 300, 899, 534, - /* 1250 */ 45, 45, 59, 59, 1144, 1647, 573, 1647, 569, 421, - /* 1260 */ 127, 81, 1221, 1221, 1058, 1061, 1048, 1048, 124, 124, - /* 1270 */ 125, 125, 125, 125, 1371, 377, 504, 290, 1197, 516, - /* 1280 */ 1370, 431, 398, 398, 397, 275, 395, 900, 1142, 856, - /* 1290 */ 482, 258, 1426, 1174, 467, 1163, 12, 335, 432, 337, - /* 1300 */ 1121, 464, 236, 258, 325, 464, 548, 1548, 1163, 1102, - /* 1310 */ 495, 1163, 324, 1102, 444, 1122, 339, 520, 123, 123, - /* 1320 */ 123, 123, 122, 122, 121, 121, 121, 120, 117, 452, - /* 1330 */ 1123, 318, 567, 1142, 576, 1197, 1198, 1197, 112, 568, - /* 1340 */ 201, 4, 238, 437, 939, 494, 285, 228, 1521, 938, - /* 1350 */ 170, 564, 576, 142, 1520, 571, 576, 60, 60, 576, - /* 1360 */ 420, 576, 445, 576, 539, 302, 879, 8, 491, 576, - /* 1370 */ 237, 576, 420, 576, 489, 61, 61, 576, 453, 62, - /* 1380 */ 62, 336, 63, 63, 46, 46, 47, 47, 365, 576, - /* 1390 */ 565, 576, 48, 48, 50, 50, 51, 51, 576, 295, - /* 1400 */ 64, 64, 486, 295, 543, 416, 475, 1035, 576, 542, - /* 1410 */ 318, 567, 65, 65, 66, 66, 413, 479, 576, 1035, - /* 1420 */ 576, 14, 14, 879, 1024, 110, 110, 413, 1023, 576, - /* 1430 */ 478, 67, 67, 111, 459, 453, 577, 453, 98, 317, - /* 1440 */ 1023, 132, 132, 133, 133, 576, 1565, 576, 978, 413, - /* 1450 */ 6, 1566, 68, 68, 1564, 6, 979, 576, 6, 1563, - /* 1460 */ 1023, 1023, 1025, 6, 350, 218, 101, 535, 53, 53, - /* 1470 */ 69, 69, 1023, 1023, 1025, 1026, 28, 1590, 1185, 455, - /* 1480 */ 70, 70, 290, 87, 215, 31, 1367, 398, 398, 397, - /* 1490 */ 275, 395, 354, 109, 856, 107, 576, 112, 568, 487, - /* 1500 */ 4, 1216, 576, 239, 153, 576, 39, 236, 1303, 325, - /* 1510 */ 112, 568, 1302, 4, 571, 576, 32, 324, 576, 54, - /* 1520 */ 54, 576, 1139, 357, 402, 165, 165, 571, 166, 166, - /* 1530 */ 576, 291, 359, 576, 17, 361, 576, 453, 77, 77, - /* 1540 */ 1317, 55, 55, 
1301, 73, 73, 576, 238, 474, 565, - /* 1550 */ 453, 476, 368, 135, 135, 170, 74, 74, 142, 163, - /* 1560 */ 163, 378, 565, 543, 576, 321, 576, 890, 544, 137, - /* 1570 */ 137, 343, 1357, 426, 298, 237, 543, 576, 1035, 576, - /* 1580 */ 344, 542, 101, 373, 110, 110, 162, 131, 131, 164, - /* 1590 */ 164, 1035, 111, 372, 453, 577, 453, 110, 110, 1023, - /* 1600 */ 157, 157, 141, 141, 576, 111, 576, 453, 577, 453, - /* 1610 */ 416, 288, 1023, 576, 886, 318, 567, 576, 219, 576, - /* 1620 */ 241, 1016, 481, 263, 263, 898, 897, 140, 140, 138, - /* 1630 */ 138, 1023, 1023, 1025, 1026, 28, 139, 139, 529, 459, - /* 1640 */ 76, 76, 78, 78, 1023, 1023, 1025, 1026, 28, 1185, - /* 1650 */ 455, 576, 1087, 290, 112, 568, 1579, 4, 398, 398, - /* 1660 */ 397, 275, 395, 576, 1027, 856, 576, 483, 349, 263, - /* 1670 */ 101, 571, 886, 1380, 75, 75, 1425, 505, 236, 260, - /* 1680 */ 325, 112, 568, 363, 4, 101, 43, 43, 324, 49, - /* 1690 */ 49, 905, 906, 161, 453, 101, 981, 982, 571, 1083, - /* 1700 */ 1353, 260, 969, 936, 263, 114, 565, 1099, 521, 1099, - /* 1710 */ 1087, 1098, 869, 1098, 151, 937, 1148, 114, 238, 1365, - /* 1720 */ 562, 453, 1027, 563, 1430, 1282, 170, 1273, 1261, 142, - /* 1730 */ 1605, 1260, 1262, 565, 1598, 1035, 500, 278, 213, 1350, - /* 1740 */ 310, 110, 110, 943, 311, 312, 237, 11, 234, 111, - /* 1750 */ 221, 453, 577, 453, 293, 399, 1023, 1412, 341, 331, - /* 1760 */ 334, 342, 1035, 299, 347, 1417, 1416, 485, 110, 110, - /* 1770 */ 510, 406, 225, 1300, 206, 371, 111, 1362, 453, 577, - /* 1780 */ 453, 416, 1363, 1023, 1493, 1492, 318, 567, 1023, 1023, - /* 1790 */ 1025, 1026, 28, 566, 207, 220, 80, 568, 393, 4, - /* 1800 */ 1601, 1361, 556, 1360, 1237, 181, 267, 232, 1540, 1538, - /* 1810 */ 459, 1234, 424, 571, 82, 1023, 1023, 1025, 1026, 28, - /* 1820 */ 86, 217, 85, 1498, 190, 129, 1407, 554, 330, 175, - /* 1830 */ 36, 1413, 1400, 333, 183, 469, 453, 470, 185, 186, - /* 1840 */ 187, 503, 244, 188, 99, 1419, 1418, 37, 565, 1421, - /* 1850 */ 492, 194, 248, 404, 
92, 498, 112, 568, 1509, 4, - /* 1860 */ 477, 352, 407, 279, 1487, 250, 198, 501, 356, 519, - /* 1870 */ 409, 251, 252, 571, 1263, 1320, 1319, 1035, 438, 1318, - /* 1880 */ 94, 890, 1311, 110, 110, 1290, 1310, 226, 1615, 442, - /* 1890 */ 1584, 111, 528, 453, 577, 453, 453, 443, 1023, 1614, - /* 1900 */ 265, 266, 410, 1289, 370, 1288, 1613, 308, 565, 309, - /* 1910 */ 446, 376, 1570, 1569, 10, 1474, 1385, 555, 387, 106, - /* 1920 */ 316, 1384, 100, 35, 538, 579, 1191, 274, 1343, 276, - /* 1930 */ 1023, 1023, 1025, 1026, 28, 1342, 277, 1035, 580, 1258, - /* 1940 */ 385, 212, 1253, 110, 110, 391, 167, 392, 168, 1525, - /* 1950 */ 417, 111, 180, 453, 577, 453, 1526, 843, 1023, 148, - /* 1960 */ 306, 1524, 1523, 454, 169, 222, 223, 214, 320, 79, - /* 1970 */ 233, 1097, 145, 1095, 328, 182, 171, 1216, 240, 922, - /* 1980 */ 184, 340, 1111, 243, 189, 172, 173, 427, 429, 88, - /* 1990 */ 1023, 1023, 1025, 1026, 28, 89, 191, 418, 174, 90, - /* 2000 */ 91, 1114, 245, 246, 1110, 159, 18, 1103, 247, 351, - /* 2010 */ 263, 195, 1231, 497, 249, 196, 38, 858, 502, 372, - /* 2020 */ 253, 364, 888, 197, 506, 511, 93, 19, 20, 514, - /* 2030 */ 901, 367, 95, 307, 160, 96, 522, 97, 1179, 1064, - /* 2040 */ 1150, 40, 227, 21, 282, 176, 1149, 284, 973, 200, - /* 2050 */ 967, 114, 262, 1169, 22, 1165, 23, 24, 1173, 25, - /* 2060 */ 1167, 1154, 1172, 26, 34, 550, 27, 103, 204, 101, - /* 2070 */ 104, 1078, 7, 1065, 1063, 1067, 1120, 1068, 1119, 268, - /* 2080 */ 269, 29, 41, 1187, 1028, 870, 113, 30, 572, 396, - /* 2090 */ 1186, 144, 178, 1249, 1249, 1249, 932, 1249, 1249, 1249, - /* 2100 */ 1249, 1249, 1249, 270, 1249, 271, 1249, 1249, 1249, 1249, - /* 2110 */ 1249, 1606, + /* 0 */ 580, 128, 125, 232, 1626, 553, 580, 1294, 1285, 580, + /* 10 */ 328, 580, 1304, 212, 580, 128, 125, 232, 582, 416, + /* 20 */ 582, 395, 1546, 51, 51, 527, 409, 1297, 533, 51, + /* 30 */ 51, 987, 51, 51, 81, 81, 1111, 61, 61, 988, + /* 40 */ 1111, 1296, 384, 135, 136, 90, 1232, 1232, 1067, 1070, + /* 50 */ 
1057, 1057, 133, 133, 134, 134, 134, 134, 1581, 416, + /* 60 */ 287, 287, 7, 287, 287, 426, 1054, 1054, 1068, 1071, + /* 70 */ 289, 560, 496, 577, 528, 565, 577, 501, 565, 486, + /* 80 */ 534, 262, 229, 135, 136, 90, 1232, 1232, 1067, 1070, + /* 90 */ 1057, 1057, 133, 133, 134, 134, 134, 134, 128, 125, + /* 100 */ 232, 1510, 132, 132, 132, 132, 131, 131, 130, 130, + /* 110 */ 130, 129, 126, 454, 1208, 1259, 1, 1, 586, 2, + /* 120 */ 1263, 1575, 424, 1586, 383, 320, 1178, 153, 1178, 1588, + /* 130 */ 416, 382, 1586, 547, 1345, 330, 111, 574, 574, 574, + /* 140 */ 293, 1058, 132, 132, 132, 132, 131, 131, 130, 130, + /* 150 */ 130, 129, 126, 454, 135, 136, 90, 1232, 1232, 1067, + /* 160 */ 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, 287, + /* 170 */ 287, 1208, 1209, 1208, 255, 287, 287, 514, 511, 510, + /* 180 */ 137, 459, 577, 212, 565, 451, 450, 509, 577, 1620, + /* 190 */ 565, 134, 134, 134, 134, 127, 404, 243, 132, 132, + /* 200 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 454, + /* 210 */ 282, 475, 349, 132, 132, 132, 132, 131, 131, 130, + /* 220 */ 130, 130, 129, 126, 454, 578, 155, 940, 940, 458, + /* 230 */ 227, 525, 1240, 416, 1240, 134, 134, 134, 134, 132, + /* 240 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, + /* 250 */ 454, 130, 130, 130, 129, 126, 454, 135, 136, 90, + /* 260 */ 1232, 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, + /* 270 */ 134, 134, 128, 125, 232, 454, 580, 416, 401, 1253, + /* 280 */ 180, 92, 93, 132, 132, 132, 132, 131, 131, 130, + /* 290 */ 130, 130, 129, 126, 454, 385, 391, 1208, 387, 81, + /* 300 */ 81, 135, 136, 90, 1232, 1232, 1067, 1070, 1057, 1057, + /* 310 */ 133, 133, 134, 134, 134, 134, 132, 132, 132, 132, + /* 320 */ 131, 131, 130, 130, 130, 129, 126, 454, 131, 131, + /* 330 */ 130, 130, 130, 129, 126, 454, 560, 1208, 302, 319, + /* 340 */ 571, 121, 572, 484, 4, 559, 1153, 1661, 1632, 1661, + /* 350 */ 45, 128, 125, 232, 1208, 1209, 1208, 1254, 575, 1173, + /* 360 */ 132, 132, 132, 132, 131, 131, 130, 130, 
130, 129, + /* 370 */ 126, 454, 1173, 287, 287, 1173, 1023, 580, 426, 1023, + /* 380 */ 416, 455, 1606, 586, 2, 1263, 577, 44, 565, 95, + /* 390 */ 320, 110, 153, 569, 1208, 1209, 1208, 526, 526, 1345, + /* 400 */ 81, 81, 7, 44, 135, 136, 90, 1232, 1232, 1067, + /* 410 */ 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, 295, + /* 420 */ 1153, 1662, 1044, 1662, 1208, 1151, 319, 571, 119, 119, + /* 430 */ 347, 470, 333, 347, 287, 287, 120, 560, 455, 581, + /* 440 */ 455, 1173, 1173, 1032, 319, 571, 442, 577, 210, 565, + /* 450 */ 1343, 1455, 550, 535, 1173, 1173, 1602, 1173, 1173, 420, + /* 460 */ 319, 571, 243, 132, 132, 132, 132, 131, 131, 130, + /* 470 */ 130, 130, 129, 126, 454, 1032, 1032, 1034, 1035, 35, + /* 480 */ 44, 1208, 1209, 1208, 476, 287, 287, 1332, 416, 1311, + /* 490 */ 376, 1599, 363, 225, 458, 1208, 195, 1332, 577, 1151, + /* 500 */ 565, 1337, 1337, 274, 580, 1192, 580, 344, 46, 196, + /* 510 */ 541, 217, 135, 136, 90, 1232, 1232, 1067, 1070, 1057, + /* 520 */ 1057, 133, 133, 134, 134, 134, 134, 19, 19, 19, + /* 530 */ 19, 416, 585, 1208, 1263, 515, 1208, 319, 571, 320, + /* 540 */ 948, 153, 429, 495, 434, 947, 1208, 492, 1345, 1454, + /* 550 */ 536, 1281, 1208, 1209, 1208, 135, 136, 90, 1232, 1232, + /* 560 */ 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, + /* 570 */ 579, 132, 132, 132, 132, 131, 131, 130, 130, 130, + /* 580 */ 129, 126, 454, 287, 287, 532, 287, 287, 376, 1599, + /* 590 */ 1208, 1209, 1208, 1208, 1209, 1208, 577, 490, 565, 577, + /* 600 */ 893, 565, 416, 1208, 1209, 1208, 890, 40, 22, 22, + /* 610 */ 220, 243, 529, 1453, 132, 132, 132, 132, 131, 131, + /* 620 */ 130, 130, 130, 129, 126, 454, 135, 136, 90, 1232, + /* 630 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 640 */ 134, 416, 180, 458, 1208, 883, 255, 287, 287, 514, + /* 650 */ 511, 510, 376, 1599, 1572, 1335, 1335, 580, 893, 509, + /* 660 */ 577, 44, 565, 563, 1211, 135, 136, 90, 1232, 1232, + /* 670 */ 1067, 1070, 1057, 1057, 133, 133, 134, 134, 
134, 134, + /* 680 */ 81, 81, 426, 580, 381, 132, 132, 132, 132, 131, + /* 690 */ 131, 130, 130, 130, 129, 126, 454, 297, 287, 287, + /* 700 */ 464, 1208, 1209, 1208, 1208, 538, 19, 19, 452, 452, + /* 710 */ 452, 577, 416, 565, 230, 440, 1191, 539, 319, 571, + /* 720 */ 367, 436, 1211, 1439, 132, 132, 132, 132, 131, 131, + /* 730 */ 130, 130, 130, 129, 126, 454, 135, 136, 90, 1232, + /* 740 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 750 */ 134, 416, 211, 953, 1173, 1045, 1114, 1114, 498, 551, + /* 760 */ 551, 1208, 1209, 1208, 7, 543, 1574, 1173, 380, 580, + /* 770 */ 1173, 5, 1208, 490, 3, 135, 136, 90, 1232, 1232, + /* 780 */ 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, + /* 790 */ 580, 517, 19, 19, 431, 132, 132, 132, 132, 131, + /* 800 */ 131, 130, 130, 130, 129, 126, 454, 305, 1208, 437, + /* 810 */ 225, 1208, 389, 19, 19, 273, 290, 375, 520, 370, + /* 820 */ 519, 260, 416, 542, 1572, 553, 1028, 366, 441, 1208, + /* 830 */ 1209, 1208, 906, 1556, 132, 132, 132, 132, 131, 131, + /* 840 */ 130, 130, 130, 129, 126, 454, 135, 136, 90, 1232, + /* 850 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 860 */ 134, 416, 1439, 518, 1285, 1208, 1209, 1208, 1208, 1209, + /* 870 */ 1208, 907, 48, 346, 1572, 1572, 1283, 1631, 1572, 915, + /* 880 */ 580, 129, 126, 454, 110, 135, 136, 90, 1232, 1232, + /* 890 */ 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, 134, + /* 900 */ 265, 580, 463, 19, 19, 132, 132, 132, 132, 131, + /* 910 */ 131, 130, 130, 130, 129, 126, 454, 1349, 204, 580, + /* 920 */ 463, 462, 50, 47, 19, 19, 49, 438, 1109, 577, + /* 930 */ 501, 565, 416, 432, 108, 1228, 1573, 1558, 380, 205, + /* 940 */ 554, 554, 81, 81, 132, 132, 132, 132, 131, 131, + /* 950 */ 130, 130, 130, 129, 126, 454, 135, 136, 90, 1232, + /* 960 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 970 */ 134, 484, 580, 1208, 580, 1545, 416, 1439, 973, 315, + /* 980 */ 1663, 402, 284, 501, 973, 897, 1573, 1573, 380, 380, + /* 990 */ 
1573, 465, 380, 1228, 463, 80, 80, 81, 81, 501, + /* 1000 */ 378, 114, 90, 1232, 1232, 1067, 1070, 1057, 1057, 133, + /* 1010 */ 133, 134, 134, 134, 134, 132, 132, 132, 132, 131, + /* 1020 */ 131, 130, 130, 130, 129, 126, 454, 1208, 1509, 580, + /* 1030 */ 1208, 1209, 1208, 1370, 316, 490, 281, 281, 501, 435, + /* 1040 */ 561, 288, 288, 406, 1344, 475, 349, 298, 433, 577, + /* 1050 */ 580, 565, 81, 81, 577, 378, 565, 975, 390, 132, + /* 1060 */ 132, 132, 132, 131, 131, 130, 130, 130, 129, 126, + /* 1070 */ 454, 231, 117, 81, 81, 287, 287, 231, 287, 287, + /* 1080 */ 580, 1515, 580, 1340, 1208, 1209, 1208, 139, 577, 560, + /* 1090 */ 565, 577, 416, 565, 445, 460, 973, 213, 562, 1515, + /* 1100 */ 1517, 1554, 973, 143, 143, 145, 145, 1372, 314, 482, + /* 1110 */ 448, 974, 416, 854, 855, 856, 135, 136, 90, 1232, + /* 1120 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 1130 */ 134, 361, 416, 401, 1152, 304, 135, 136, 90, 1232, + /* 1140 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 1150 */ 134, 1579, 323, 6, 866, 7, 135, 124, 90, 1232, + /* 1160 */ 1232, 1067, 1070, 1057, 1057, 133, 133, 134, 134, 134, + /* 1170 */ 134, 413, 412, 1515, 212, 132, 132, 132, 132, 131, + /* 1180 */ 131, 130, 130, 130, 129, 126, 454, 415, 118, 1208, + /* 1190 */ 116, 10, 356, 265, 359, 132, 132, 132, 132, 131, + /* 1200 */ 131, 130, 130, 130, 129, 126, 454, 580, 324, 306, + /* 1210 */ 580, 306, 1254, 473, 158, 132, 132, 132, 132, 131, + /* 1220 */ 131, 130, 130, 130, 129, 126, 454, 207, 1228, 1130, + /* 1230 */ 65, 65, 474, 66, 66, 416, 451, 450, 886, 535, + /* 1240 */ 339, 258, 257, 256, 1131, 1237, 1208, 1209, 1208, 327, + /* 1250 */ 1239, 878, 159, 580, 16, 484, 1089, 1044, 1238, 1132, + /* 1260 */ 136, 90, 1232, 1232, 1067, 1070, 1057, 1057, 133, 133, + /* 1270 */ 134, 134, 134, 134, 1033, 580, 81, 81, 1032, 1044, + /* 1280 */ 926, 580, 467, 1240, 580, 1240, 1228, 506, 107, 1439, + /* 1290 */ 927, 6, 580, 414, 1502, 886, 1033, 484, 21, 21, + /* 1300 */ 
1032, 336, 1384, 338, 53, 53, 501, 81, 81, 878, + /* 1310 */ 1032, 1032, 1034, 449, 259, 19, 19, 537, 132, 132, + /* 1320 */ 132, 132, 131, 131, 130, 130, 130, 129, 126, 454, + /* 1330 */ 555, 301, 1032, 1032, 1034, 107, 536, 549, 121, 572, + /* 1340 */ 1192, 4, 1130, 1580, 453, 580, 466, 7, 1286, 422, + /* 1350 */ 466, 354, 1439, 580, 522, 575, 548, 1131, 121, 572, + /* 1360 */ 446, 4, 1192, 468, 537, 1184, 1227, 9, 67, 67, + /* 1370 */ 491, 580, 1132, 303, 414, 575, 54, 54, 455, 580, + /* 1380 */ 123, 948, 580, 421, 580, 337, 947, 1383, 580, 236, + /* 1390 */ 569, 580, 1578, 568, 68, 68, 7, 580, 455, 366, + /* 1400 */ 423, 182, 69, 69, 545, 70, 70, 71, 71, 544, + /* 1410 */ 569, 72, 72, 488, 55, 55, 477, 1184, 296, 1044, + /* 1420 */ 56, 56, 296, 497, 545, 119, 119, 414, 1577, 546, + /* 1430 */ 573, 422, 7, 120, 1248, 455, 581, 455, 469, 1044, + /* 1440 */ 1032, 580, 1561, 556, 480, 119, 119, 531, 259, 121, + /* 1450 */ 572, 240, 4, 120, 580, 455, 581, 455, 580, 481, + /* 1460 */ 1032, 580, 156, 580, 57, 57, 575, 580, 286, 229, + /* 1470 */ 414, 340, 1032, 1032, 1034, 1035, 35, 59, 59, 219, + /* 1480 */ 987, 60, 60, 220, 73, 73, 74, 74, 988, 455, + /* 1490 */ 75, 75, 1032, 1032, 1034, 1035, 35, 96, 216, 291, + /* 1500 */ 556, 569, 1192, 318, 399, 399, 398, 276, 396, 580, + /* 1510 */ 489, 863, 478, 1315, 414, 545, 580, 421, 1534, 1148, + /* 1520 */ 544, 403, 1192, 292, 237, 1157, 326, 38, 23, 580, + /* 1530 */ 1044, 580, 20, 20, 325, 299, 119, 119, 164, 76, + /* 1540 */ 76, 1533, 121, 572, 120, 4, 455, 581, 455, 203, + /* 1550 */ 580, 1032, 141, 141, 142, 142, 580, 322, 39, 575, + /* 1560 */ 345, 1025, 110, 264, 239, 905, 904, 427, 242, 912, + /* 1570 */ 913, 374, 173, 77, 77, 43, 483, 1314, 264, 62, + /* 1580 */ 62, 373, 455, 1032, 1032, 1034, 1035, 35, 1605, 1196, + /* 1590 */ 457, 1096, 238, 291, 569, 163, 1313, 110, 399, 399, + /* 1600 */ 398, 276, 396, 990, 991, 863, 485, 350, 264, 110, + /* 1610 */ 1036, 493, 580, 1192, 507, 1092, 261, 261, 237, 580, + /* 
1620 */ 326, 121, 572, 1044, 4, 351, 1380, 417, 325, 119, + /* 1630 */ 119, 952, 319, 571, 355, 78, 78, 120, 575, 455, + /* 1640 */ 581, 455, 79, 79, 1032, 358, 360, 580, 364, 1096, + /* 1650 */ 110, 580, 978, 946, 264, 123, 461, 362, 239, 580, + /* 1660 */ 523, 455, 943, 1108, 123, 1108, 173, 580, 1036, 43, + /* 1670 */ 63, 63, 1328, 569, 168, 168, 1032, 1032, 1034, 1035, + /* 1680 */ 35, 580, 169, 169, 1312, 876, 238, 157, 1593, 580, + /* 1690 */ 86, 86, 369, 89, 572, 379, 4, 1107, 945, 1107, + /* 1700 */ 123, 580, 1044, 1393, 64, 64, 1192, 1438, 119, 119, + /* 1710 */ 575, 580, 82, 82, 567, 580, 120, 165, 455, 581, + /* 1720 */ 455, 417, 1366, 1032, 144, 144, 319, 571, 580, 1378, + /* 1730 */ 566, 502, 279, 455, 83, 83, 1443, 580, 166, 166, + /* 1740 */ 580, 1293, 558, 580, 1284, 569, 580, 12, 580, 1272, + /* 1750 */ 461, 146, 146, 1271, 580, 1032, 1032, 1034, 1035, 35, + /* 1760 */ 140, 140, 1273, 167, 167, 1613, 160, 160, 1363, 150, + /* 1770 */ 150, 149, 149, 311, 1044, 580, 312, 147, 147, 313, + /* 1780 */ 119, 119, 222, 235, 580, 1192, 400, 580, 120, 580, + /* 1790 */ 455, 581, 455, 1196, 457, 1032, 512, 291, 148, 148, + /* 1800 */ 1425, 1616, 399, 399, 398, 276, 396, 85, 85, 863, + /* 1810 */ 87, 87, 84, 84, 557, 580, 294, 580, 1430, 342, + /* 1820 */ 343, 1429, 237, 300, 326, 332, 335, 1032, 1032, 1034, + /* 1830 */ 1035, 35, 325, 348, 407, 487, 226, 1311, 52, 52, + /* 1840 */ 58, 58, 372, 1375, 1506, 570, 1505, 121, 572, 221, + /* 1850 */ 4, 208, 268, 209, 394, 1248, 1553, 1192, 1376, 1374, + /* 1860 */ 1373, 1551, 239, 184, 575, 233, 425, 1245, 95, 218, + /* 1870 */ 173, 1511, 193, 43, 91, 94, 138, 556, 1420, 178, + /* 1880 */ 186, 1426, 13, 331, 471, 1413, 334, 455, 188, 189, + /* 1890 */ 238, 190, 191, 472, 505, 245, 108, 494, 1434, 569, + /* 1900 */ 249, 197, 1432, 405, 479, 1431, 408, 14, 1500, 101, + /* 1910 */ 1522, 500, 201, 280, 251, 503, 357, 1274, 252, 410, + /* 1920 */ 253, 521, 1331, 353, 1330, 417, 1329, 439, 1044, 1322, + /* 1930 */ 319, 
571, 103, 1630, 119, 119, 897, 1321, 1629, 227, + /* 1940 */ 443, 1301, 120, 411, 455, 581, 455, 1300, 371, 1032, + /* 1950 */ 1299, 530, 1628, 1598, 461, 444, 309, 310, 377, 266, + /* 1960 */ 267, 1584, 1583, 447, 11, 1398, 1397, 1487, 388, 115, + /* 1970 */ 317, 109, 540, 214, 1354, 386, 42, 393, 1353, 583, + /* 1980 */ 1202, 1032, 1032, 1034, 1035, 35, 392, 278, 275, 277, + /* 1990 */ 584, 1269, 1264, 170, 1538, 418, 419, 1539, 1537, 171, + /* 2000 */ 1536, 154, 307, 223, 183, 224, 850, 456, 172, 215, + /* 2010 */ 88, 1192, 234, 1106, 321, 152, 1104, 329, 185, 174, + /* 2020 */ 1227, 187, 241, 929, 244, 341, 1120, 192, 175, 176, + /* 2030 */ 428, 430, 97, 194, 177, 98, 99, 100, 1123, 246, + /* 2040 */ 1119, 247, 161, 24, 248, 352, 1112, 264, 198, 1242, + /* 2050 */ 250, 499, 199, 15, 865, 504, 373, 254, 508, 513, + /* 2060 */ 200, 516, 102, 25, 179, 365, 26, 895, 104, 368, + /* 2070 */ 162, 908, 105, 308, 524, 106, 1189, 1073, 1159, 17, + /* 2080 */ 228, 283, 27, 1158, 285, 263, 982, 202, 976, 123, + /* 2090 */ 28, 1179, 1183, 29, 30, 1175, 31, 1177, 1164, 41, + /* 2100 */ 32, 206, 552, 33, 110, 1182, 112, 8, 113, 1087, + /* 2110 */ 1074, 1072, 1076, 34, 1077, 564, 1129, 269, 1128, 270, + /* 2120 */ 36, 18, 1198, 1037, 877, 151, 122, 37, 397, 271, + /* 2130 */ 272, 576, 181, 1197, 1260, 939, 1260, 1621, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 193, 193, 193, 274, 275, 276, 193, 274, 275, 276, - /* 10 */ 193, 223, 219, 225, 206, 210, 211, 212, 193, 19, - /* 20 */ 219, 233, 216, 216, 217, 216, 217, 193, 295, 216, - /* 30 */ 217, 31, 193, 216, 217, 193, 228, 213, 230, 39, - /* 40 */ 206, 216, 217, 43, 44, 45, 46, 47, 48, 49, - /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 193, 19, - /* 60 */ 185, 186, 187, 188, 189, 190, 253, 274, 275, 276, - /* 70 */ 195, 193, 197, 193, 261, 274, 275, 276, 253, 204, - /* 80 */ 238, 204, 81, 43, 44, 45, 46, 47, 48, 49, - /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 274, 275, - /* 100 */ 276, 262, 102, 103, 104, 105, 106, 107, 
108, 109, - /* 110 */ 110, 111, 112, 113, 239, 240, 239, 240, 210, 211, - /* 120 */ 212, 314, 315, 314, 59, 316, 86, 252, 88, 252, - /* 130 */ 19, 314, 315, 256, 257, 113, 25, 72, 296, 138, - /* 140 */ 139, 266, 102, 103, 104, 105, 106, 107, 108, 109, + /* 0 */ 194, 276, 277, 278, 216, 194, 194, 217, 194, 194, + /* 10 */ 194, 194, 224, 194, 194, 276, 277, 278, 204, 19, + /* 20 */ 206, 202, 297, 217, 218, 205, 207, 217, 205, 217, + /* 30 */ 218, 31, 217, 218, 217, 218, 29, 217, 218, 39, + /* 40 */ 33, 217, 220, 43, 44, 45, 46, 47, 48, 49, + /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 312, 19, + /* 60 */ 240, 241, 316, 240, 241, 194, 46, 47, 48, 49, + /* 70 */ 22, 254, 65, 253, 254, 255, 253, 194, 255, 194, + /* 80 */ 263, 258, 259, 43, 44, 45, 46, 47, 48, 49, + /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 276, 277, + /* 100 */ 278, 285, 102, 103, 104, 105, 106, 107, 108, 109, + /* 110 */ 110, 111, 112, 113, 59, 186, 187, 188, 189, 190, + /* 120 */ 191, 310, 239, 317, 318, 196, 86, 198, 88, 317, + /* 130 */ 19, 319, 317, 318, 205, 264, 25, 211, 212, 213, + /* 140 */ 205, 121, 102, 103, 104, 105, 106, 107, 108, 109, /* 150 */ 110, 111, 112, 113, 43, 44, 45, 46, 47, 48, - /* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 81, - /* 170 */ 292, 59, 292, 298, 108, 109, 110, 111, 112, 113, - /* 180 */ 69, 116, 117, 118, 72, 106, 107, 193, 111, 112, - /* 190 */ 113, 54, 55, 56, 57, 58, 102, 103, 104, 105, - /* 200 */ 106, 107, 108, 109, 110, 111, 112, 113, 120, 25, - /* 210 */ 216, 217, 145, 102, 103, 104, 105, 106, 107, 108, - /* 220 */ 109, 110, 111, 112, 113, 231, 138, 139, 116, 117, - /* 230 */ 118, 164, 153, 19, 155, 54, 55, 56, 57, 102, + /* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 240, + /* 170 */ 241, 116, 117, 118, 119, 240, 241, 122, 123, 124, + /* 180 */ 69, 298, 253, 194, 255, 106, 107, 132, 253, 141, + /* 190 */ 255, 54, 55, 56, 57, 58, 207, 268, 102, 103, + /* 200 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + /* 210 */ 214, 128, 129, 102, 103, 104, 105, 106, 
107, 108, + /* 220 */ 109, 110, 111, 112, 113, 134, 25, 136, 137, 300, + /* 230 */ 165, 166, 153, 19, 155, 54, 55, 56, 57, 102, /* 240 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 250 */ 113, 128, 129, 46, 47, 48, 49, 43, 44, 45, + /* 250 */ 113, 108, 109, 110, 111, 112, 113, 43, 44, 45, /* 260 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - /* 270 */ 56, 57, 216, 193, 25, 59, 193, 19, 165, 166, - /* 280 */ 193, 67, 24, 102, 103, 104, 105, 106, 107, 108, - /* 290 */ 109, 110, 111, 112, 113, 73, 216, 217, 59, 216, - /* 300 */ 217, 43, 44, 45, 46, 47, 48, 49, 50, 51, + /* 270 */ 56, 57, 276, 277, 278, 113, 194, 19, 22, 23, + /* 280 */ 194, 67, 24, 102, 103, 104, 105, 106, 107, 108, + /* 290 */ 109, 110, 111, 112, 113, 220, 250, 59, 252, 217, + /* 300 */ 218, 43, 44, 45, 46, 47, 48, 49, 50, 51, /* 310 */ 52, 53, 54, 55, 56, 57, 102, 103, 104, 105, - /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 121, 145, - /* 330 */ 59, 193, 116, 117, 118, 119, 273, 204, 122, 123, - /* 340 */ 124, 19, 20, 134, 22, 136, 137, 19, 132, 127, - /* 350 */ 128, 129, 24, 22, 23, 116, 117, 118, 36, 193, + /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 106, 107, + /* 330 */ 108, 109, 110, 111, 112, 113, 254, 59, 205, 138, + /* 340 */ 139, 19, 20, 194, 22, 263, 22, 23, 231, 25, + /* 350 */ 72, 276, 277, 278, 116, 117, 118, 101, 36, 76, /* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 370 */ 112, 113, 239, 240, 311, 312, 215, 106, 107, 241, - /* 380 */ 19, 59, 216, 217, 223, 252, 115, 116, 117, 118, - /* 390 */ 151, 120, 26, 71, 193, 308, 309, 193, 149, 128, - /* 400 */ 313, 216, 269, 81, 43, 44, 45, 46, 47, 48, - /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 253, - /* 420 */ 216, 217, 100, 95, 153, 59, 155, 261, 106, 107, - /* 430 */ 25, 193, 101, 193, 193, 231, 114, 25, 116, 117, - /* 440 */ 118, 113, 304, 121, 193, 204, 59, 119, 120, 121, - /* 450 */ 122, 123, 124, 125, 216, 217, 193, 216, 217, 131, - /* 460 */ 138, 139, 230, 102, 103, 104, 105, 106, 107, 108, 
+ /* 370 */ 112, 113, 89, 240, 241, 92, 73, 194, 194, 73, + /* 380 */ 19, 59, 188, 189, 190, 191, 253, 81, 255, 151, + /* 390 */ 196, 25, 198, 71, 116, 117, 118, 311, 312, 205, + /* 400 */ 217, 218, 316, 81, 43, 44, 45, 46, 47, 48, + /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 270, + /* 420 */ 22, 23, 100, 25, 59, 101, 138, 139, 106, 107, + /* 430 */ 127, 128, 129, 127, 240, 241, 114, 254, 116, 117, + /* 440 */ 118, 76, 76, 121, 138, 139, 263, 253, 264, 255, + /* 450 */ 205, 275, 87, 19, 89, 89, 194, 92, 92, 199, + /* 460 */ 138, 139, 268, 102, 103, 104, 105, 106, 107, 108, /* 470 */ 109, 110, 111, 112, 113, 153, 154, 155, 156, 157, - /* 480 */ 239, 240, 116, 117, 118, 76, 193, 23, 19, 25, - /* 490 */ 22, 253, 23, 252, 253, 108, 87, 204, 89, 261, - /* 500 */ 198, 92, 261, 116, 117, 118, 193, 306, 307, 216, - /* 510 */ 217, 150, 43, 44, 45, 46, 47, 48, 49, 50, - /* 520 */ 51, 52, 53, 54, 55, 56, 57, 59, 193, 216, - /* 530 */ 217, 19, 239, 240, 283, 23, 106, 107, 108, 109, - /* 540 */ 110, 111, 112, 113, 73, 252, 253, 142, 308, 309, - /* 550 */ 138, 139, 81, 313, 145, 43, 44, 45, 46, 47, + /* 480 */ 81, 116, 117, 118, 129, 240, 241, 224, 19, 226, + /* 490 */ 314, 315, 23, 25, 300, 59, 22, 234, 253, 101, + /* 500 */ 255, 236, 237, 26, 194, 183, 194, 152, 72, 22, + /* 510 */ 145, 150, 43, 44, 45, 46, 47, 48, 49, 50, + /* 520 */ 51, 52, 53, 54, 55, 56, 57, 217, 218, 217, + /* 530 */ 218, 19, 189, 59, 191, 23, 59, 138, 139, 196, + /* 540 */ 135, 198, 232, 283, 232, 140, 59, 287, 205, 275, + /* 550 */ 116, 205, 116, 117, 118, 43, 44, 45, 46, 47, /* 560 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 570 */ 307, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 580 */ 111, 112, 113, 281, 116, 117, 118, 285, 23, 193, - /* 590 */ 25, 119, 59, 193, 122, 123, 124, 59, 127, 203, - /* 600 */ 59, 205, 19, 268, 132, 25, 23, 22, 193, 138, - /* 610 */ 139, 249, 204, 251, 102, 103, 104, 105, 106, 107, + /* 570 */ 194, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 580 */ 111, 
112, 113, 240, 241, 194, 240, 241, 314, 315, + /* 590 */ 116, 117, 118, 116, 117, 118, 253, 194, 255, 253, + /* 600 */ 59, 255, 19, 116, 117, 118, 23, 22, 217, 218, + /* 610 */ 142, 268, 205, 275, 102, 103, 104, 105, 106, 107, /* 620 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 630 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 640 */ 57, 19, 22, 23, 59, 23, 25, 239, 240, 116, - /* 650 */ 117, 118, 193, 11, 116, 117, 118, 116, 117, 118, - /* 660 */ 252, 269, 22, 193, 15, 43, 44, 45, 46, 47, + /* 640 */ 57, 19, 194, 300, 59, 23, 119, 240, 241, 122, + /* 650 */ 123, 124, 314, 315, 194, 236, 237, 194, 117, 132, + /* 660 */ 253, 81, 255, 205, 59, 43, 44, 45, 46, 47, /* 670 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 680 */ 273, 143, 193, 118, 143, 102, 103, 104, 105, 106, - /* 690 */ 107, 108, 109, 110, 111, 112, 113, 76, 118, 59, - /* 700 */ 241, 116, 117, 118, 304, 216, 217, 292, 143, 60, - /* 710 */ 89, 241, 19, 92, 193, 193, 23, 22, 311, 312, - /* 720 */ 231, 101, 22, 143, 102, 103, 104, 105, 106, 107, + /* 680 */ 217, 218, 194, 194, 194, 102, 103, 104, 105, 106, + /* 690 */ 107, 108, 109, 110, 111, 112, 113, 294, 240, 241, + /* 700 */ 120, 116, 117, 118, 59, 194, 217, 218, 211, 212, + /* 710 */ 213, 253, 19, 255, 194, 19, 23, 254, 138, 139, + /* 720 */ 24, 232, 117, 194, 102, 103, 104, 105, 106, 107, /* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 750 */ 57, 19, 193, 193, 59, 23, 116, 117, 118, 59, - /* 760 */ 201, 21, 241, 304, 193, 206, 127, 128, 129, 193, - /* 770 */ 128, 129, 235, 236, 304, 43, 44, 45, 46, 47, + /* 750 */ 57, 19, 264, 108, 76, 23, 127, 128, 129, 311, + /* 760 */ 312, 116, 117, 118, 316, 87, 306, 89, 308, 194, + /* 770 */ 92, 22, 59, 194, 22, 43, 44, 45, 46, 47, /* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 790 */ 22, 193, 216, 217, 193, 102, 103, 104, 105, 106, - /* 800 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 193, - /* 810 */ 193, 116, 117, 118, 216, 
217, 116, 117, 118, 226, - /* 820 */ 80, 193, 19, 235, 236, 304, 23, 211, 212, 231, - /* 830 */ 204, 216, 217, 205, 102, 103, 104, 105, 106, 107, + /* 790 */ 194, 95, 217, 218, 265, 102, 103, 104, 105, 106, + /* 800 */ 107, 108, 109, 110, 111, 112, 113, 232, 59, 113, + /* 810 */ 25, 59, 194, 217, 218, 119, 120, 121, 122, 123, + /* 820 */ 124, 125, 19, 145, 194, 194, 23, 131, 232, 116, + /* 830 */ 117, 118, 35, 194, 102, 103, 104, 105, 106, 107, /* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 860 */ 57, 19, 193, 123, 76, 239, 240, 193, 253, 239, - /* 870 */ 240, 239, 240, 244, 106, 107, 193, 89, 252, 193, - /* 880 */ 92, 59, 252, 254, 252, 43, 44, 45, 46, 47, + /* 860 */ 57, 19, 194, 66, 194, 116, 117, 118, 116, 117, + /* 870 */ 118, 74, 242, 294, 194, 194, 206, 23, 194, 25, + /* 880 */ 194, 111, 112, 113, 25, 43, 44, 45, 46, 47, /* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 900 */ 284, 161, 216, 217, 193, 102, 103, 104, 105, 106, - /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 244, - /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 254, - /* 930 */ 197, 313, 19, 127, 128, 129, 262, 204, 22, 117, - /* 940 */ 24, 216, 217, 273, 102, 103, 104, 105, 106, 107, + /* 900 */ 24, 194, 194, 217, 218, 102, 103, 104, 105, 106, + /* 910 */ 107, 108, 109, 110, 111, 112, 113, 241, 232, 194, + /* 920 */ 212, 213, 242, 242, 217, 218, 242, 130, 11, 253, + /* 930 */ 194, 255, 19, 265, 149, 59, 306, 194, 308, 232, + /* 940 */ 309, 310, 217, 218, 102, 103, 104, 105, 106, 107, /* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 970 */ 57, 193, 239, 240, 193, 59, 19, 188, 253, 190, - /* 980 */ 193, 311, 312, 16, 195, 252, 197, 193, 19, 301, - /* 990 */ 302, 135, 193, 204, 216, 217, 140, 216, 217, 266, - /* 1000 */ 204, 159, 45, 46, 47, 48, 49, 50, 51, 52, + /* 970 */ 57, 194, 194, 59, 194, 239, 19, 194, 25, 254, + /* 980 */ 303, 304, 23, 194, 25, 
126, 306, 306, 308, 308, + /* 990 */ 306, 271, 308, 117, 286, 217, 218, 217, 218, 194, + /* 1000 */ 194, 159, 45, 46, 47, 48, 49, 50, 51, 52, /* 1010 */ 53, 54, 55, 56, 57, 102, 103, 104, 105, 106, - /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 12, 239, 240, - /* 1030 */ 193, 298, 238, 117, 253, 239, 240, 238, 259, 260, - /* 1040 */ 193, 252, 27, 193, 77, 193, 79, 204, 252, 262, - /* 1050 */ 193, 299, 300, 193, 100, 266, 278, 42, 204, 102, + /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 59, 239, 194, + /* 1030 */ 116, 117, 118, 260, 254, 194, 240, 241, 194, 233, + /* 1040 */ 205, 240, 241, 205, 239, 128, 129, 270, 265, 253, + /* 1050 */ 194, 255, 217, 218, 253, 194, 255, 143, 280, 102, /* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, - /* 1070 */ 113, 117, 159, 216, 217, 121, 216, 217, 63, 193, - /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 240, - /* 1090 */ 238, 231, 19, 239, 240, 252, 22, 24, 211, 212, - /* 1100 */ 263, 252, 216, 217, 216, 217, 252, 153, 154, 155, - /* 1110 */ 253, 193, 19, 144, 213, 268, 43, 44, 45, 46, + /* 1070 */ 113, 118, 159, 217, 218, 240, 241, 118, 240, 241, + /* 1080 */ 194, 194, 194, 239, 116, 117, 118, 22, 253, 254, + /* 1090 */ 255, 253, 19, 255, 233, 194, 143, 24, 263, 212, + /* 1100 */ 213, 194, 143, 217, 218, 217, 218, 261, 262, 271, + /* 1110 */ 254, 143, 19, 7, 8, 9, 43, 44, 45, 46, /* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1130 */ 57, 193, 19, 59, 216, 217, 43, 44, 45, 46, + /* 1130 */ 57, 16, 19, 22, 23, 294, 43, 44, 45, 46, /* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1150 */ 57, 193, 19, 24, 216, 217, 43, 44, 45, 46, + /* 1150 */ 57, 312, 194, 214, 21, 316, 43, 44, 45, 46, /* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1170 */ 57, 284, 193, 208, 209, 102, 103, 104, 105, 106, - /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 59, 193, - /* 1190 */ 232, 117, 291, 193, 193, 102, 103, 104, 105, 106, - /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 193, 204, 22, - /* 1210 
*/ 23, 193, 25, 66, 193, 102, 103, 104, 105, 106, - /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 193, 193, - /* 1230 */ 216, 217, 85, 193, 238, 19, 16, 216, 217, 238, - /* 1240 */ 193, 94, 193, 239, 240, 231, 117, 268, 35, 116, - /* 1250 */ 216, 217, 216, 217, 22, 23, 252, 25, 208, 209, + /* 1170 */ 57, 106, 107, 286, 194, 102, 103, 104, 105, 106, + /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 207, 158, 59, + /* 1190 */ 160, 22, 77, 24, 79, 102, 103, 104, 105, 106, + /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 194, 194, 229, + /* 1210 */ 194, 231, 101, 80, 22, 102, 103, 104, 105, 106, + /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 288, 59, 12, + /* 1230 */ 217, 218, 293, 217, 218, 19, 106, 107, 59, 19, + /* 1240 */ 16, 127, 128, 129, 27, 115, 116, 117, 118, 194, + /* 1250 */ 120, 59, 22, 194, 24, 194, 123, 100, 128, 42, /* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 1270 */ 54, 55, 56, 57, 193, 193, 19, 5, 59, 66, - /* 1280 */ 193, 263, 10, 11, 12, 13, 14, 74, 101, 17, - /* 1290 */ 193, 46, 193, 146, 193, 76, 213, 77, 263, 79, - /* 1300 */ 12, 260, 30, 46, 32, 264, 87, 193, 89, 29, - /* 1310 */ 263, 92, 40, 33, 232, 27, 193, 108, 102, 103, + /* 1270 */ 54, 55, 56, 57, 117, 194, 217, 218, 121, 100, + /* 1280 */ 63, 194, 245, 153, 194, 155, 117, 19, 115, 194, + /* 1290 */ 73, 214, 194, 256, 161, 116, 117, 194, 217, 218, + /* 1300 */ 121, 77, 194, 79, 217, 218, 194, 217, 218, 117, + /* 1310 */ 153, 154, 155, 254, 46, 217, 218, 144, 102, 103, /* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 1330 */ 42, 138, 139, 101, 193, 116, 117, 118, 19, 20, - /* 1340 */ 255, 22, 70, 130, 135, 65, 256, 257, 193, 140, - /* 1350 */ 78, 63, 193, 81, 193, 36, 193, 216, 217, 193, - /* 1360 */ 115, 193, 263, 193, 145, 268, 59, 48, 193, 193, - /* 1370 */ 98, 193, 115, 193, 291, 216, 217, 193, 59, 216, - /* 1380 */ 217, 161, 216, 217, 216, 217, 216, 217, 131, 193, - /* 1390 */ 71, 193, 216, 217, 216, 217, 216, 217, 193, 260, - /* 1400 */ 216, 217, 19, 
264, 85, 133, 244, 100, 193, 90, - /* 1410 */ 138, 139, 216, 217, 216, 217, 254, 244, 193, 100, - /* 1420 */ 193, 216, 217, 116, 117, 106, 107, 254, 121, 193, - /* 1430 */ 115, 216, 217, 114, 162, 116, 117, 118, 115, 244, - /* 1440 */ 121, 216, 217, 216, 217, 193, 309, 193, 31, 254, - /* 1450 */ 313, 309, 216, 217, 309, 313, 39, 193, 313, 309, - /* 1460 */ 153, 154, 155, 313, 193, 150, 25, 144, 216, 217, - /* 1470 */ 216, 217, 153, 154, 155, 156, 157, 0, 1, 2, - /* 1480 */ 216, 217, 5, 149, 150, 22, 193, 10, 11, 12, - /* 1490 */ 13, 14, 193, 158, 17, 160, 193, 19, 20, 116, - /* 1500 */ 22, 25, 193, 24, 22, 193, 24, 30, 226, 32, - /* 1510 */ 19, 20, 226, 22, 36, 193, 53, 40, 193, 216, - /* 1520 */ 217, 193, 23, 193, 25, 216, 217, 36, 216, 217, - /* 1530 */ 193, 99, 193, 193, 22, 193, 193, 59, 216, 217, - /* 1540 */ 193, 216, 217, 193, 216, 217, 193, 70, 129, 71, - /* 1550 */ 59, 129, 193, 216, 217, 78, 216, 217, 81, 216, - /* 1560 */ 217, 193, 71, 85, 193, 133, 193, 126, 90, 216, - /* 1570 */ 217, 152, 258, 61, 152, 98, 85, 193, 100, 193, - /* 1580 */ 23, 90, 25, 121, 106, 107, 23, 216, 217, 216, - /* 1590 */ 217, 100, 114, 131, 116, 117, 118, 106, 107, 121, - /* 1600 */ 216, 217, 216, 217, 193, 114, 193, 116, 117, 118, - /* 1610 */ 133, 22, 121, 193, 59, 138, 139, 193, 142, 193, - /* 1620 */ 141, 23, 23, 25, 25, 120, 121, 216, 217, 216, - /* 1630 */ 217, 153, 154, 155, 156, 157, 216, 217, 19, 162, - /* 1640 */ 216, 217, 216, 217, 153, 154, 155, 156, 157, 1, - /* 1650 */ 2, 193, 59, 5, 19, 20, 318, 22, 10, 11, - /* 1660 */ 12, 13, 14, 193, 59, 17, 193, 23, 23, 25, - /* 1670 */ 25, 36, 117, 193, 216, 217, 193, 23, 30, 25, - /* 1680 */ 32, 19, 20, 23, 22, 25, 216, 217, 40, 216, - /* 1690 */ 217, 7, 8, 23, 59, 25, 83, 84, 36, 23, - /* 1700 */ 193, 25, 23, 23, 25, 25, 71, 153, 145, 155, - /* 1710 */ 117, 153, 23, 155, 25, 23, 97, 25, 70, 193, - /* 1720 */ 193, 59, 117, 236, 193, 193, 78, 193, 193, 81, - /* 1730 */ 141, 193, 193, 71, 193, 100, 288, 287, 242, 255, - /* 
1740 */ 255, 106, 107, 108, 255, 255, 98, 243, 297, 114, - /* 1750 */ 214, 116, 117, 118, 245, 191, 121, 271, 293, 267, - /* 1760 */ 267, 246, 100, 246, 245, 271, 271, 293, 106, 107, - /* 1770 */ 220, 271, 229, 225, 249, 219, 114, 259, 116, 117, - /* 1780 */ 118, 133, 259, 121, 219, 219, 138, 139, 153, 154, - /* 1790 */ 155, 156, 157, 280, 249, 243, 19, 20, 245, 22, - /* 1800 */ 196, 259, 140, 259, 60, 297, 141, 297, 200, 200, - /* 1810 */ 162, 38, 200, 36, 294, 153, 154, 155, 156, 157, - /* 1820 */ 151, 150, 294, 283, 22, 148, 250, 145, 249, 43, - /* 1830 */ 270, 272, 250, 249, 234, 18, 59, 200, 237, 237, - /* 1840 */ 237, 18, 199, 237, 149, 272, 272, 270, 71, 234, - /* 1850 */ 200, 234, 199, 246, 158, 62, 19, 20, 290, 22, - /* 1860 */ 246, 289, 246, 200, 246, 199, 22, 221, 200, 115, - /* 1870 */ 221, 199, 199, 36, 200, 218, 218, 100, 64, 218, - /* 1880 */ 22, 126, 227, 106, 107, 218, 227, 165, 224, 24, - /* 1890 */ 312, 114, 305, 116, 117, 118, 59, 113, 121, 224, - /* 1900 */ 200, 91, 221, 220, 218, 218, 218, 282, 71, 282, - /* 1910 */ 82, 221, 317, 317, 22, 277, 265, 140, 200, 158, - /* 1920 */ 279, 265, 147, 25, 146, 202, 13, 194, 250, 194, - /* 1930 */ 153, 154, 155, 156, 157, 250, 6, 100, 192, 192, - /* 1940 */ 249, 248, 192, 106, 107, 247, 207, 246, 207, 213, - /* 1950 */ 303, 114, 300, 116, 117, 118, 213, 4, 121, 222, - /* 1960 */ 222, 213, 213, 3, 207, 214, 214, 22, 163, 213, - /* 1970 */ 15, 23, 16, 23, 139, 151, 130, 25, 24, 20, - /* 1980 */ 142, 16, 1, 144, 142, 130, 130, 61, 37, 53, - /* 1990 */ 153, 154, 155, 156, 157, 53, 151, 303, 130, 53, - /* 2000 */ 53, 116, 34, 141, 1, 5, 22, 68, 115, 161, - /* 2010 */ 25, 68, 75, 41, 141, 115, 24, 20, 19, 131, - /* 2020 */ 125, 23, 59, 22, 67, 67, 22, 22, 22, 96, - /* 2030 */ 28, 24, 22, 67, 23, 149, 22, 25, 23, 23, - /* 2040 */ 23, 22, 141, 34, 23, 37, 97, 23, 116, 22, - /* 2050 */ 143, 25, 34, 75, 34, 88, 34, 34, 75, 34, - /* 2060 */ 86, 23, 93, 34, 22, 24, 34, 142, 25, 25, - /* 2070 */ 142, 23, 44, 23, 23, 
23, 23, 11, 23, 25, - /* 2080 */ 22, 22, 22, 1, 23, 23, 22, 22, 25, 15, - /* 2090 */ 1, 23, 25, 319, 319, 319, 135, 319, 319, 319, - /* 2100 */ 319, 319, 319, 141, 319, 141, 319, 319, 319, 319, - /* 2110 */ 319, 141, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2120 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2130 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2140 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2150 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2160 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2170 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2180 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2190 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2200 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2210 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2220 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2230 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2240 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2250 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2260 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2270 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2280 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2290 */ 319, 319, 319, 319, 319, 319, 319, + /* 1330 */ 232, 270, 153, 154, 155, 115, 116, 66, 19, 20, + /* 1340 */ 183, 22, 12, 312, 254, 194, 262, 316, 209, 210, + /* 1350 */ 266, 239, 194, 194, 108, 36, 85, 27, 19, 20, + /* 1360 */ 265, 22, 183, 245, 144, 94, 25, 48, 217, 218, + /* 1370 */ 293, 194, 42, 270, 256, 36, 217, 218, 59, 194, + /* 1380 */ 25, 135, 194, 115, 194, 161, 140, 194, 194, 15, + /* 1390 */ 71, 194, 312, 63, 217, 218, 316, 194, 59, 131, + /* 1400 */ 301, 302, 217, 218, 85, 217, 218, 217, 218, 90, + /* 1410 */ 71, 217, 218, 19, 217, 218, 245, 146, 262, 100, + /* 1420 */ 217, 218, 266, 265, 85, 106, 107, 256, 312, 90, + /* 1430 */ 209, 
210, 316, 114, 60, 116, 117, 118, 194, 100, + /* 1440 */ 121, 194, 194, 145, 115, 106, 107, 19, 46, 19, + /* 1450 */ 20, 24, 22, 114, 194, 116, 117, 118, 194, 245, + /* 1460 */ 121, 194, 164, 194, 217, 218, 36, 194, 258, 259, + /* 1470 */ 256, 194, 153, 154, 155, 156, 157, 217, 218, 150, + /* 1480 */ 31, 217, 218, 142, 217, 218, 217, 218, 39, 59, + /* 1490 */ 217, 218, 153, 154, 155, 156, 157, 149, 150, 5, + /* 1500 */ 145, 71, 183, 245, 10, 11, 12, 13, 14, 194, + /* 1510 */ 116, 17, 129, 227, 256, 85, 194, 115, 194, 23, + /* 1520 */ 90, 25, 183, 99, 30, 97, 32, 22, 22, 194, + /* 1530 */ 100, 194, 217, 218, 40, 152, 106, 107, 23, 217, + /* 1540 */ 218, 194, 19, 20, 114, 22, 116, 117, 118, 257, + /* 1550 */ 194, 121, 217, 218, 217, 218, 194, 133, 53, 36, + /* 1560 */ 23, 23, 25, 25, 70, 120, 121, 61, 141, 7, + /* 1570 */ 8, 121, 78, 217, 218, 81, 23, 227, 25, 217, + /* 1580 */ 218, 131, 59, 153, 154, 155, 156, 157, 0, 1, + /* 1590 */ 2, 59, 98, 5, 71, 23, 227, 25, 10, 11, + /* 1600 */ 12, 13, 14, 83, 84, 17, 23, 23, 25, 25, + /* 1610 */ 59, 194, 194, 183, 23, 23, 25, 25, 30, 194, + /* 1620 */ 32, 19, 20, 100, 22, 194, 194, 133, 40, 106, + /* 1630 */ 107, 108, 138, 139, 194, 217, 218, 114, 36, 116, + /* 1640 */ 117, 118, 217, 218, 121, 194, 194, 194, 23, 117, + /* 1650 */ 25, 194, 23, 23, 25, 25, 162, 194, 70, 194, + /* 1660 */ 145, 59, 23, 153, 25, 155, 78, 194, 117, 81, + /* 1670 */ 217, 218, 194, 71, 217, 218, 153, 154, 155, 156, + /* 1680 */ 157, 194, 217, 218, 194, 23, 98, 25, 321, 194, + /* 1690 */ 217, 218, 194, 19, 20, 194, 22, 153, 23, 155, + /* 1700 */ 25, 194, 100, 194, 217, 218, 183, 194, 106, 107, + /* 1710 */ 36, 194, 217, 218, 237, 194, 114, 243, 116, 117, + /* 1720 */ 118, 133, 194, 121, 217, 218, 138, 139, 194, 194, + /* 1730 */ 194, 290, 289, 59, 217, 218, 194, 194, 217, 218, + /* 1740 */ 194, 194, 140, 194, 194, 71, 194, 244, 194, 194, + /* 1750 */ 162, 217, 218, 194, 194, 153, 154, 155, 156, 157, + /* 1760 */ 217, 218, 194, 217, 218, 194, 217, 
218, 257, 217, + /* 1770 */ 218, 217, 218, 257, 100, 194, 257, 217, 218, 257, + /* 1780 */ 106, 107, 215, 299, 194, 183, 192, 194, 114, 194, + /* 1790 */ 116, 117, 118, 1, 2, 121, 221, 5, 217, 218, + /* 1800 */ 273, 197, 10, 11, 12, 13, 14, 217, 218, 17, + /* 1810 */ 217, 218, 217, 218, 140, 194, 246, 194, 273, 295, + /* 1820 */ 247, 273, 30, 247, 32, 269, 269, 153, 154, 155, + /* 1830 */ 156, 157, 40, 246, 273, 295, 230, 226, 217, 218, + /* 1840 */ 217, 218, 220, 261, 220, 282, 220, 19, 20, 244, + /* 1850 */ 22, 250, 141, 250, 246, 60, 201, 183, 261, 261, + /* 1860 */ 261, 201, 70, 299, 36, 299, 201, 38, 151, 150, + /* 1870 */ 78, 285, 22, 81, 296, 296, 148, 145, 251, 43, + /* 1880 */ 235, 274, 272, 250, 18, 251, 250, 59, 238, 238, + /* 1890 */ 98, 238, 238, 201, 18, 200, 149, 201, 235, 71, + /* 1900 */ 200, 235, 274, 247, 247, 274, 247, 272, 247, 158, + /* 1910 */ 292, 62, 22, 201, 200, 222, 201, 201, 200, 222, + /* 1920 */ 200, 115, 219, 291, 219, 133, 219, 64, 100, 228, + /* 1930 */ 138, 139, 22, 225, 106, 107, 126, 228, 225, 165, + /* 1940 */ 24, 219, 114, 222, 116, 117, 118, 221, 219, 121, + /* 1950 */ 219, 307, 219, 315, 162, 113, 284, 284, 222, 201, + /* 1960 */ 91, 320, 320, 82, 22, 267, 267, 279, 201, 158, + /* 1970 */ 281, 147, 146, 249, 251, 250, 25, 247, 251, 203, + /* 1980 */ 13, 153, 154, 155, 156, 157, 248, 6, 195, 195, + /* 1990 */ 193, 193, 193, 208, 214, 305, 305, 214, 214, 208, + /* 2000 */ 214, 223, 223, 215, 302, 215, 4, 3, 208, 22, + /* 2010 */ 214, 183, 15, 23, 163, 16, 23, 139, 151, 130, + /* 2020 */ 25, 142, 24, 20, 144, 16, 1, 142, 130, 130, + /* 2030 */ 61, 37, 53, 151, 130, 53, 53, 53, 116, 34, + /* 2040 */ 1, 141, 5, 22, 115, 161, 68, 25, 68, 75, + /* 2050 */ 141, 41, 115, 24, 20, 19, 131, 125, 67, 67, + /* 2060 */ 22, 96, 22, 22, 37, 23, 22, 59, 22, 24, + /* 2070 */ 23, 28, 149, 67, 22, 25, 23, 23, 23, 22, + /* 2080 */ 141, 23, 34, 97, 23, 34, 116, 22, 143, 25, + /* 2090 */ 34, 75, 75, 34, 34, 88, 34, 86, 23, 22, + /* 2100 */ 34, 25, 
24, 34, 25, 93, 142, 44, 142, 23, + /* 2110 */ 23, 23, 23, 22, 11, 25, 23, 25, 23, 22, + /* 2120 */ 22, 22, 1, 23, 23, 23, 22, 22, 15, 141, + /* 2130 */ 141, 25, 25, 1, 322, 135, 322, 141, 322, 322, + /* 2140 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2150 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2160 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2170 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2180 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2190 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2200 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2210 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2220 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2230 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2240 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2250 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2260 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2270 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2280 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2290 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2300 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2310 */ 322, 322, 322, 322, 322, 322, 322, 322, 322, 322, + /* 2320 */ 322, 322, 322, 322, }; -#define YY_SHIFT_COUNT (582) +#define YY_SHIFT_COUNT (586) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2089) +#define YY_SHIFT_MAX (2132) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 1648, 1477, 1272, 322, 322, 1, 1319, 1478, 1491, 1837, - /* 10 */ 1837, 1837, 471, 0, 0, 214, 1093, 1837, 1837, 1837, - /* 20 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 30 */ 1837, 271, 271, 1219, 1219, 216, 88, 1, 1, 1, - /* 40 */ 1, 1, 40, 111, 258, 361, 469, 512, 583, 622, - /* 50 */ 693, 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, - /* 60 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 
1093, 1093, 1093, - /* 70 */ 1093, 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635, - /* 80 */ 1662, 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 90 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 100 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 110 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 120 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 130 */ 1837, 137, 181, 181, 181, 181, 181, 181, 181, 94, - /* 140 */ 430, 66, 65, 112, 366, 533, 533, 740, 1257, 533, - /* 150 */ 533, 79, 79, 533, 412, 412, 412, 77, 412, 123, - /* 160 */ 113, 113, 113, 22, 22, 2112, 2112, 328, 328, 328, - /* 170 */ 239, 468, 468, 468, 468, 1015, 1015, 409, 366, 1187, - /* 180 */ 1232, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 200 */ 533, 969, 621, 621, 533, 642, 788, 788, 1133, 1133, - /* 210 */ 822, 822, 67, 1193, 2112, 2112, 2112, 2112, 2112, 2112, - /* 220 */ 2112, 1307, 954, 954, 585, 472, 640, 387, 695, 538, - /* 230 */ 541, 700, 533, 533, 533, 533, 533, 533, 533, 533, - /* 240 */ 533, 533, 222, 533, 533, 533, 533, 533, 533, 533, - /* 250 */ 533, 533, 533, 533, 533, 1213, 1213, 1213, 533, 533, - /* 260 */ 533, 565, 533, 533, 533, 916, 1147, 533, 533, 1288, - /* 270 */ 533, 533, 533, 533, 533, 533, 533, 533, 639, 1280, - /* 280 */ 209, 1129, 1129, 1129, 1129, 580, 209, 209, 1209, 768, - /* 290 */ 917, 649, 1315, 1334, 405, 1334, 1383, 249, 1315, 1315, - /* 300 */ 249, 1315, 405, 1383, 1441, 464, 1245, 1417, 1417, 1417, - /* 310 */ 1323, 1323, 1323, 1323, 184, 184, 1335, 1476, 856, 1482, - /* 320 */ 1744, 1744, 1665, 1665, 1773, 1773, 1665, 1669, 1671, 1802, - /* 330 */ 1677, 1682, 1786, 1677, 1682, 1817, 1817, 1817, 1817, 1665, - /* 340 */ 1823, 1695, 1671, 1671, 1695, 1802, 1786, 1695, 1786, 1695, - /* 350 */ 1665, 1823, 1696, 1793, 1665, 1823, 1844, 1665, 1823, 1665, - /* 360 */ 1823, 1844, 1754, 1754, 1754, 1814, 
1858, 1858, 1844, 1754, - /* 370 */ 1755, 1754, 1814, 1754, 1754, 1722, 1865, 1784, 1784, 1844, - /* 380 */ 1665, 1810, 1810, 1828, 1828, 1677, 1682, 1892, 1665, 1761, - /* 390 */ 1677, 1775, 1778, 1695, 1898, 1913, 1913, 1930, 1930, 1930, - /* 400 */ 2112, 2112, 2112, 2112, 2112, 2112, 2112, 2112, 2112, 2112, - /* 410 */ 2112, 2112, 2112, 2112, 2112, 207, 1220, 331, 620, 967, - /* 420 */ 806, 1074, 1499, 1432, 1463, 1479, 1419, 1422, 1557, 1512, - /* 430 */ 1598, 1599, 1644, 1645, 1654, 1660, 1555, 1505, 1684, 1462, - /* 440 */ 1670, 1563, 1619, 1593, 1676, 1679, 1613, 1680, 1554, 1558, - /* 450 */ 1689, 1692, 1605, 1589, 1953, 1960, 1945, 1805, 1955, 1956, - /* 460 */ 1948, 1950, 1835, 1824, 1846, 1952, 1952, 1954, 1838, 1959, - /* 470 */ 1839, 1965, 1981, 1842, 1855, 1952, 1856, 1926, 1951, 1952, - /* 480 */ 1845, 1936, 1942, 1946, 1947, 1868, 1885, 1968, 1862, 2003, - /* 490 */ 2000, 1984, 1893, 1848, 1939, 1985, 1943, 1937, 1972, 1873, - /* 500 */ 1900, 1992, 1997, 1999, 1888, 1895, 2001, 1957, 2004, 2005, - /* 510 */ 1998, 2006, 1958, 1963, 2007, 1933, 2002, 2010, 1966, 2008, - /* 520 */ 2011, 2009, 1886, 2014, 2015, 2016, 2012, 2017, 2019, 1949, - /* 530 */ 1901, 2021, 2024, 1932, 2018, 2027, 1907, 2026, 2020, 2022, - /* 540 */ 2023, 2025, 1967, 1978, 1974, 2028, 1983, 1969, 2029, 2038, - /* 550 */ 2042, 2041, 2043, 2044, 2032, 1925, 1928, 2048, 2026, 2050, - /* 560 */ 2051, 2052, 2053, 2054, 2055, 2058, 2066, 2059, 2060, 2061, - /* 570 */ 2062, 2064, 2065, 2063, 1961, 1962, 1964, 1970, 2067, 2068, - /* 580 */ 2074, 2082, 2089, + /* 0 */ 1792, 1588, 1494, 322, 322, 399, 306, 1319, 1339, 1430, + /* 10 */ 1828, 1828, 1828, 580, 399, 399, 399, 399, 399, 0, + /* 20 */ 0, 214, 1093, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 30 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1130, 1130, + /* 40 */ 365, 365, 55, 278, 436, 713, 713, 201, 201, 201, + /* 50 */ 201, 40, 111, 258, 361, 469, 512, 583, 622, 693, + /* 60 */ 732, 803, 842, 913, 1073, 1093, 1093, 
1093, 1093, 1093, + /* 70 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, + /* 80 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1523, 1602, + /* 90 */ 1674, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 100 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 110 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 120 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 130 */ 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, 1828, + /* 140 */ 137, 181, 181, 181, 181, 181, 181, 181, 96, 222, + /* 150 */ 143, 477, 713, 1133, 1268, 713, 713, 79, 79, 713, + /* 160 */ 770, 83, 65, 65, 65, 288, 162, 162, 2138, 2138, + /* 170 */ 696, 696, 696, 238, 474, 474, 474, 474, 1217, 1217, + /* 180 */ 678, 477, 324, 398, 713, 713, 713, 713, 713, 713, + /* 190 */ 713, 713, 713, 713, 713, 713, 713, 713, 713, 713, + /* 200 */ 713, 713, 713, 1220, 366, 366, 713, 917, 283, 283, + /* 210 */ 434, 434, 605, 605, 1298, 2138, 2138, 2138, 2138, 2138, + /* 220 */ 2138, 2138, 1179, 1157, 1157, 487, 527, 585, 645, 749, + /* 230 */ 914, 968, 752, 713, 713, 713, 713, 713, 713, 713, + /* 240 */ 713, 713, 713, 303, 713, 713, 713, 713, 713, 713, + /* 250 */ 713, 713, 713, 713, 713, 713, 797, 797, 797, 713, + /* 260 */ 713, 713, 959, 713, 713, 713, 1169, 1271, 713, 713, + /* 270 */ 1330, 713, 713, 713, 713, 713, 713, 713, 713, 629, + /* 280 */ 7, 91, 876, 876, 876, 876, 953, 91, 91, 1246, + /* 290 */ 1065, 1106, 1374, 1329, 1348, 468, 1348, 1394, 785, 1329, + /* 300 */ 1329, 785, 1329, 468, 1394, 859, 854, 1402, 1449, 1449, + /* 310 */ 1449, 1173, 1173, 1173, 1173, 1355, 1355, 1030, 1341, 405, + /* 320 */ 1230, 1795, 1795, 1711, 1711, 1829, 1829, 1711, 1717, 1719, + /* 330 */ 1850, 1728, 1732, 1836, 1728, 1732, 1866, 1866, 1866, 1866, + /* 340 */ 1711, 1876, 1747, 1719, 1719, 1747, 1850, 1836, 1747, 1836, + /* 350 */ 1747, 1711, 1876, 1751, 1849, 1711, 1876, 1890, 1711, 1876, + /* 360 */ 1711, 1876, 1890, 1806, 1806, 
1806, 1863, 1910, 1910, 1890, + /* 370 */ 1806, 1810, 1806, 1863, 1806, 1806, 1774, 1916, 1842, 1842, + /* 380 */ 1890, 1711, 1869, 1869, 1881, 1881, 1728, 1732, 1942, 1711, + /* 390 */ 1811, 1728, 1824, 1826, 1747, 1951, 1967, 1967, 1981, 1981, + /* 400 */ 1981, 2138, 2138, 2138, 2138, 2138, 2138, 2138, 2138, 2138, + /* 410 */ 2138, 2138, 2138, 2138, 2138, 2138, 20, 1224, 256, 1111, + /* 420 */ 1115, 1114, 1192, 1496, 1424, 1505, 1427, 355, 1383, 1537, + /* 430 */ 1506, 1538, 1553, 1583, 1584, 1591, 1625, 541, 1445, 1562, + /* 440 */ 1450, 1572, 1515, 1428, 1532, 1592, 1629, 1520, 1630, 1639, + /* 450 */ 1510, 1544, 1662, 1675, 1551, 48, 2002, 2004, 1987, 1851, + /* 460 */ 1997, 1999, 1990, 1993, 1878, 1867, 1889, 1995, 1995, 1998, + /* 470 */ 1879, 2003, 1880, 2009, 2025, 1885, 1898, 1995, 1899, 1969, + /* 480 */ 1994, 1995, 1882, 1979, 1982, 1983, 1984, 1904, 1922, 2005, + /* 490 */ 1900, 2039, 2037, 2021, 1929, 1884, 1978, 2022, 1980, 1974, + /* 500 */ 2010, 1909, 1937, 2029, 2034, 2036, 1925, 1932, 2038, 1991, + /* 510 */ 2040, 2041, 2042, 2044, 1992, 2008, 2045, 1965, 2043, 2046, + /* 520 */ 2006, 2027, 2047, 2048, 1923, 2052, 2053, 2054, 2050, 2055, + /* 530 */ 2057, 1986, 1939, 2058, 2061, 1970, 2051, 2065, 1945, 2064, + /* 540 */ 2056, 2059, 2060, 2062, 2007, 2016, 2011, 2063, 2017, 2012, + /* 550 */ 2066, 2075, 2077, 2078, 2076, 2079, 2069, 1964, 1966, 2086, + /* 560 */ 2064, 2087, 2088, 2089, 2091, 2090, 2093, 2092, 2095, 2097, + /* 570 */ 2103, 2098, 2099, 2100, 2101, 2104, 2105, 2106, 2000, 1988, + /* 580 */ 1989, 1996, 2107, 2102, 2113, 2121, 2132, }; -#define YY_REDUCE_COUNT (414) -#define YY_REDUCE_MIN (-271) -#define YY_REDUCE_MAX (1757) +#define YY_REDUCE_COUNT (415) +#define YY_REDUCE_MIN (-275) +#define YY_REDUCE_MAX (1800) static const short yy_reduce_ofst[] = { - /* 0 */ -125, 733, 789, 241, 293, -123, -193, -191, -183, -187, - /* 10 */ 166, 238, 133, -207, -199, -267, -176, -6, 204, 489, - /* 20 */ 576, 598, -175, 686, 860, 615, 725, 1014, 
778, 781, - /* 30 */ 857, 616, 887, 87, 240, -192, 408, 626, 796, 843, - /* 40 */ 854, 1004, -271, -271, -271, -271, -271, -271, -271, -271, - /* 50 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 60 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, 80, - /* 80 */ 83, 313, 886, 888, 918, 938, 1021, 1034, 1036, 1141, - /* 90 */ 1159, 1163, 1166, 1168, 1170, 1176, 1178, 1180, 1184, 1196, - /* 100 */ 1198, 1205, 1215, 1225, 1227, 1236, 1252, 1254, 1264, 1303, - /* 110 */ 1309, 1312, 1322, 1325, 1328, 1337, 1340, 1343, 1353, 1371, - /* 120 */ 1373, 1384, 1386, 1411, 1413, 1420, 1424, 1426, 1458, 1470, - /* 130 */ 1473, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 140 */ -271, -271, 138, 459, 396, -158, 470, 302, -212, 521, - /* 150 */ 201, -195, -92, 559, 630, 632, 630, -271, 632, 901, - /* 160 */ 63, 407, 670, -271, -271, -271, -271, 161, 161, 161, - /* 170 */ 251, 335, 847, 979, 1097, 537, 588, 618, 628, 688, - /* 180 */ 688, -166, -161, 674, 787, 794, 799, 852, 996, -122, - /* 190 */ 837, -120, 1018, 1035, 415, 1047, 1001, 958, 1082, 400, - /* 200 */ 1099, 779, 1137, 1142, 263, 1083, 1145, 1150, 1041, 1139, - /* 210 */ 965, 1050, 362, 849, 752, 629, 675, 1162, 1173, 1090, - /* 220 */ 1195, -194, 56, 185, -135, 232, 522, 560, 571, 601, - /* 230 */ 617, 669, 683, 711, 850, 893, 1000, 1040, 1049, 1081, - /* 240 */ 1087, 1101, 392, 1114, 1123, 1155, 1161, 1175, 1271, 1293, - /* 250 */ 1299, 1330, 1339, 1342, 1347, 593, 1282, 1286, 1350, 1359, - /* 260 */ 1368, 1314, 1480, 1483, 1507, 1085, 1338, 1526, 1527, 1487, - /* 270 */ 1531, 560, 1532, 1534, 1535, 1538, 1539, 1541, 1448, 1450, - /* 280 */ 1496, 1484, 1485, 1489, 1490, 1314, 1496, 1496, 1504, 1536, - /* 290 */ 1564, 1451, 1486, 1492, 1509, 1493, 1465, 1515, 1494, 1495, - /* 300 */ 1517, 1500, 1519, 1474, 1550, 1543, 1548, 1556, 1565, 1566, - /* 310 */ 1518, 1523, 1542, 1544, 1525, 1545, 1513, 1553, 1552, 
1604, - /* 320 */ 1508, 1510, 1608, 1609, 1520, 1528, 1612, 1540, 1559, 1560, - /* 330 */ 1576, 1579, 1600, 1582, 1584, 1601, 1602, 1603, 1606, 1637, - /* 340 */ 1643, 1607, 1573, 1574, 1614, 1577, 1615, 1616, 1617, 1618, - /* 350 */ 1650, 1653, 1568, 1572, 1663, 1666, 1646, 1668, 1672, 1674, - /* 360 */ 1673, 1649, 1657, 1658, 1661, 1655, 1664, 1675, 1681, 1667, - /* 370 */ 1683, 1686, 1659, 1687, 1688, 1578, 1587, 1625, 1627, 1690, - /* 380 */ 1700, 1595, 1596, 1651, 1656, 1678, 1691, 1638, 1718, 1641, - /* 390 */ 1685, 1693, 1698, 1701, 1723, 1733, 1735, 1746, 1747, 1750, - /* 400 */ 1647, 1694, 1652, 1739, 1736, 1743, 1748, 1749, 1741, 1737, - /* 410 */ 1738, 1751, 1752, 1756, 1757, + /* 0 */ -71, 194, 343, 835, -180, -177, 838, -194, -188, -185, + /* 10 */ -183, 82, 183, -65, 133, 245, 346, 407, 458, -178, + /* 20 */ 75, -275, -4, 310, 312, 489, 575, 596, 463, 686, + /* 30 */ 707, 725, 780, 1098, 856, 778, 1059, 1090, 708, 887, + /* 40 */ 86, 448, 980, 630, 680, 681, 684, 796, 801, 796, + /* 50 */ 801, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 60 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 70 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 80 */ -261, -261, -261, -261, -261, -261, -261, -261, 391, 886, + /* 90 */ 888, 1013, 1016, 1081, 1087, 1151, 1159, 1177, 1185, 1188, + /* 100 */ 1190, 1194, 1197, 1203, 1247, 1260, 1264, 1267, 1269, 1273, + /* 110 */ 1315, 1322, 1335, 1337, 1356, 1362, 1418, 1425, 1453, 1457, + /* 120 */ 1465, 1473, 1487, 1495, 1507, 1517, 1521, 1534, 1543, 1546, + /* 130 */ 1549, 1552, 1554, 1560, 1581, 1590, 1593, 1595, 1621, 1623, + /* 140 */ -261, -261, -261, -261, -261, -261, -261, -261, -261, -261, + /* 150 */ -261, -186, -117, 260, 263, 460, 631, -74, 497, -181, + /* 160 */ -261, 939, 176, 274, 338, 676, -261, -261, -261, -261, + /* 170 */ -212, -212, -212, -184, 149, 777, 1061, 1103, 265, 419, + /* 180 */ -254, 670, 677, 677, -11, -129, 184, 488, 736, 789, + /* 190 */ 805, 
844, 403, 529, 579, 668, 783, 841, 1158, 1112, + /* 200 */ 806, 861, 1095, 846, 839, 1031, -189, 1077, 1080, 1116, + /* 210 */ 1084, 1156, 1139, 1221, 46, 1099, 1037, 1118, 1171, 1214, + /* 220 */ 1210, 1258, -210, -190, -176, -115, 117, 262, 376, 490, + /* 230 */ 511, 520, 618, 639, 743, 901, 907, 958, 1014, 1055, + /* 240 */ 1108, 1193, 1244, 720, 1248, 1277, 1324, 1347, 1417, 1431, + /* 250 */ 1432, 1440, 1451, 1452, 1463, 1478, 1286, 1350, 1369, 1490, + /* 260 */ 1498, 1501, 773, 1509, 1513, 1528, 1292, 1367, 1535, 1536, + /* 270 */ 1477, 1542, 376, 1547, 1550, 1555, 1559, 1568, 1571, 1441, + /* 280 */ 1443, 1474, 1511, 1516, 1519, 1522, 773, 1474, 1474, 1503, + /* 290 */ 1567, 1594, 1484, 1527, 1556, 1570, 1557, 1524, 1573, 1545, + /* 300 */ 1548, 1576, 1561, 1587, 1540, 1575, 1606, 1611, 1622, 1624, + /* 310 */ 1626, 1582, 1597, 1598, 1599, 1601, 1603, 1563, 1608, 1605, + /* 320 */ 1604, 1564, 1566, 1655, 1660, 1578, 1579, 1665, 1586, 1607, + /* 330 */ 1610, 1627, 1633, 1645, 1634, 1636, 1650, 1651, 1653, 1654, + /* 340 */ 1692, 1695, 1656, 1628, 1631, 1657, 1635, 1663, 1659, 1666, + /* 350 */ 1661, 1696, 1700, 1618, 1632, 1712, 1714, 1693, 1715, 1718, + /* 360 */ 1716, 1720, 1697, 1703, 1705, 1707, 1701, 1708, 1713, 1721, + /* 370 */ 1722, 1726, 1729, 1709, 1731, 1733, 1638, 1644, 1672, 1673, + /* 380 */ 1736, 1758, 1641, 1642, 1698, 1699, 1723, 1725, 1688, 1767, + /* 390 */ 1689, 1727, 1724, 1738, 1730, 1776, 1793, 1794, 1797, 1798, + /* 400 */ 1799, 1690, 1691, 1702, 1785, 1780, 1783, 1784, 1786, 1791, + /* 410 */ 1778, 1779, 1788, 1790, 1796, 1800, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1652, 1652, 1652, 1482, 1247, 1358, 1247, 1247, 1247, 1482, - /* 10 */ 1482, 1482, 1247, 1388, 1388, 1535, 1280, 1247, 1247, 1247, - /* 20 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1481, 1247, - /* 30 */ 1247, 1247, 1247, 1568, 1568, 1247, 1247, 1247, 1247, 1247, - /* 40 */ 1247, 1247, 1247, 1397, 1247, 1404, 1247, 1247, 1247, 1247, - /* 50 */ 
1247, 1483, 1484, 1247, 1247, 1247, 1534, 1536, 1499, 1411, - /* 60 */ 1410, 1409, 1408, 1517, 1376, 1402, 1395, 1399, 1478, 1479, - /* 70 */ 1477, 1630, 1484, 1483, 1247, 1398, 1446, 1462, 1445, 1247, - /* 80 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 90 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 100 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 110 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 120 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 130 */ 1247, 1454, 1461, 1460, 1459, 1468, 1458, 1455, 1448, 1447, - /* 140 */ 1449, 1450, 1247, 1247, 1271, 1247, 1247, 1268, 1322, 1247, - /* 150 */ 1247, 1247, 1247, 1247, 1554, 1553, 1247, 1451, 1247, 1280, - /* 160 */ 1439, 1438, 1437, 1465, 1452, 1464, 1463, 1542, 1604, 1603, - /* 170 */ 1500, 1247, 1247, 1247, 1247, 1247, 1247, 1568, 1247, 1247, - /* 180 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 190 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 200 */ 1247, 1378, 1568, 1568, 1247, 1280, 1568, 1568, 1379, 1379, - /* 210 */ 1276, 1276, 1382, 1247, 1549, 1349, 1349, 1349, 1349, 1358, - /* 220 */ 1349, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 230 */ 1247, 1247, 1247, 1247, 1247, 1247, 1539, 1537, 1247, 1247, - /* 240 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 250 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 260 */ 1247, 1247, 1247, 1247, 1247, 1354, 1247, 1247, 1247, 1247, - /* 270 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1597, 1247, 1512, - /* 280 */ 1336, 1354, 1354, 1354, 1354, 1356, 1337, 1335, 1348, 1281, - /* 290 */ 1254, 1644, 1414, 1403, 1355, 1403, 1641, 1401, 1414, 1414, - /* 300 */ 1401, 1414, 1355, 1641, 1297, 1619, 1292, 1388, 1388, 1388, - /* 310 */ 1378, 1378, 1378, 1378, 1382, 1382, 1480, 1355, 1348, 1247, - /* 320 */ 1644, 1644, 1364, 1364, 1643, 1643, 1364, 1500, 1627, 1423, 
- /* 330 */ 1396, 1382, 1325, 1396, 1382, 1331, 1331, 1331, 1331, 1364, - /* 340 */ 1265, 1401, 1627, 1627, 1401, 1423, 1325, 1401, 1325, 1401, - /* 350 */ 1364, 1265, 1516, 1638, 1364, 1265, 1490, 1364, 1265, 1364, - /* 360 */ 1265, 1490, 1323, 1323, 1323, 1312, 1247, 1247, 1490, 1323, - /* 370 */ 1297, 1323, 1312, 1323, 1323, 1586, 1247, 1494, 1494, 1490, - /* 380 */ 1364, 1578, 1578, 1391, 1391, 1396, 1382, 1485, 1364, 1247, - /* 390 */ 1396, 1394, 1392, 1401, 1315, 1600, 1600, 1596, 1596, 1596, - /* 400 */ 1649, 1649, 1549, 1612, 1280, 1280, 1280, 1280, 1612, 1299, - /* 410 */ 1299, 1281, 1281, 1280, 1612, 1247, 1247, 1247, 1247, 1247, - /* 420 */ 1247, 1607, 1247, 1544, 1501, 1368, 1247, 1247, 1247, 1247, - /* 430 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 440 */ 1247, 1247, 1555, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 450 */ 1247, 1247, 1247, 1428, 1247, 1250, 1546, 1247, 1247, 1247, - /* 460 */ 1247, 1247, 1247, 1247, 1247, 1405, 1406, 1369, 1247, 1247, - /* 470 */ 1247, 1247, 1247, 1247, 1247, 1420, 1247, 1247, 1247, 1415, - /* 480 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1640, 1247, - /* 490 */ 1247, 1247, 1247, 1247, 1247, 1515, 1514, 1247, 1247, 1366, - /* 500 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 510 */ 1247, 1247, 1247, 1295, 1247, 1247, 1247, 1247, 1247, 1247, - /* 520 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 530 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1393, 1247, 1247, - /* 540 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 550 */ 1247, 1247, 1583, 1383, 1247, 1247, 1247, 1247, 1631, 1247, - /* 560 */ 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - /* 570 */ 1247, 1247, 1247, 1623, 1339, 1429, 1247, 1432, 1269, 1247, - /* 580 */ 1259, 1247, 1247, + /* 0 */ 1667, 1667, 1667, 1495, 1258, 1371, 1258, 1258, 1258, 1258, + /* 10 */ 1495, 1495, 1495, 1258, 1258, 1258, 1258, 1258, 1258, 1401, + /* 20 */ 1401, 1548, 1291, 
1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 30 */ 1258, 1258, 1258, 1258, 1258, 1494, 1258, 1258, 1258, 1258, + /* 40 */ 1582, 1582, 1258, 1258, 1258, 1258, 1258, 1567, 1566, 1258, + /* 50 */ 1258, 1258, 1410, 1258, 1417, 1258, 1258, 1258, 1258, 1258, + /* 60 */ 1496, 1497, 1258, 1258, 1258, 1547, 1549, 1512, 1424, 1423, + /* 70 */ 1422, 1421, 1530, 1389, 1415, 1408, 1412, 1491, 1492, 1490, + /* 80 */ 1645, 1497, 1496, 1258, 1411, 1459, 1475, 1458, 1258, 1258, + /* 90 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 100 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 110 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 120 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 130 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 140 */ 1467, 1474, 1473, 1472, 1481, 1471, 1468, 1461, 1460, 1462, + /* 150 */ 1463, 1282, 1258, 1279, 1333, 1258, 1258, 1258, 1258, 1258, + /* 160 */ 1464, 1291, 1452, 1451, 1450, 1258, 1478, 1465, 1477, 1476, + /* 170 */ 1555, 1619, 1618, 1513, 1258, 1258, 1258, 1258, 1258, 1258, + /* 180 */ 1582, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 190 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 200 */ 1258, 1258, 1258, 1391, 1582, 1582, 1258, 1291, 1582, 1582, + /* 210 */ 1392, 1392, 1287, 1287, 1395, 1562, 1362, 1362, 1362, 1362, + /* 220 */ 1371, 1362, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 230 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1552, 1550, 1258, + /* 240 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 250 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 260 */ 1258, 1258, 1258, 1258, 1258, 1258, 1367, 1258, 1258, 1258, + /* 270 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1612, 1258, + /* 280 */ 1525, 1347, 1367, 1367, 1367, 1367, 1369, 1348, 1346, 1361, + /* 290 */ 1292, 1265, 1659, 1427, 1416, 1368, 1416, 1656, 1414, 1427, + /* 300 */ 1427, 
1414, 1427, 1368, 1656, 1308, 1634, 1303, 1401, 1401, + /* 310 */ 1401, 1391, 1391, 1391, 1391, 1395, 1395, 1493, 1368, 1361, + /* 320 */ 1258, 1659, 1659, 1377, 1377, 1658, 1658, 1377, 1513, 1642, + /* 330 */ 1436, 1409, 1395, 1336, 1409, 1395, 1342, 1342, 1342, 1342, + /* 340 */ 1377, 1276, 1414, 1642, 1642, 1414, 1436, 1336, 1414, 1336, + /* 350 */ 1414, 1377, 1276, 1529, 1653, 1377, 1276, 1503, 1377, 1276, + /* 360 */ 1377, 1276, 1503, 1334, 1334, 1334, 1323, 1258, 1258, 1503, + /* 370 */ 1334, 1308, 1334, 1323, 1334, 1334, 1600, 1258, 1507, 1507, + /* 380 */ 1503, 1377, 1592, 1592, 1404, 1404, 1409, 1395, 1498, 1377, + /* 390 */ 1258, 1409, 1407, 1405, 1414, 1326, 1615, 1615, 1611, 1611, + /* 400 */ 1611, 1664, 1664, 1562, 1627, 1291, 1291, 1291, 1291, 1627, + /* 410 */ 1310, 1310, 1292, 1292, 1291, 1627, 1258, 1258, 1258, 1258, + /* 420 */ 1258, 1258, 1622, 1258, 1557, 1514, 1381, 1258, 1258, 1258, + /* 430 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 440 */ 1258, 1258, 1258, 1568, 1258, 1258, 1258, 1258, 1258, 1258, + /* 450 */ 1258, 1258, 1258, 1258, 1258, 1441, 1258, 1261, 1559, 1258, + /* 460 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1418, 1419, 1382, + /* 470 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1433, 1258, 1258, + /* 480 */ 1258, 1428, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 490 */ 1655, 1258, 1258, 1258, 1258, 1258, 1258, 1528, 1527, 1258, + /* 500 */ 1258, 1379, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 510 */ 1258, 1258, 1258, 1258, 1258, 1306, 1258, 1258, 1258, 1258, + /* 520 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 530 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1406, + /* 540 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1258, + /* 550 */ 1258, 1258, 1258, 1258, 1597, 1396, 1258, 1258, 1258, 1258, + /* 560 */ 1646, 1258, 1258, 1258, 1258, 1356, 1258, 1258, 1258, 1258, + /* 570 */ 1258, 1258, 1258, 1258, 1258, 1258, 1258, 1638, 1350, 1442, + 
/* 580 */ 1258, 1445, 1280, 1258, 1270, 1258, 1258, }; /********** End of lemon-generated parsing tables *****************************/ @@ -172536,8 +173901,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* TRUEFALSE => nothing */ 0, /* ISNOT => nothing */ 0, /* FUNCTION => nothing */ - 0, /* UMINUS => nothing */ 0, /* UPLUS => nothing */ + 0, /* UMINUS => nothing */ 0, /* TRUTH => nothing */ 0, /* REGISTER => nothing */ 0, /* VECTOR => nothing */ @@ -172546,6 +173911,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* ASTERISK => nothing */ 0, /* SPAN => nothing */ 0, /* ERROR => nothing */ + 0, /* QNUMBER => nothing */ 0, /* SPACE => nothing */ 0, /* ILLEGAL => nothing */ }; @@ -172588,14 +173954,9 @@ struct yyParser { #endif sqlite3ParserARG_SDECL /* A place to hold %extra_argument */ sqlite3ParserCTX_SDECL /* A place to hold %extra_context */ -#if YYSTACKDEPTH<=0 - int yystksz; /* Current side of the stack */ - yyStackEntry *yystack; /* The parser's stack */ - yyStackEntry yystk0; /* First stack entry */ -#else - yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */ - yyStackEntry *yystackEnd; /* Last entry in the stack */ -#endif + yyStackEntry *yystackEnd; /* Last entry in the stack */ + yyStackEntry *yystack; /* The parser stack */ + yyStackEntry yystk0[YYSTACKDEPTH]; /* Initial stack space */ }; typedef struct yyParser yyParser; @@ -172809,8 +174170,8 @@ static const char *const yyTokenName[] = { /* 170 */ "TRUEFALSE", /* 171 */ "ISNOT", /* 172 */ "FUNCTION", - /* 173 */ "UMINUS", - /* 174 */ "UPLUS", + /* 173 */ "UPLUS", + /* 174 */ "UMINUS", /* 175 */ "TRUTH", /* 176 */ "REGISTER", /* 177 */ "VECTOR", @@ -172819,142 +174180,145 @@ static const char *const yyTokenName[] = { /* 180 */ "ASTERISK", /* 181 */ "SPAN", /* 182 */ "ERROR", - /* 183 */ "SPACE", - /* 184 */ "ILLEGAL", - /* 185 */ "input", - /* 186 */ "cmdlist", - /* 187 */ "ecmd", - /* 188 */ "cmdx", - /* 189 */ "explain", - /* 190 */ "cmd", - /* 191 */ "transtype", - /* 192 */ "trans_opt", 
- /* 193 */ "nm", - /* 194 */ "savepoint_opt", - /* 195 */ "create_table", - /* 196 */ "create_table_args", - /* 197 */ "createkw", - /* 198 */ "temp", - /* 199 */ "ifnotexists", - /* 200 */ "dbnm", - /* 201 */ "columnlist", - /* 202 */ "conslist_opt", - /* 203 */ "table_option_set", - /* 204 */ "select", - /* 205 */ "table_option", - /* 206 */ "columnname", - /* 207 */ "carglist", - /* 208 */ "typetoken", - /* 209 */ "typename", - /* 210 */ "signed", - /* 211 */ "plus_num", - /* 212 */ "minus_num", - /* 213 */ "scanpt", - /* 214 */ "scantok", - /* 215 */ "ccons", - /* 216 */ "term", - /* 217 */ "expr", - /* 218 */ "onconf", - /* 219 */ "sortorder", - /* 220 */ "autoinc", - /* 221 */ "eidlist_opt", - /* 222 */ "refargs", - /* 223 */ "defer_subclause", - /* 224 */ "generated", - /* 225 */ "refarg", - /* 226 */ "refact", - /* 227 */ "init_deferred_pred_opt", - /* 228 */ "conslist", - /* 229 */ "tconscomma", - /* 230 */ "tcons", - /* 231 */ "sortlist", - /* 232 */ "eidlist", - /* 233 */ "defer_subclause_opt", - /* 234 */ "orconf", - /* 235 */ "resolvetype", - /* 236 */ "raisetype", - /* 237 */ "ifexists", - /* 238 */ "fullname", - /* 239 */ "selectnowith", - /* 240 */ "oneselect", - /* 241 */ "wqlist", - /* 242 */ "multiselect_op", - /* 243 */ "distinct", - /* 244 */ "selcollist", - /* 245 */ "from", - /* 246 */ "where_opt", - /* 247 */ "groupby_opt", - /* 248 */ "having_opt", - /* 249 */ "orderby_opt", - /* 250 */ "limit_opt", - /* 251 */ "window_clause", - /* 252 */ "values", - /* 253 */ "nexprlist", - /* 254 */ "sclp", - /* 255 */ "as", - /* 256 */ "seltablist", - /* 257 */ "stl_prefix", - /* 258 */ "joinop", - /* 259 */ "on_using", - /* 260 */ "indexed_by", - /* 261 */ "exprlist", - /* 262 */ "xfullname", - /* 263 */ "idlist", - /* 264 */ "indexed_opt", - /* 265 */ "nulls", - /* 266 */ "with", - /* 267 */ "where_opt_ret", - /* 268 */ "setlist", - /* 269 */ "insert_cmd", - /* 270 */ "idlist_opt", - /* 271 */ "upsert", - /* 272 */ "returning", - /* 273 */ 
"filter_over", - /* 274 */ "likeop", - /* 275 */ "between_op", - /* 276 */ "in_op", - /* 277 */ "paren_exprlist", - /* 278 */ "case_operand", - /* 279 */ "case_exprlist", - /* 280 */ "case_else", - /* 281 */ "uniqueflag", - /* 282 */ "collate", - /* 283 */ "vinto", - /* 284 */ "nmnum", - /* 285 */ "trigger_decl", - /* 286 */ "trigger_cmd_list", - /* 287 */ "trigger_time", - /* 288 */ "trigger_event", - /* 289 */ "foreach_clause", - /* 290 */ "when_clause", - /* 291 */ "trigger_cmd", - /* 292 */ "trnm", - /* 293 */ "tridxby", - /* 294 */ "database_kw_opt", - /* 295 */ "key_opt", - /* 296 */ "add_column_fullname", - /* 297 */ "kwcolumn_opt", - /* 298 */ "create_vtab", - /* 299 */ "vtabarglist", - /* 300 */ "vtabarg", - /* 301 */ "vtabargtoken", - /* 302 */ "lp", - /* 303 */ "anylist", - /* 304 */ "wqitem", - /* 305 */ "wqas", - /* 306 */ "windowdefn_list", - /* 307 */ "windowdefn", - /* 308 */ "window", - /* 309 */ "frame_opt", - /* 310 */ "part_opt", - /* 311 */ "filter_clause", - /* 312 */ "over_clause", - /* 313 */ "range_or_rows", - /* 314 */ "frame_bound", - /* 315 */ "frame_bound_s", - /* 316 */ "frame_bound_e", - /* 317 */ "frame_exclude_opt", - /* 318 */ "frame_exclude", + /* 183 */ "QNUMBER", + /* 184 */ "SPACE", + /* 185 */ "ILLEGAL", + /* 186 */ "input", + /* 187 */ "cmdlist", + /* 188 */ "ecmd", + /* 189 */ "cmdx", + /* 190 */ "explain", + /* 191 */ "cmd", + /* 192 */ "transtype", + /* 193 */ "trans_opt", + /* 194 */ "nm", + /* 195 */ "savepoint_opt", + /* 196 */ "create_table", + /* 197 */ "create_table_args", + /* 198 */ "createkw", + /* 199 */ "temp", + /* 200 */ "ifnotexists", + /* 201 */ "dbnm", + /* 202 */ "columnlist", + /* 203 */ "conslist_opt", + /* 204 */ "table_option_set", + /* 205 */ "select", + /* 206 */ "table_option", + /* 207 */ "columnname", + /* 208 */ "carglist", + /* 209 */ "typetoken", + /* 210 */ "typename", + /* 211 */ "signed", + /* 212 */ "plus_num", + /* 213 */ "minus_num", + /* 214 */ "scanpt", + /* 215 */ "scantok", + /* 216 
*/ "ccons", + /* 217 */ "term", + /* 218 */ "expr", + /* 219 */ "onconf", + /* 220 */ "sortorder", + /* 221 */ "autoinc", + /* 222 */ "eidlist_opt", + /* 223 */ "refargs", + /* 224 */ "defer_subclause", + /* 225 */ "generated", + /* 226 */ "refarg", + /* 227 */ "refact", + /* 228 */ "init_deferred_pred_opt", + /* 229 */ "conslist", + /* 230 */ "tconscomma", + /* 231 */ "tcons", + /* 232 */ "sortlist", + /* 233 */ "eidlist", + /* 234 */ "defer_subclause_opt", + /* 235 */ "orconf", + /* 236 */ "resolvetype", + /* 237 */ "raisetype", + /* 238 */ "ifexists", + /* 239 */ "fullname", + /* 240 */ "selectnowith", + /* 241 */ "oneselect", + /* 242 */ "wqlist", + /* 243 */ "multiselect_op", + /* 244 */ "distinct", + /* 245 */ "selcollist", + /* 246 */ "from", + /* 247 */ "where_opt", + /* 248 */ "groupby_opt", + /* 249 */ "having_opt", + /* 250 */ "orderby_opt", + /* 251 */ "limit_opt", + /* 252 */ "window_clause", + /* 253 */ "values", + /* 254 */ "nexprlist", + /* 255 */ "mvalues", + /* 256 */ "sclp", + /* 257 */ "as", + /* 258 */ "seltablist", + /* 259 */ "stl_prefix", + /* 260 */ "joinop", + /* 261 */ "on_using", + /* 262 */ "indexed_by", + /* 263 */ "exprlist", + /* 264 */ "xfullname", + /* 265 */ "idlist", + /* 266 */ "indexed_opt", + /* 267 */ "nulls", + /* 268 */ "with", + /* 269 */ "where_opt_ret", + /* 270 */ "setlist", + /* 271 */ "insert_cmd", + /* 272 */ "idlist_opt", + /* 273 */ "upsert", + /* 274 */ "returning", + /* 275 */ "filter_over", + /* 276 */ "likeop", + /* 277 */ "between_op", + /* 278 */ "in_op", + /* 279 */ "paren_exprlist", + /* 280 */ "case_operand", + /* 281 */ "case_exprlist", + /* 282 */ "case_else", + /* 283 */ "uniqueflag", + /* 284 */ "collate", + /* 285 */ "vinto", + /* 286 */ "nmnum", + /* 287 */ "trigger_decl", + /* 288 */ "trigger_cmd_list", + /* 289 */ "trigger_time", + /* 290 */ "trigger_event", + /* 291 */ "foreach_clause", + /* 292 */ "when_clause", + /* 293 */ "trigger_cmd", + /* 294 */ "trnm", + /* 295 */ "tridxby", + /* 296 */ 
"database_kw_opt", + /* 297 */ "key_opt", + /* 298 */ "add_column_fullname", + /* 299 */ "kwcolumn_opt", + /* 300 */ "create_vtab", + /* 301 */ "vtabarglist", + /* 302 */ "vtabarg", + /* 303 */ "vtabargtoken", + /* 304 */ "lp", + /* 305 */ "anylist", + /* 306 */ "wqitem", + /* 307 */ "wqas", + /* 308 */ "withnm", + /* 309 */ "windowdefn_list", + /* 310 */ "windowdefn", + /* 311 */ "window", + /* 312 */ "frame_opt", + /* 313 */ "part_opt", + /* 314 */ "filter_clause", + /* 315 */ "over_clause", + /* 316 */ "range_or_rows", + /* 317 */ "frame_bound", + /* 318 */ "frame_bound_s", + /* 319 */ "frame_bound_e", + /* 320 */ "frame_exclude_opt", + /* 321 */ "frame_exclude", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -173057,351 +174421,363 @@ static const char *const yyRuleName[] = { /* 92 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt", /* 93 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt", /* 94 */ "values ::= VALUES LP nexprlist RP", - /* 95 */ "values ::= values COMMA LP nexprlist RP", - /* 96 */ "distinct ::= DISTINCT", - /* 97 */ "distinct ::= ALL", - /* 98 */ "distinct ::=", - /* 99 */ "sclp ::=", - /* 100 */ "selcollist ::= sclp scanpt expr scanpt as", - /* 101 */ "selcollist ::= sclp scanpt STAR", - /* 102 */ "selcollist ::= sclp scanpt nm DOT STAR", - /* 103 */ "as ::= AS nm", - /* 104 */ "as ::=", - /* 105 */ "from ::=", - /* 106 */ "from ::= FROM seltablist", - /* 107 */ "stl_prefix ::= seltablist joinop", - /* 108 */ "stl_prefix ::=", - /* 109 */ "seltablist ::= stl_prefix nm dbnm as on_using", - /* 110 */ "seltablist ::= stl_prefix nm dbnm as indexed_by on_using", - /* 111 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using", - /* 112 */ "seltablist ::= stl_prefix LP select RP as on_using", - /* 113 */ "seltablist ::= stl_prefix LP seltablist RP as on_using", - /* 114 */ "dbnm ::=", - /* 115 */ "dbnm 
::= DOT nm", - /* 116 */ "fullname ::= nm", - /* 117 */ "fullname ::= nm DOT nm", - /* 118 */ "xfullname ::= nm", - /* 119 */ "xfullname ::= nm DOT nm", - /* 120 */ "xfullname ::= nm DOT nm AS nm", - /* 121 */ "xfullname ::= nm AS nm", - /* 122 */ "joinop ::= COMMA|JOIN", - /* 123 */ "joinop ::= JOIN_KW JOIN", - /* 124 */ "joinop ::= JOIN_KW nm JOIN", - /* 125 */ "joinop ::= JOIN_KW nm nm JOIN", - /* 126 */ "on_using ::= ON expr", - /* 127 */ "on_using ::= USING LP idlist RP", - /* 128 */ "on_using ::=", - /* 129 */ "indexed_opt ::=", - /* 130 */ "indexed_by ::= INDEXED BY nm", - /* 131 */ "indexed_by ::= NOT INDEXED", - /* 132 */ "orderby_opt ::=", - /* 133 */ "orderby_opt ::= ORDER BY sortlist", - /* 134 */ "sortlist ::= sortlist COMMA expr sortorder nulls", - /* 135 */ "sortlist ::= expr sortorder nulls", - /* 136 */ "sortorder ::= ASC", - /* 137 */ "sortorder ::= DESC", - /* 138 */ "sortorder ::=", - /* 139 */ "nulls ::= NULLS FIRST", - /* 140 */ "nulls ::= NULLS LAST", - /* 141 */ "nulls ::=", - /* 142 */ "groupby_opt ::=", - /* 143 */ "groupby_opt ::= GROUP BY nexprlist", - /* 144 */ "having_opt ::=", - /* 145 */ "having_opt ::= HAVING expr", - /* 146 */ "limit_opt ::=", - /* 147 */ "limit_opt ::= LIMIT expr", - /* 148 */ "limit_opt ::= LIMIT expr OFFSET expr", - /* 149 */ "limit_opt ::= LIMIT expr COMMA expr", - /* 150 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt", - /* 151 */ "where_opt ::=", - /* 152 */ "where_opt ::= WHERE expr", - /* 153 */ "where_opt_ret ::=", - /* 154 */ "where_opt_ret ::= WHERE expr", - /* 155 */ "where_opt_ret ::= RETURNING selcollist", - /* 156 */ "where_opt_ret ::= WHERE expr RETURNING selcollist", - /* 157 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt", - /* 158 */ "setlist ::= setlist COMMA nm EQ expr", - /* 159 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", - /* 160 */ "setlist ::= nm EQ expr", - /* 161 */ "setlist ::= LP 
idlist RP EQ expr", - /* 162 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", - /* 163 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning", - /* 164 */ "upsert ::=", - /* 165 */ "upsert ::= RETURNING selcollist", - /* 166 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert", - /* 167 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert", - /* 168 */ "upsert ::= ON CONFLICT DO NOTHING returning", - /* 169 */ "upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning", - /* 170 */ "returning ::= RETURNING selcollist", - /* 171 */ "insert_cmd ::= INSERT orconf", - /* 172 */ "insert_cmd ::= REPLACE", - /* 173 */ "idlist_opt ::=", - /* 174 */ "idlist_opt ::= LP idlist RP", - /* 175 */ "idlist ::= idlist COMMA nm", - /* 176 */ "idlist ::= nm", - /* 177 */ "expr ::= LP expr RP", - /* 178 */ "expr ::= ID|INDEXED|JOIN_KW", - /* 179 */ "expr ::= nm DOT nm", - /* 180 */ "expr ::= nm DOT nm DOT nm", - /* 181 */ "term ::= NULL|FLOAT|BLOB", - /* 182 */ "term ::= STRING", - /* 183 */ "term ::= INTEGER", - /* 184 */ "expr ::= VARIABLE", - /* 185 */ "expr ::= expr COLLATE ID|STRING", - /* 186 */ "expr ::= CAST LP expr AS typetoken RP", - /* 187 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP", - /* 188 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP", - /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP", - /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over", - /* 191 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over", - /* 192 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over", - /* 193 */ "term ::= CTIME_KW", - /* 194 */ "expr ::= LP nexprlist COMMA expr RP", - /* 195 */ "expr ::= expr AND expr", - /* 196 */ "expr ::= expr OR expr", - /* 197 */ "expr ::= expr LT|GT|GE|LE expr", - /* 198 */ "expr ::= expr EQ|NE expr", - /* 199 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT 
expr", - /* 200 */ "expr ::= expr PLUS|MINUS expr", - /* 201 */ "expr ::= expr STAR|SLASH|REM expr", - /* 202 */ "expr ::= expr CONCAT expr", - /* 203 */ "likeop ::= NOT LIKE_KW|MATCH", - /* 204 */ "expr ::= expr likeop expr", - /* 205 */ "expr ::= expr likeop expr ESCAPE expr", - /* 206 */ "expr ::= expr ISNULL|NOTNULL", - /* 207 */ "expr ::= expr NOT NULL", - /* 208 */ "expr ::= expr IS expr", - /* 209 */ "expr ::= expr IS NOT expr", - /* 210 */ "expr ::= expr IS NOT DISTINCT FROM expr", - /* 211 */ "expr ::= expr IS DISTINCT FROM expr", - /* 212 */ "expr ::= NOT expr", - /* 213 */ "expr ::= BITNOT expr", - /* 214 */ "expr ::= PLUS|MINUS expr", - /* 215 */ "expr ::= expr PTR expr", - /* 216 */ "between_op ::= BETWEEN", - /* 217 */ "between_op ::= NOT BETWEEN", - /* 218 */ "expr ::= expr between_op expr AND expr", - /* 219 */ "in_op ::= IN", - /* 220 */ "in_op ::= NOT IN", - /* 221 */ "expr ::= expr in_op LP exprlist RP", - /* 222 */ "expr ::= LP select RP", - /* 223 */ "expr ::= expr in_op LP select RP", - /* 224 */ "expr ::= expr in_op nm dbnm paren_exprlist", - /* 225 */ "expr ::= EXISTS LP select RP", - /* 226 */ "expr ::= CASE case_operand case_exprlist case_else END", - /* 227 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", - /* 228 */ "case_exprlist ::= WHEN expr THEN expr", - /* 229 */ "case_else ::= ELSE expr", - /* 230 */ "case_else ::=", - /* 231 */ "case_operand ::=", - /* 232 */ "exprlist ::=", - /* 233 */ "nexprlist ::= nexprlist COMMA expr", - /* 234 */ "nexprlist ::= expr", - /* 235 */ "paren_exprlist ::=", - /* 236 */ "paren_exprlist ::= LP exprlist RP", - /* 237 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", - /* 238 */ "uniqueflag ::= UNIQUE", - /* 239 */ "uniqueflag ::=", - /* 240 */ "eidlist_opt ::=", - /* 241 */ "eidlist_opt ::= LP eidlist RP", - /* 242 */ "eidlist ::= eidlist COMMA nm collate sortorder", - /* 243 */ "eidlist ::= nm collate sortorder", - /* 244 */ "collate ::=", - /* 245 
*/ "collate ::= COLLATE ID|STRING", - /* 246 */ "cmd ::= DROP INDEX ifexists fullname", - /* 247 */ "cmd ::= VACUUM vinto", - /* 248 */ "cmd ::= VACUUM nm vinto", - /* 249 */ "vinto ::= INTO expr", - /* 250 */ "vinto ::=", - /* 251 */ "cmd ::= PRAGMA nm dbnm", - /* 252 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", - /* 253 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", - /* 254 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", - /* 255 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", - /* 256 */ "plus_num ::= PLUS INTEGER|FLOAT", - /* 257 */ "minus_num ::= MINUS INTEGER|FLOAT", - /* 258 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", - /* 259 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", - /* 260 */ "trigger_time ::= BEFORE|AFTER", - /* 261 */ "trigger_time ::= INSTEAD OF", - /* 262 */ "trigger_time ::=", - /* 263 */ "trigger_event ::= DELETE|INSERT", - /* 264 */ "trigger_event ::= UPDATE", - /* 265 */ "trigger_event ::= UPDATE OF idlist", - /* 266 */ "when_clause ::=", - /* 267 */ "when_clause ::= WHEN expr", - /* 268 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", - /* 269 */ "trigger_cmd_list ::= trigger_cmd SEMI", - /* 270 */ "trnm ::= nm DOT nm", - /* 271 */ "tridxby ::= INDEXED BY nm", - /* 272 */ "tridxby ::= NOT INDEXED", - /* 273 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", - /* 274 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", - /* 275 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", - /* 276 */ "trigger_cmd ::= scanpt select scanpt", - /* 277 */ "expr ::= RAISE LP IGNORE RP", - /* 278 */ "expr ::= RAISE LP raisetype COMMA nm RP", - /* 279 */ "raisetype ::= ROLLBACK", - /* 280 */ "raisetype ::= ABORT", - /* 281 */ "raisetype ::= FAIL", - /* 282 */ "cmd ::= DROP TRIGGER ifexists fullname", - /* 283 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", - /* 284 */ "cmd ::= DETACH 
database_kw_opt expr", - /* 285 */ "key_opt ::=", - /* 286 */ "key_opt ::= KEY expr", - /* 287 */ "cmd ::= REINDEX", - /* 288 */ "cmd ::= REINDEX nm dbnm", - /* 289 */ "cmd ::= ANALYZE", - /* 290 */ "cmd ::= ANALYZE nm dbnm", - /* 291 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", - /* 292 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", - /* 293 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", - /* 294 */ "add_column_fullname ::= fullname", - /* 295 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", - /* 296 */ "cmd ::= create_vtab", - /* 297 */ "cmd ::= create_vtab LP vtabarglist RP", - /* 298 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", - /* 299 */ "vtabarg ::=", - /* 300 */ "vtabargtoken ::= ANY", - /* 301 */ "vtabargtoken ::= lp anylist RP", - /* 302 */ "lp ::= LP", - /* 303 */ "with ::= WITH wqlist", - /* 304 */ "with ::= WITH RECURSIVE wqlist", - /* 305 */ "wqas ::= AS", - /* 306 */ "wqas ::= AS MATERIALIZED", - /* 307 */ "wqas ::= AS NOT MATERIALIZED", - /* 308 */ "wqitem ::= nm eidlist_opt wqas LP select RP", - /* 309 */ "wqlist ::= wqitem", - /* 310 */ "wqlist ::= wqlist COMMA wqitem", - /* 311 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", - /* 312 */ "windowdefn ::= nm AS LP window RP", - /* 313 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", - /* 314 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", - /* 315 */ "window ::= ORDER BY sortlist frame_opt", - /* 316 */ "window ::= nm ORDER BY sortlist frame_opt", - /* 317 */ "window ::= nm frame_opt", - /* 318 */ "frame_opt ::=", - /* 319 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", - /* 320 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", - /* 321 */ "range_or_rows ::= RANGE|ROWS|GROUPS", - /* 322 */ "frame_bound_s ::= frame_bound", - /* 323 */ "frame_bound_s ::= UNBOUNDED PRECEDING", - /* 324 */ "frame_bound_e ::= 
frame_bound", - /* 325 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", - /* 326 */ "frame_bound ::= expr PRECEDING|FOLLOWING", - /* 327 */ "frame_bound ::= CURRENT ROW", - /* 328 */ "frame_exclude_opt ::=", - /* 329 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", - /* 330 */ "frame_exclude ::= NO OTHERS", - /* 331 */ "frame_exclude ::= CURRENT ROW", - /* 332 */ "frame_exclude ::= GROUP|TIES", - /* 333 */ "window_clause ::= WINDOW windowdefn_list", - /* 334 */ "filter_over ::= filter_clause over_clause", - /* 335 */ "filter_over ::= over_clause", - /* 336 */ "filter_over ::= filter_clause", - /* 337 */ "over_clause ::= OVER LP window RP", - /* 338 */ "over_clause ::= OVER nm", - /* 339 */ "filter_clause ::= FILTER LP WHERE expr RP", - /* 340 */ "input ::= cmdlist", - /* 341 */ "cmdlist ::= cmdlist ecmd", - /* 342 */ "cmdlist ::= ecmd", - /* 343 */ "ecmd ::= SEMI", - /* 344 */ "ecmd ::= cmdx SEMI", - /* 345 */ "ecmd ::= explain cmdx SEMI", - /* 346 */ "trans_opt ::=", - /* 347 */ "trans_opt ::= TRANSACTION", - /* 348 */ "trans_opt ::= TRANSACTION nm", - /* 349 */ "savepoint_opt ::= SAVEPOINT", - /* 350 */ "savepoint_opt ::=", - /* 351 */ "cmd ::= create_table create_table_args", - /* 352 */ "table_option_set ::= table_option", - /* 353 */ "columnlist ::= columnlist COMMA columnname carglist", - /* 354 */ "columnlist ::= columnname carglist", - /* 355 */ "nm ::= ID|INDEXED|JOIN_KW", - /* 356 */ "nm ::= STRING", - /* 357 */ "typetoken ::= typename", - /* 358 */ "typename ::= ID|STRING", - /* 359 */ "signed ::= plus_num", - /* 360 */ "signed ::= minus_num", - /* 361 */ "carglist ::= carglist ccons", - /* 362 */ "carglist ::=", - /* 363 */ "ccons ::= NULL onconf", - /* 364 */ "ccons ::= GENERATED ALWAYS AS generated", - /* 365 */ "ccons ::= AS generated", - /* 366 */ "conslist_opt ::= COMMA conslist", - /* 367 */ "conslist ::= conslist tconscomma tcons", - /* 368 */ "conslist ::= tcons", - /* 369 */ "tconscomma ::=", - /* 370 */ "defer_subclause_opt ::= defer_subclause", - 
/* 371 */ "resolvetype ::= raisetype", - /* 372 */ "selectnowith ::= oneselect", - /* 373 */ "oneselect ::= values", - /* 374 */ "sclp ::= selcollist COMMA", - /* 375 */ "as ::= ID|STRING", - /* 376 */ "indexed_opt ::= indexed_by", - /* 377 */ "returning ::=", - /* 378 */ "expr ::= term", - /* 379 */ "likeop ::= LIKE_KW|MATCH", - /* 380 */ "case_operand ::= expr", - /* 381 */ "exprlist ::= nexprlist", - /* 382 */ "nmnum ::= plus_num", - /* 383 */ "nmnum ::= nm", - /* 384 */ "nmnum ::= ON", - /* 385 */ "nmnum ::= DELETE", - /* 386 */ "nmnum ::= DEFAULT", - /* 387 */ "plus_num ::= INTEGER|FLOAT", - /* 388 */ "foreach_clause ::=", - /* 389 */ "foreach_clause ::= FOR EACH ROW", - /* 390 */ "trnm ::= nm", - /* 391 */ "tridxby ::=", - /* 392 */ "database_kw_opt ::= DATABASE", - /* 393 */ "database_kw_opt ::=", - /* 394 */ "kwcolumn_opt ::=", - /* 395 */ "kwcolumn_opt ::= COLUMNKW", - /* 396 */ "vtabarglist ::= vtabarg", - /* 397 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 398 */ "vtabarg ::= vtabarg vtabargtoken", - /* 399 */ "anylist ::=", - /* 400 */ "anylist ::= anylist LP anylist RP", - /* 401 */ "anylist ::= anylist ANY", - /* 402 */ "with ::=", - /* 403 */ "windowdefn_list ::= windowdefn", - /* 404 */ "window ::= frame_opt", + /* 95 */ "oneselect ::= mvalues", + /* 96 */ "mvalues ::= values COMMA LP nexprlist RP", + /* 97 */ "mvalues ::= mvalues COMMA LP nexprlist RP", + /* 98 */ "distinct ::= DISTINCT", + /* 99 */ "distinct ::= ALL", + /* 100 */ "distinct ::=", + /* 101 */ "sclp ::=", + /* 102 */ "selcollist ::= sclp scanpt expr scanpt as", + /* 103 */ "selcollist ::= sclp scanpt STAR", + /* 104 */ "selcollist ::= sclp scanpt nm DOT STAR", + /* 105 */ "as ::= AS nm", + /* 106 */ "as ::=", + /* 107 */ "from ::=", + /* 108 */ "from ::= FROM seltablist", + /* 109 */ "stl_prefix ::= seltablist joinop", + /* 110 */ "stl_prefix ::=", + /* 111 */ "seltablist ::= stl_prefix nm dbnm as on_using", + /* 112 */ "seltablist ::= stl_prefix nm dbnm as indexed_by 
on_using", + /* 113 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using", + /* 114 */ "seltablist ::= stl_prefix LP select RP as on_using", + /* 115 */ "seltablist ::= stl_prefix LP seltablist RP as on_using", + /* 116 */ "dbnm ::=", + /* 117 */ "dbnm ::= DOT nm", + /* 118 */ "fullname ::= nm", + /* 119 */ "fullname ::= nm DOT nm", + /* 120 */ "xfullname ::= nm", + /* 121 */ "xfullname ::= nm DOT nm", + /* 122 */ "xfullname ::= nm DOT nm AS nm", + /* 123 */ "xfullname ::= nm AS nm", + /* 124 */ "joinop ::= COMMA|JOIN", + /* 125 */ "joinop ::= JOIN_KW JOIN", + /* 126 */ "joinop ::= JOIN_KW nm JOIN", + /* 127 */ "joinop ::= JOIN_KW nm nm JOIN", + /* 128 */ "on_using ::= ON expr", + /* 129 */ "on_using ::= USING LP idlist RP", + /* 130 */ "on_using ::=", + /* 131 */ "indexed_opt ::=", + /* 132 */ "indexed_by ::= INDEXED BY nm", + /* 133 */ "indexed_by ::= NOT INDEXED", + /* 134 */ "orderby_opt ::=", + /* 135 */ "orderby_opt ::= ORDER BY sortlist", + /* 136 */ "sortlist ::= sortlist COMMA expr sortorder nulls", + /* 137 */ "sortlist ::= expr sortorder nulls", + /* 138 */ "sortorder ::= ASC", + /* 139 */ "sortorder ::= DESC", + /* 140 */ "sortorder ::=", + /* 141 */ "nulls ::= NULLS FIRST", + /* 142 */ "nulls ::= NULLS LAST", + /* 143 */ "nulls ::=", + /* 144 */ "groupby_opt ::=", + /* 145 */ "groupby_opt ::= GROUP BY nexprlist", + /* 146 */ "having_opt ::=", + /* 147 */ "having_opt ::= HAVING expr", + /* 148 */ "limit_opt ::=", + /* 149 */ "limit_opt ::= LIMIT expr", + /* 150 */ "limit_opt ::= LIMIT expr OFFSET expr", + /* 151 */ "limit_opt ::= LIMIT expr COMMA expr", + /* 152 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt", + /* 153 */ "where_opt ::=", + /* 154 */ "where_opt ::= WHERE expr", + /* 155 */ "where_opt_ret ::=", + /* 156 */ "where_opt_ret ::= WHERE expr", + /* 157 */ "where_opt_ret ::= RETURNING selcollist", + /* 158 */ "where_opt_ret ::= WHERE expr RETURNING selcollist", + /* 159 */ "cmd ::= with UPDATE 
orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt", + /* 160 */ "setlist ::= setlist COMMA nm EQ expr", + /* 161 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", + /* 162 */ "setlist ::= nm EQ expr", + /* 163 */ "setlist ::= LP idlist RP EQ expr", + /* 164 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", + /* 165 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning", + /* 166 */ "upsert ::=", + /* 167 */ "upsert ::= RETURNING selcollist", + /* 168 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert", + /* 169 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert", + /* 170 */ "upsert ::= ON CONFLICT DO NOTHING returning", + /* 171 */ "upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning", + /* 172 */ "returning ::= RETURNING selcollist", + /* 173 */ "insert_cmd ::= INSERT orconf", + /* 174 */ "insert_cmd ::= REPLACE", + /* 175 */ "idlist_opt ::=", + /* 176 */ "idlist_opt ::= LP idlist RP", + /* 177 */ "idlist ::= idlist COMMA nm", + /* 178 */ "idlist ::= nm", + /* 179 */ "expr ::= LP expr RP", + /* 180 */ "expr ::= ID|INDEXED|JOIN_KW", + /* 181 */ "expr ::= nm DOT nm", + /* 182 */ "expr ::= nm DOT nm DOT nm", + /* 183 */ "term ::= NULL|FLOAT|BLOB", + /* 184 */ "term ::= STRING", + /* 185 */ "term ::= INTEGER", + /* 186 */ "expr ::= VARIABLE", + /* 187 */ "expr ::= expr COLLATE ID|STRING", + /* 188 */ "expr ::= CAST LP expr AS typetoken RP", + /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP", + /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP", + /* 191 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP", + /* 192 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over", + /* 193 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over", + /* 194 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over", + /* 195 */ "term ::= CTIME_KW", + /* 
196 */ "expr ::= LP nexprlist COMMA expr RP", + /* 197 */ "expr ::= expr AND expr", + /* 198 */ "expr ::= expr OR expr", + /* 199 */ "expr ::= expr LT|GT|GE|LE expr", + /* 200 */ "expr ::= expr EQ|NE expr", + /* 201 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", + /* 202 */ "expr ::= expr PLUS|MINUS expr", + /* 203 */ "expr ::= expr STAR|SLASH|REM expr", + /* 204 */ "expr ::= expr CONCAT expr", + /* 205 */ "likeop ::= NOT LIKE_KW|MATCH", + /* 206 */ "expr ::= expr likeop expr", + /* 207 */ "expr ::= expr likeop expr ESCAPE expr", + /* 208 */ "expr ::= expr ISNULL|NOTNULL", + /* 209 */ "expr ::= expr NOT NULL", + /* 210 */ "expr ::= expr IS expr", + /* 211 */ "expr ::= expr IS NOT expr", + /* 212 */ "expr ::= expr IS NOT DISTINCT FROM expr", + /* 213 */ "expr ::= expr IS DISTINCT FROM expr", + /* 214 */ "expr ::= NOT expr", + /* 215 */ "expr ::= BITNOT expr", + /* 216 */ "expr ::= PLUS|MINUS expr", + /* 217 */ "expr ::= expr PTR expr", + /* 218 */ "between_op ::= BETWEEN", + /* 219 */ "between_op ::= NOT BETWEEN", + /* 220 */ "expr ::= expr between_op expr AND expr", + /* 221 */ "in_op ::= IN", + /* 222 */ "in_op ::= NOT IN", + /* 223 */ "expr ::= expr in_op LP exprlist RP", + /* 224 */ "expr ::= LP select RP", + /* 225 */ "expr ::= expr in_op LP select RP", + /* 226 */ "expr ::= expr in_op nm dbnm paren_exprlist", + /* 227 */ "expr ::= EXISTS LP select RP", + /* 228 */ "expr ::= CASE case_operand case_exprlist case_else END", + /* 229 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", + /* 230 */ "case_exprlist ::= WHEN expr THEN expr", + /* 231 */ "case_else ::= ELSE expr", + /* 232 */ "case_else ::=", + /* 233 */ "case_operand ::=", + /* 234 */ "exprlist ::=", + /* 235 */ "nexprlist ::= nexprlist COMMA expr", + /* 236 */ "nexprlist ::= expr", + /* 237 */ "paren_exprlist ::=", + /* 238 */ "paren_exprlist ::= LP exprlist RP", + /* 239 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", + /* 240 */ "uniqueflag ::= 
UNIQUE", + /* 241 */ "uniqueflag ::=", + /* 242 */ "eidlist_opt ::=", + /* 243 */ "eidlist_opt ::= LP eidlist RP", + /* 244 */ "eidlist ::= eidlist COMMA nm collate sortorder", + /* 245 */ "eidlist ::= nm collate sortorder", + /* 246 */ "collate ::=", + /* 247 */ "collate ::= COLLATE ID|STRING", + /* 248 */ "cmd ::= DROP INDEX ifexists fullname", + /* 249 */ "cmd ::= VACUUM vinto", + /* 250 */ "cmd ::= VACUUM nm vinto", + /* 251 */ "vinto ::= INTO expr", + /* 252 */ "vinto ::=", + /* 253 */ "cmd ::= PRAGMA nm dbnm", + /* 254 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", + /* 255 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", + /* 256 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", + /* 257 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", + /* 258 */ "plus_num ::= PLUS INTEGER|FLOAT", + /* 259 */ "minus_num ::= MINUS INTEGER|FLOAT", + /* 260 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", + /* 261 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", + /* 262 */ "trigger_time ::= BEFORE|AFTER", + /* 263 */ "trigger_time ::= INSTEAD OF", + /* 264 */ "trigger_time ::=", + /* 265 */ "trigger_event ::= DELETE|INSERT", + /* 266 */ "trigger_event ::= UPDATE", + /* 267 */ "trigger_event ::= UPDATE OF idlist", + /* 268 */ "when_clause ::=", + /* 269 */ "when_clause ::= WHEN expr", + /* 270 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", + /* 271 */ "trigger_cmd_list ::= trigger_cmd SEMI", + /* 272 */ "trnm ::= nm DOT nm", + /* 273 */ "tridxby ::= INDEXED BY nm", + /* 274 */ "tridxby ::= NOT INDEXED", + /* 275 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", + /* 276 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", + /* 277 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", + /* 278 */ "trigger_cmd ::= scanpt select scanpt", + /* 279 */ "expr ::= RAISE LP IGNORE RP", + /* 280 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 
281 */ "raisetype ::= ROLLBACK", + /* 282 */ "raisetype ::= ABORT", + /* 283 */ "raisetype ::= FAIL", + /* 284 */ "cmd ::= DROP TRIGGER ifexists fullname", + /* 285 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", + /* 286 */ "cmd ::= DETACH database_kw_opt expr", + /* 287 */ "key_opt ::=", + /* 288 */ "key_opt ::= KEY expr", + /* 289 */ "cmd ::= REINDEX", + /* 290 */ "cmd ::= REINDEX nm dbnm", + /* 291 */ "cmd ::= ANALYZE", + /* 292 */ "cmd ::= ANALYZE nm dbnm", + /* 293 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", + /* 294 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", + /* 295 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", + /* 296 */ "add_column_fullname ::= fullname", + /* 297 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", + /* 298 */ "cmd ::= create_vtab", + /* 299 */ "cmd ::= create_vtab LP vtabarglist RP", + /* 300 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", + /* 301 */ "vtabarg ::=", + /* 302 */ "vtabargtoken ::= ANY", + /* 303 */ "vtabargtoken ::= lp anylist RP", + /* 304 */ "lp ::= LP", + /* 305 */ "with ::= WITH wqlist", + /* 306 */ "with ::= WITH RECURSIVE wqlist", + /* 307 */ "wqas ::= AS", + /* 308 */ "wqas ::= AS MATERIALIZED", + /* 309 */ "wqas ::= AS NOT MATERIALIZED", + /* 310 */ "wqitem ::= withnm eidlist_opt wqas LP select RP", + /* 311 */ "withnm ::= nm", + /* 312 */ "wqlist ::= wqitem", + /* 313 */ "wqlist ::= wqlist COMMA wqitem", + /* 314 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", + /* 315 */ "windowdefn ::= nm AS LP window RP", + /* 316 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", + /* 317 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", + /* 318 */ "window ::= ORDER BY sortlist frame_opt", + /* 319 */ "window ::= nm ORDER BY sortlist frame_opt", + /* 320 */ "window ::= nm frame_opt", + /* 321 */ "frame_opt ::=", + /* 322 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", + 
/* 323 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", + /* 324 */ "range_or_rows ::= RANGE|ROWS|GROUPS", + /* 325 */ "frame_bound_s ::= frame_bound", + /* 326 */ "frame_bound_s ::= UNBOUNDED PRECEDING", + /* 327 */ "frame_bound_e ::= frame_bound", + /* 328 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", + /* 329 */ "frame_bound ::= expr PRECEDING|FOLLOWING", + /* 330 */ "frame_bound ::= CURRENT ROW", + /* 331 */ "frame_exclude_opt ::=", + /* 332 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", + /* 333 */ "frame_exclude ::= NO OTHERS", + /* 334 */ "frame_exclude ::= CURRENT ROW", + /* 335 */ "frame_exclude ::= GROUP|TIES", + /* 336 */ "window_clause ::= WINDOW windowdefn_list", + /* 337 */ "filter_over ::= filter_clause over_clause", + /* 338 */ "filter_over ::= over_clause", + /* 339 */ "filter_over ::= filter_clause", + /* 340 */ "over_clause ::= OVER LP window RP", + /* 341 */ "over_clause ::= OVER nm", + /* 342 */ "filter_clause ::= FILTER LP WHERE expr RP", + /* 343 */ "term ::= QNUMBER", + /* 344 */ "input ::= cmdlist", + /* 345 */ "cmdlist ::= cmdlist ecmd", + /* 346 */ "cmdlist ::= ecmd", + /* 347 */ "ecmd ::= SEMI", + /* 348 */ "ecmd ::= cmdx SEMI", + /* 349 */ "ecmd ::= explain cmdx SEMI", + /* 350 */ "trans_opt ::=", + /* 351 */ "trans_opt ::= TRANSACTION", + /* 352 */ "trans_opt ::= TRANSACTION nm", + /* 353 */ "savepoint_opt ::= SAVEPOINT", + /* 354 */ "savepoint_opt ::=", + /* 355 */ "cmd ::= create_table create_table_args", + /* 356 */ "table_option_set ::= table_option", + /* 357 */ "columnlist ::= columnlist COMMA columnname carglist", + /* 358 */ "columnlist ::= columnname carglist", + /* 359 */ "nm ::= ID|INDEXED|JOIN_KW", + /* 360 */ "nm ::= STRING", + /* 361 */ "typetoken ::= typename", + /* 362 */ "typename ::= ID|STRING", + /* 363 */ "signed ::= plus_num", + /* 364 */ "signed ::= minus_num", + /* 365 */ "carglist ::= carglist ccons", + /* 366 */ "carglist ::=", + /* 367 */ "ccons ::= NULL onconf", + 
/* 368 */ "ccons ::= GENERATED ALWAYS AS generated", + /* 369 */ "ccons ::= AS generated", + /* 370 */ "conslist_opt ::= COMMA conslist", + /* 371 */ "conslist ::= conslist tconscomma tcons", + /* 372 */ "conslist ::= tcons", + /* 373 */ "tconscomma ::=", + /* 374 */ "defer_subclause_opt ::= defer_subclause", + /* 375 */ "resolvetype ::= raisetype", + /* 376 */ "selectnowith ::= oneselect", + /* 377 */ "oneselect ::= values", + /* 378 */ "sclp ::= selcollist COMMA", + /* 379 */ "as ::= ID|STRING", + /* 380 */ "indexed_opt ::= indexed_by", + /* 381 */ "returning ::=", + /* 382 */ "expr ::= term", + /* 383 */ "likeop ::= LIKE_KW|MATCH", + /* 384 */ "case_operand ::= expr", + /* 385 */ "exprlist ::= nexprlist", + /* 386 */ "nmnum ::= plus_num", + /* 387 */ "nmnum ::= nm", + /* 388 */ "nmnum ::= ON", + /* 389 */ "nmnum ::= DELETE", + /* 390 */ "nmnum ::= DEFAULT", + /* 391 */ "plus_num ::= INTEGER|FLOAT", + /* 392 */ "foreach_clause ::=", + /* 393 */ "foreach_clause ::= FOR EACH ROW", + /* 394 */ "trnm ::= nm", + /* 395 */ "tridxby ::=", + /* 396 */ "database_kw_opt ::= DATABASE", + /* 397 */ "database_kw_opt ::=", + /* 398 */ "kwcolumn_opt ::=", + /* 399 */ "kwcolumn_opt ::= COLUMNKW", + /* 400 */ "vtabarglist ::= vtabarg", + /* 401 */ "vtabarglist ::= vtabarglist COMMA vtabarg", + /* 402 */ "vtabarg ::= vtabarg vtabargtoken", + /* 403 */ "anylist ::=", + /* 404 */ "anylist ::= anylist LP anylist RP", + /* 405 */ "anylist ::= anylist ANY", + /* 406 */ "with ::=", + /* 407 */ "windowdefn_list ::= windowdefn", + /* 408 */ "window ::= frame_opt", }; #endif /* NDEBUG */ -#if YYSTACKDEPTH<=0 +#if YYGROWABLESTACK /* ** Try to increase the size of the parser stack. Return the number ** of errors. Return 0 on success. */ static int yyGrowStack(yyParser *p){ + int oldSize = 1 + (int)(p->yystackEnd - p->yystack); int newSize; int idx; yyStackEntry *pNew; - newSize = p->yystksz*2 + 100; - idx = p->yytos ? 
(int)(p->yytos - p->yystack) : 0; - if( p->yystack==&p->yystk0 ){ - pNew = malloc(newSize*sizeof(pNew[0])); - if( pNew ) pNew[0] = p->yystk0; + newSize = oldSize*2 + 100; + idx = (int)(p->yytos - p->yystack); + if( p->yystack==p->yystk0 ){ + pNew = YYREALLOC(0, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; + memcpy(pNew, p->yystack, oldSize*sizeof(pNew[0])); }else{ - pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + pNew = YYREALLOC(p->yystack, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; } - if( pNew ){ - p->yystack = pNew; - p->yytos = &p->yystack[idx]; + p->yystack = pNew; + p->yytos = &p->yystack[idx]; #ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", - yyTracePrompt, p->yystksz, newSize); - } -#endif - p->yystksz = newSize; + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", + yyTracePrompt, oldSize, newSize); } - return pNew==0; +#endif + p->yystackEnd = &p->yystack[newSize-1]; + return 0; } +#endif /* YYGROWABLESTACK */ + +#if !YYGROWABLESTACK +/* For builds that do no have a growable stack, yyGrowStack always +** returns an error. 
+*/ +# define yyGrowStack(X) 1 #endif /* Datatype of the argument to the memory allocated passed as the @@ -173421,24 +174797,14 @@ SQLITE_PRIVATE void sqlite3ParserInit(void *yypRawParser sqlite3ParserCTX_PDECL) #ifdef YYTRACKMAXSTACKDEPTH yypParser->yyhwm = 0; #endif -#if YYSTACKDEPTH<=0 - yypParser->yytos = NULL; - yypParser->yystack = NULL; - yypParser->yystksz = 0; - if( yyGrowStack(yypParser) ){ - yypParser->yystack = &yypParser->yystk0; - yypParser->yystksz = 1; - } -#endif + yypParser->yystack = yypParser->yystk0; + yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; #ifndef YYNOERRORRECOVERY yypParser->yyerrcnt = -1; #endif yypParser->yytos = yypParser->yystack; yypParser->yystack[0].stateno = 0; yypParser->yystack[0].major = 0; -#if YYSTACKDEPTH>0 - yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; -#endif } #ifndef sqlite3Parser_ENGINEALWAYSONSTACK @@ -173492,97 +174858,98 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 204: /* select */ - case 239: /* selectnowith */ - case 240: /* oneselect */ - case 252: /* values */ + case 205: /* select */ + case 240: /* selectnowith */ + case 241: /* oneselect */ + case 253: /* values */ + case 255: /* mvalues */ { -sqlite3SelectDelete(pParse->db, (yypminor->yy47)); -} - break; - case 216: /* term */ - case 217: /* expr */ - case 246: /* where_opt */ - case 248: /* having_opt */ - case 267: /* where_opt_ret */ - case 278: /* case_operand */ - case 280: /* case_else */ - case 283: /* vinto */ - case 290: /* when_clause */ - case 295: /* key_opt */ - case 311: /* filter_clause */ +sqlite3SelectDelete(pParse->db, (yypminor->yy555)); +} + break; + case 217: /* term */ + case 218: /* expr */ + case 247: /* where_opt */ + case 249: /* having_opt */ + case 269: /* where_opt_ret */ + case 280: /* case_operand */ + case 282: /* case_else */ + case 285: /* vinto */ + case 292: /* when_clause */ + case 297: /* 
key_opt */ + case 314: /* filter_clause */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy528)); -} - break; - case 221: /* eidlist_opt */ - case 231: /* sortlist */ - case 232: /* eidlist */ - case 244: /* selcollist */ - case 247: /* groupby_opt */ - case 249: /* orderby_opt */ - case 253: /* nexprlist */ - case 254: /* sclp */ - case 261: /* exprlist */ - case 268: /* setlist */ - case 277: /* paren_exprlist */ - case 279: /* case_exprlist */ - case 310: /* part_opt */ +sqlite3ExprDelete(pParse->db, (yypminor->yy454)); +} + break; + case 222: /* eidlist_opt */ + case 232: /* sortlist */ + case 233: /* eidlist */ + case 245: /* selcollist */ + case 248: /* groupby_opt */ + case 250: /* orderby_opt */ + case 254: /* nexprlist */ + case 256: /* sclp */ + case 263: /* exprlist */ + case 270: /* setlist */ + case 279: /* paren_exprlist */ + case 281: /* case_exprlist */ + case 313: /* part_opt */ { -sqlite3ExprListDelete(pParse->db, (yypminor->yy322)); +sqlite3ExprListDelete(pParse->db, (yypminor->yy14)); } break; - case 238: /* fullname */ - case 245: /* from */ - case 256: /* seltablist */ - case 257: /* stl_prefix */ - case 262: /* xfullname */ + case 239: /* fullname */ + case 246: /* from */ + case 258: /* seltablist */ + case 259: /* stl_prefix */ + case 264: /* xfullname */ { -sqlite3SrcListDelete(pParse->db, (yypminor->yy131)); +sqlite3SrcListDelete(pParse->db, (yypminor->yy203)); } break; - case 241: /* wqlist */ + case 242: /* wqlist */ { -sqlite3WithDelete(pParse->db, (yypminor->yy521)); +sqlite3WithDelete(pParse->db, (yypminor->yy59)); } break; - case 251: /* window_clause */ - case 306: /* windowdefn_list */ + case 252: /* window_clause */ + case 309: /* windowdefn_list */ { -sqlite3WindowListDelete(pParse->db, (yypminor->yy41)); +sqlite3WindowListDelete(pParse->db, (yypminor->yy211)); } break; - case 263: /* idlist */ - case 270: /* idlist_opt */ + case 265: /* idlist */ + case 272: /* idlist_opt */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy254)); 
+sqlite3IdListDelete(pParse->db, (yypminor->yy132)); } break; - case 273: /* filter_over */ - case 307: /* windowdefn */ - case 308: /* window */ - case 309: /* frame_opt */ - case 312: /* over_clause */ + case 275: /* filter_over */ + case 310: /* windowdefn */ + case 311: /* window */ + case 312: /* frame_opt */ + case 315: /* over_clause */ { -sqlite3WindowDelete(pParse->db, (yypminor->yy41)); +sqlite3WindowDelete(pParse->db, (yypminor->yy211)); } break; - case 286: /* trigger_cmd_list */ - case 291: /* trigger_cmd */ + case 288: /* trigger_cmd_list */ + case 293: /* trigger_cmd */ { -sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy33)); +sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy427)); } break; - case 288: /* trigger_event */ + case 290: /* trigger_event */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy180).b); +sqlite3IdListDelete(pParse->db, (yypminor->yy286).b); } break; - case 314: /* frame_bound */ - case 315: /* frame_bound_s */ - case 316: /* frame_bound_e */ + case 317: /* frame_bound */ + case 318: /* frame_bound_s */ + case 319: /* frame_bound_e */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy595).pExpr); +sqlite3ExprDelete(pParse->db, (yypminor->yy509).pExpr); } break; /********* End destructor definitions *****************************************/ @@ -173616,9 +174983,26 @@ static void yy_pop_parser_stack(yyParser *pParser){ */ SQLITE_PRIVATE void sqlite3ParserFinalize(void *p){ yyParser *pParser = (yyParser*)p; - while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser); -#if YYSTACKDEPTH<=0 - if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack); + + /* In-lined version of calling yy_pop_parser_stack() for each + ** element left in the stack */ + yyStackEntry *yytos = pParser->yytos; + while( yytos>pParser->yystack ){ +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sPopping %s\n", + yyTracePrompt, + yyTokenName[yytos->major]); + } +#endif + if( yytos->major>=YY_MIN_DSTRCTR ){ + yy_destructor(pParser, 
yytos->major, &yytos->minor); + } + yytos--; + } + +#if YYGROWABLESTACK + if( pParser->yystack!=pParser->yystk0 ) YYFREE(pParser->yystack); #endif } @@ -173801,7 +175185,7 @@ static void yyStackOverflow(yyParser *yypParser){ ** stack every overflows */ /******** Begin %stack_overflow code ******************************************/ - sqlite3ErrorMsg(pParse, "parser stack overflow"); + sqlite3OomFault(pParse->db); /******** End %stack_overflow code ********************************************/ sqlite3ParserARG_STORE /* Suppress warning about unused %extra_argument var */ sqlite3ParserCTX_STORE @@ -173845,25 +175229,19 @@ static void yy_shift( assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) ); } #endif -#if YYSTACKDEPTH>0 - if( yypParser->yytos>yypParser->yystackEnd ){ - yypParser->yytos--; - yyStackOverflow(yypParser); - return; - } -#else - if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){ + yytos = yypParser->yytos; + if( yytos>yypParser->yystackEnd ){ if( yyGrowStack(yypParser) ){ yypParser->yytos--; yyStackOverflow(yypParser); return; } + yytos = yypParser->yytos; + assert( yytos <= yypParser->yystackEnd ); } -#endif if( yyNewState > YY_MAX_SHIFT ){ yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; } - yytos = yypParser->yytos; yytos->stateno = yyNewState; yytos->major = yyMajor; yytos->minor.yy0 = yyMinor; @@ -173873,411 +175251,415 @@ static void yy_shift( /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side ** of that rule */ static const YYCODETYPE yyRuleInfoLhs[] = { - 189, /* (0) explain ::= EXPLAIN */ - 189, /* (1) explain ::= EXPLAIN QUERY PLAN */ - 188, /* (2) cmdx ::= cmd */ - 190, /* (3) cmd ::= BEGIN transtype trans_opt */ - 191, /* (4) transtype ::= */ - 191, /* (5) transtype ::= DEFERRED */ - 191, /* (6) transtype ::= IMMEDIATE */ - 191, /* (7) transtype ::= EXCLUSIVE */ - 190, /* (8) cmd ::= COMMIT|END trans_opt */ - 190, /* (9) cmd ::= ROLLBACK trans_opt */ - 190, /* (10) cmd ::= 
SAVEPOINT nm */ - 190, /* (11) cmd ::= RELEASE savepoint_opt nm */ - 190, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ - 195, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ - 197, /* (14) createkw ::= CREATE */ - 199, /* (15) ifnotexists ::= */ - 199, /* (16) ifnotexists ::= IF NOT EXISTS */ - 198, /* (17) temp ::= TEMP */ - 198, /* (18) temp ::= */ - 196, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ - 196, /* (20) create_table_args ::= AS select */ - 203, /* (21) table_option_set ::= */ - 203, /* (22) table_option_set ::= table_option_set COMMA table_option */ - 205, /* (23) table_option ::= WITHOUT nm */ - 205, /* (24) table_option ::= nm */ - 206, /* (25) columnname ::= nm typetoken */ - 208, /* (26) typetoken ::= */ - 208, /* (27) typetoken ::= typename LP signed RP */ - 208, /* (28) typetoken ::= typename LP signed COMMA signed RP */ - 209, /* (29) typename ::= typename ID|STRING */ - 213, /* (30) scanpt ::= */ - 214, /* (31) scantok ::= */ - 215, /* (32) ccons ::= CONSTRAINT nm */ - 215, /* (33) ccons ::= DEFAULT scantok term */ - 215, /* (34) ccons ::= DEFAULT LP expr RP */ - 215, /* (35) ccons ::= DEFAULT PLUS scantok term */ - 215, /* (36) ccons ::= DEFAULT MINUS scantok term */ - 215, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ - 215, /* (38) ccons ::= NOT NULL onconf */ - 215, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ - 215, /* (40) ccons ::= UNIQUE onconf */ - 215, /* (41) ccons ::= CHECK LP expr RP */ - 215, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ - 215, /* (43) ccons ::= defer_subclause */ - 215, /* (44) ccons ::= COLLATE ID|STRING */ - 224, /* (45) generated ::= LP expr RP */ - 224, /* (46) generated ::= LP expr RP ID */ - 220, /* (47) autoinc ::= */ - 220, /* (48) autoinc ::= AUTOINCR */ - 222, /* (49) refargs ::= */ - 222, /* (50) refargs ::= refargs refarg */ - 225, /* (51) refarg ::= MATCH nm */ - 225, /* (52) refarg ::= ON INSERT refact */ - 
225, /* (53) refarg ::= ON DELETE refact */ - 225, /* (54) refarg ::= ON UPDATE refact */ - 226, /* (55) refact ::= SET NULL */ - 226, /* (56) refact ::= SET DEFAULT */ - 226, /* (57) refact ::= CASCADE */ - 226, /* (58) refact ::= RESTRICT */ - 226, /* (59) refact ::= NO ACTION */ - 223, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ - 223, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - 227, /* (62) init_deferred_pred_opt ::= */ - 227, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ - 227, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ - 202, /* (65) conslist_opt ::= */ - 229, /* (66) tconscomma ::= COMMA */ - 230, /* (67) tcons ::= CONSTRAINT nm */ - 230, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ - 230, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ - 230, /* (70) tcons ::= CHECK LP expr RP onconf */ - 230, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ - 233, /* (72) defer_subclause_opt ::= */ - 218, /* (73) onconf ::= */ - 218, /* (74) onconf ::= ON CONFLICT resolvetype */ - 234, /* (75) orconf ::= */ - 234, /* (76) orconf ::= OR resolvetype */ - 235, /* (77) resolvetype ::= IGNORE */ - 235, /* (78) resolvetype ::= REPLACE */ - 190, /* (79) cmd ::= DROP TABLE ifexists fullname */ - 237, /* (80) ifexists ::= IF EXISTS */ - 237, /* (81) ifexists ::= */ - 190, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ - 190, /* (83) cmd ::= DROP VIEW ifexists fullname */ - 190, /* (84) cmd ::= select */ - 204, /* (85) select ::= WITH wqlist selectnowith */ - 204, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ - 204, /* (87) select ::= selectnowith */ - 239, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ - 242, /* (89) multiselect_op ::= UNION */ - 242, /* (90) multiselect_op ::= UNION ALL */ - 242, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ - 240, /* (92) oneselect ::= 
SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ - 240, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ - 252, /* (94) values ::= VALUES LP nexprlist RP */ - 252, /* (95) values ::= values COMMA LP nexprlist RP */ - 243, /* (96) distinct ::= DISTINCT */ - 243, /* (97) distinct ::= ALL */ - 243, /* (98) distinct ::= */ - 254, /* (99) sclp ::= */ - 244, /* (100) selcollist ::= sclp scanpt expr scanpt as */ - 244, /* (101) selcollist ::= sclp scanpt STAR */ - 244, /* (102) selcollist ::= sclp scanpt nm DOT STAR */ - 255, /* (103) as ::= AS nm */ - 255, /* (104) as ::= */ - 245, /* (105) from ::= */ - 245, /* (106) from ::= FROM seltablist */ - 257, /* (107) stl_prefix ::= seltablist joinop */ - 257, /* (108) stl_prefix ::= */ - 256, /* (109) seltablist ::= stl_prefix nm dbnm as on_using */ - 256, /* (110) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ - 256, /* (111) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ - 256, /* (112) seltablist ::= stl_prefix LP select RP as on_using */ - 256, /* (113) seltablist ::= stl_prefix LP seltablist RP as on_using */ - 200, /* (114) dbnm ::= */ - 200, /* (115) dbnm ::= DOT nm */ - 238, /* (116) fullname ::= nm */ - 238, /* (117) fullname ::= nm DOT nm */ - 262, /* (118) xfullname ::= nm */ - 262, /* (119) xfullname ::= nm DOT nm */ - 262, /* (120) xfullname ::= nm DOT nm AS nm */ - 262, /* (121) xfullname ::= nm AS nm */ - 258, /* (122) joinop ::= COMMA|JOIN */ - 258, /* (123) joinop ::= JOIN_KW JOIN */ - 258, /* (124) joinop ::= JOIN_KW nm JOIN */ - 258, /* (125) joinop ::= JOIN_KW nm nm JOIN */ - 259, /* (126) on_using ::= ON expr */ - 259, /* (127) on_using ::= USING LP idlist RP */ - 259, /* (128) on_using ::= */ - 264, /* (129) indexed_opt ::= */ - 260, /* (130) indexed_by ::= INDEXED BY nm */ - 260, /* (131) indexed_by ::= NOT INDEXED */ - 249, /* (132) orderby_opt ::= */ - 249, 
/* (133) orderby_opt ::= ORDER BY sortlist */ - 231, /* (134) sortlist ::= sortlist COMMA expr sortorder nulls */ - 231, /* (135) sortlist ::= expr sortorder nulls */ - 219, /* (136) sortorder ::= ASC */ - 219, /* (137) sortorder ::= DESC */ - 219, /* (138) sortorder ::= */ - 265, /* (139) nulls ::= NULLS FIRST */ - 265, /* (140) nulls ::= NULLS LAST */ - 265, /* (141) nulls ::= */ - 247, /* (142) groupby_opt ::= */ - 247, /* (143) groupby_opt ::= GROUP BY nexprlist */ - 248, /* (144) having_opt ::= */ - 248, /* (145) having_opt ::= HAVING expr */ - 250, /* (146) limit_opt ::= */ - 250, /* (147) limit_opt ::= LIMIT expr */ - 250, /* (148) limit_opt ::= LIMIT expr OFFSET expr */ - 250, /* (149) limit_opt ::= LIMIT expr COMMA expr */ - 190, /* (150) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ - 246, /* (151) where_opt ::= */ - 246, /* (152) where_opt ::= WHERE expr */ - 267, /* (153) where_opt_ret ::= */ - 267, /* (154) where_opt_ret ::= WHERE expr */ - 267, /* (155) where_opt_ret ::= RETURNING selcollist */ - 267, /* (156) where_opt_ret ::= WHERE expr RETURNING selcollist */ - 190, /* (157) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ - 268, /* (158) setlist ::= setlist COMMA nm EQ expr */ - 268, /* (159) setlist ::= setlist COMMA LP idlist RP EQ expr */ - 268, /* (160) setlist ::= nm EQ expr */ - 268, /* (161) setlist ::= LP idlist RP EQ expr */ - 190, /* (162) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - 190, /* (163) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ - 271, /* (164) upsert ::= */ - 271, /* (165) upsert ::= RETURNING selcollist */ - 271, /* (166) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ - 271, /* (167) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ - 271, /* (168) upsert ::= ON CONFLICT DO NOTHING returning */ - 271, /* (169) 
upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ - 272, /* (170) returning ::= RETURNING selcollist */ - 269, /* (171) insert_cmd ::= INSERT orconf */ - 269, /* (172) insert_cmd ::= REPLACE */ - 270, /* (173) idlist_opt ::= */ - 270, /* (174) idlist_opt ::= LP idlist RP */ - 263, /* (175) idlist ::= idlist COMMA nm */ - 263, /* (176) idlist ::= nm */ - 217, /* (177) expr ::= LP expr RP */ - 217, /* (178) expr ::= ID|INDEXED|JOIN_KW */ - 217, /* (179) expr ::= nm DOT nm */ - 217, /* (180) expr ::= nm DOT nm DOT nm */ - 216, /* (181) term ::= NULL|FLOAT|BLOB */ - 216, /* (182) term ::= STRING */ - 216, /* (183) term ::= INTEGER */ - 217, /* (184) expr ::= VARIABLE */ - 217, /* (185) expr ::= expr COLLATE ID|STRING */ - 217, /* (186) expr ::= CAST LP expr AS typetoken RP */ - 217, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ - 217, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ - 217, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ - 217, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ - 217, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ - 217, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ - 216, /* (193) term ::= CTIME_KW */ - 217, /* (194) expr ::= LP nexprlist COMMA expr RP */ - 217, /* (195) expr ::= expr AND expr */ - 217, /* (196) expr ::= expr OR expr */ - 217, /* (197) expr ::= expr LT|GT|GE|LE expr */ - 217, /* (198) expr ::= expr EQ|NE expr */ - 217, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 217, /* (200) expr ::= expr PLUS|MINUS expr */ - 217, /* (201) expr ::= expr STAR|SLASH|REM expr */ - 217, /* (202) expr ::= expr CONCAT expr */ - 274, /* (203) likeop ::= NOT LIKE_KW|MATCH */ - 217, /* (204) expr ::= expr likeop expr */ - 217, /* (205) expr ::= expr likeop expr ESCAPE expr */ - 217, /* (206) expr ::= expr ISNULL|NOTNULL */ - 217, /* (207) expr ::= expr NOT NULL */ - 
217, /* (208) expr ::= expr IS expr */ - 217, /* (209) expr ::= expr IS NOT expr */ - 217, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */ - 217, /* (211) expr ::= expr IS DISTINCT FROM expr */ - 217, /* (212) expr ::= NOT expr */ - 217, /* (213) expr ::= BITNOT expr */ - 217, /* (214) expr ::= PLUS|MINUS expr */ - 217, /* (215) expr ::= expr PTR expr */ - 275, /* (216) between_op ::= BETWEEN */ - 275, /* (217) between_op ::= NOT BETWEEN */ - 217, /* (218) expr ::= expr between_op expr AND expr */ - 276, /* (219) in_op ::= IN */ - 276, /* (220) in_op ::= NOT IN */ - 217, /* (221) expr ::= expr in_op LP exprlist RP */ - 217, /* (222) expr ::= LP select RP */ - 217, /* (223) expr ::= expr in_op LP select RP */ - 217, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */ - 217, /* (225) expr ::= EXISTS LP select RP */ - 217, /* (226) expr ::= CASE case_operand case_exprlist case_else END */ - 279, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 279, /* (228) case_exprlist ::= WHEN expr THEN expr */ - 280, /* (229) case_else ::= ELSE expr */ - 280, /* (230) case_else ::= */ - 278, /* (231) case_operand ::= */ - 261, /* (232) exprlist ::= */ - 253, /* (233) nexprlist ::= nexprlist COMMA expr */ - 253, /* (234) nexprlist ::= expr */ - 277, /* (235) paren_exprlist ::= */ - 277, /* (236) paren_exprlist ::= LP exprlist RP */ - 190, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - 281, /* (238) uniqueflag ::= UNIQUE */ - 281, /* (239) uniqueflag ::= */ - 221, /* (240) eidlist_opt ::= */ - 221, /* (241) eidlist_opt ::= LP eidlist RP */ - 232, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */ - 232, /* (243) eidlist ::= nm collate sortorder */ - 282, /* (244) collate ::= */ - 282, /* (245) collate ::= COLLATE ID|STRING */ - 190, /* (246) cmd ::= DROP INDEX ifexists fullname */ - 190, /* (247) cmd ::= VACUUM vinto */ - 190, /* (248) cmd ::= VACUUM nm vinto */ - 283, /* (249) vinto ::= INTO expr */ 
- 283, /* (250) vinto ::= */ - 190, /* (251) cmd ::= PRAGMA nm dbnm */ - 190, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */ - 190, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - 190, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */ - 190, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - 211, /* (256) plus_num ::= PLUS INTEGER|FLOAT */ - 212, /* (257) minus_num ::= MINUS INTEGER|FLOAT */ - 190, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - 285, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - 287, /* (260) trigger_time ::= BEFORE|AFTER */ - 287, /* (261) trigger_time ::= INSTEAD OF */ - 287, /* (262) trigger_time ::= */ - 288, /* (263) trigger_event ::= DELETE|INSERT */ - 288, /* (264) trigger_event ::= UPDATE */ - 288, /* (265) trigger_event ::= UPDATE OF idlist */ - 290, /* (266) when_clause ::= */ - 290, /* (267) when_clause ::= WHEN expr */ - 286, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - 286, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */ - 292, /* (270) trnm ::= nm DOT nm */ - 293, /* (271) tridxby ::= INDEXED BY nm */ - 293, /* (272) tridxby ::= NOT INDEXED */ - 291, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - 291, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - 291, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - 291, /* (276) trigger_cmd ::= scanpt select scanpt */ - 217, /* (277) expr ::= RAISE LP IGNORE RP */ - 217, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */ - 236, /* (279) raisetype ::= ROLLBACK */ - 236, /* (280) raisetype ::= ABORT */ - 236, /* (281) raisetype ::= FAIL */ - 190, /* (282) cmd ::= DROP TRIGGER ifexists fullname */ - 190, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - 190, /* (284) cmd ::= DETACH database_kw_opt expr */ - 295, /* (285) key_opt ::= */ - 295, /* 
(286) key_opt ::= KEY expr */ - 190, /* (287) cmd ::= REINDEX */ - 190, /* (288) cmd ::= REINDEX nm dbnm */ - 190, /* (289) cmd ::= ANALYZE */ - 190, /* (290) cmd ::= ANALYZE nm dbnm */ - 190, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */ - 190, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - 190, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - 296, /* (294) add_column_fullname ::= fullname */ - 190, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - 190, /* (296) cmd ::= create_vtab */ - 190, /* (297) cmd ::= create_vtab LP vtabarglist RP */ - 298, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 300, /* (299) vtabarg ::= */ - 301, /* (300) vtabargtoken ::= ANY */ - 301, /* (301) vtabargtoken ::= lp anylist RP */ - 302, /* (302) lp ::= LP */ - 266, /* (303) with ::= WITH wqlist */ - 266, /* (304) with ::= WITH RECURSIVE wqlist */ - 305, /* (305) wqas ::= AS */ - 305, /* (306) wqas ::= AS MATERIALIZED */ - 305, /* (307) wqas ::= AS NOT MATERIALIZED */ - 304, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ - 241, /* (309) wqlist ::= wqitem */ - 241, /* (310) wqlist ::= wqlist COMMA wqitem */ - 306, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 307, /* (312) windowdefn ::= nm AS LP window RP */ - 308, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 308, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - 308, /* (315) window ::= ORDER BY sortlist frame_opt */ - 308, /* (316) window ::= nm ORDER BY sortlist frame_opt */ - 308, /* (317) window ::= nm frame_opt */ - 309, /* (318) frame_opt ::= */ - 309, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 309, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 313, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ - 315, /* (322) frame_bound_s ::= frame_bound */ - 
315, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ - 316, /* (324) frame_bound_e ::= frame_bound */ - 316, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 314, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ - 314, /* (327) frame_bound ::= CURRENT ROW */ - 317, /* (328) frame_exclude_opt ::= */ - 317, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 318, /* (330) frame_exclude ::= NO OTHERS */ - 318, /* (331) frame_exclude ::= CURRENT ROW */ - 318, /* (332) frame_exclude ::= GROUP|TIES */ - 251, /* (333) window_clause ::= WINDOW windowdefn_list */ - 273, /* (334) filter_over ::= filter_clause over_clause */ - 273, /* (335) filter_over ::= over_clause */ - 273, /* (336) filter_over ::= filter_clause */ - 312, /* (337) over_clause ::= OVER LP window RP */ - 312, /* (338) over_clause ::= OVER nm */ - 311, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ - 185, /* (340) input ::= cmdlist */ - 186, /* (341) cmdlist ::= cmdlist ecmd */ - 186, /* (342) cmdlist ::= ecmd */ - 187, /* (343) ecmd ::= SEMI */ - 187, /* (344) ecmd ::= cmdx SEMI */ - 187, /* (345) ecmd ::= explain cmdx SEMI */ - 192, /* (346) trans_opt ::= */ - 192, /* (347) trans_opt ::= TRANSACTION */ - 192, /* (348) trans_opt ::= TRANSACTION nm */ - 194, /* (349) savepoint_opt ::= SAVEPOINT */ - 194, /* (350) savepoint_opt ::= */ - 190, /* (351) cmd ::= create_table create_table_args */ - 203, /* (352) table_option_set ::= table_option */ - 201, /* (353) columnlist ::= columnlist COMMA columnname carglist */ - 201, /* (354) columnlist ::= columnname carglist */ - 193, /* (355) nm ::= ID|INDEXED|JOIN_KW */ - 193, /* (356) nm ::= STRING */ - 208, /* (357) typetoken ::= typename */ - 209, /* (358) typename ::= ID|STRING */ - 210, /* (359) signed ::= plus_num */ - 210, /* (360) signed ::= minus_num */ - 207, /* (361) carglist ::= carglist ccons */ - 207, /* (362) carglist ::= */ - 215, /* (363) ccons ::= NULL onconf */ - 215, /* (364) ccons ::= GENERATED ALWAYS AS generated */ - 215, /* 
(365) ccons ::= AS generated */ - 202, /* (366) conslist_opt ::= COMMA conslist */ - 228, /* (367) conslist ::= conslist tconscomma tcons */ - 228, /* (368) conslist ::= tcons */ - 229, /* (369) tconscomma ::= */ - 233, /* (370) defer_subclause_opt ::= defer_subclause */ - 235, /* (371) resolvetype ::= raisetype */ - 239, /* (372) selectnowith ::= oneselect */ - 240, /* (373) oneselect ::= values */ - 254, /* (374) sclp ::= selcollist COMMA */ - 255, /* (375) as ::= ID|STRING */ - 264, /* (376) indexed_opt ::= indexed_by */ - 272, /* (377) returning ::= */ - 217, /* (378) expr ::= term */ - 274, /* (379) likeop ::= LIKE_KW|MATCH */ - 278, /* (380) case_operand ::= expr */ - 261, /* (381) exprlist ::= nexprlist */ - 284, /* (382) nmnum ::= plus_num */ - 284, /* (383) nmnum ::= nm */ - 284, /* (384) nmnum ::= ON */ - 284, /* (385) nmnum ::= DELETE */ - 284, /* (386) nmnum ::= DEFAULT */ - 211, /* (387) plus_num ::= INTEGER|FLOAT */ - 289, /* (388) foreach_clause ::= */ - 289, /* (389) foreach_clause ::= FOR EACH ROW */ - 292, /* (390) trnm ::= nm */ - 293, /* (391) tridxby ::= */ - 294, /* (392) database_kw_opt ::= DATABASE */ - 294, /* (393) database_kw_opt ::= */ - 297, /* (394) kwcolumn_opt ::= */ - 297, /* (395) kwcolumn_opt ::= COLUMNKW */ - 299, /* (396) vtabarglist ::= vtabarg */ - 299, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ - 300, /* (398) vtabarg ::= vtabarg vtabargtoken */ - 303, /* (399) anylist ::= */ - 303, /* (400) anylist ::= anylist LP anylist RP */ - 303, /* (401) anylist ::= anylist ANY */ - 266, /* (402) with ::= */ - 306, /* (403) windowdefn_list ::= windowdefn */ - 308, /* (404) window ::= frame_opt */ + 190, /* (0) explain ::= EXPLAIN */ + 190, /* (1) explain ::= EXPLAIN QUERY PLAN */ + 189, /* (2) cmdx ::= cmd */ + 191, /* (3) cmd ::= BEGIN transtype trans_opt */ + 192, /* (4) transtype ::= */ + 192, /* (5) transtype ::= DEFERRED */ + 192, /* (6) transtype ::= IMMEDIATE */ + 192, /* (7) transtype ::= EXCLUSIVE */ + 191, /* (8) 
cmd ::= COMMIT|END trans_opt */ + 191, /* (9) cmd ::= ROLLBACK trans_opt */ + 191, /* (10) cmd ::= SAVEPOINT nm */ + 191, /* (11) cmd ::= RELEASE savepoint_opt nm */ + 191, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ + 196, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ + 198, /* (14) createkw ::= CREATE */ + 200, /* (15) ifnotexists ::= */ + 200, /* (16) ifnotexists ::= IF NOT EXISTS */ + 199, /* (17) temp ::= TEMP */ + 199, /* (18) temp ::= */ + 197, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_option_set */ + 197, /* (20) create_table_args ::= AS select */ + 204, /* (21) table_option_set ::= */ + 204, /* (22) table_option_set ::= table_option_set COMMA table_option */ + 206, /* (23) table_option ::= WITHOUT nm */ + 206, /* (24) table_option ::= nm */ + 207, /* (25) columnname ::= nm typetoken */ + 209, /* (26) typetoken ::= */ + 209, /* (27) typetoken ::= typename LP signed RP */ + 209, /* (28) typetoken ::= typename LP signed COMMA signed RP */ + 210, /* (29) typename ::= typename ID|STRING */ + 214, /* (30) scanpt ::= */ + 215, /* (31) scantok ::= */ + 216, /* (32) ccons ::= CONSTRAINT nm */ + 216, /* (33) ccons ::= DEFAULT scantok term */ + 216, /* (34) ccons ::= DEFAULT LP expr RP */ + 216, /* (35) ccons ::= DEFAULT PLUS scantok term */ + 216, /* (36) ccons ::= DEFAULT MINUS scantok term */ + 216, /* (37) ccons ::= DEFAULT scantok ID|INDEXED */ + 216, /* (38) ccons ::= NOT NULL onconf */ + 216, /* (39) ccons ::= PRIMARY KEY sortorder onconf autoinc */ + 216, /* (40) ccons ::= UNIQUE onconf */ + 216, /* (41) ccons ::= CHECK LP expr RP */ + 216, /* (42) ccons ::= REFERENCES nm eidlist_opt refargs */ + 216, /* (43) ccons ::= defer_subclause */ + 216, /* (44) ccons ::= COLLATE ID|STRING */ + 225, /* (45) generated ::= LP expr RP */ + 225, /* (46) generated ::= LP expr RP ID */ + 221, /* (47) autoinc ::= */ + 221, /* (48) autoinc ::= AUTOINCR */ + 223, /* (49) refargs ::= */ + 223, /* (50) refargs ::= 
refargs refarg */ + 226, /* (51) refarg ::= MATCH nm */ + 226, /* (52) refarg ::= ON INSERT refact */ + 226, /* (53) refarg ::= ON DELETE refact */ + 226, /* (54) refarg ::= ON UPDATE refact */ + 227, /* (55) refact ::= SET NULL */ + 227, /* (56) refact ::= SET DEFAULT */ + 227, /* (57) refact ::= CASCADE */ + 227, /* (58) refact ::= RESTRICT */ + 227, /* (59) refact ::= NO ACTION */ + 224, /* (60) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ + 224, /* (61) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + 228, /* (62) init_deferred_pred_opt ::= */ + 228, /* (63) init_deferred_pred_opt ::= INITIALLY DEFERRED */ + 228, /* (64) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ + 203, /* (65) conslist_opt ::= */ + 230, /* (66) tconscomma ::= COMMA */ + 231, /* (67) tcons ::= CONSTRAINT nm */ + 231, /* (68) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ + 231, /* (69) tcons ::= UNIQUE LP sortlist RP onconf */ + 231, /* (70) tcons ::= CHECK LP expr RP onconf */ + 231, /* (71) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ + 234, /* (72) defer_subclause_opt ::= */ + 219, /* (73) onconf ::= */ + 219, /* (74) onconf ::= ON CONFLICT resolvetype */ + 235, /* (75) orconf ::= */ + 235, /* (76) orconf ::= OR resolvetype */ + 236, /* (77) resolvetype ::= IGNORE */ + 236, /* (78) resolvetype ::= REPLACE */ + 191, /* (79) cmd ::= DROP TABLE ifexists fullname */ + 238, /* (80) ifexists ::= IF EXISTS */ + 238, /* (81) ifexists ::= */ + 191, /* (82) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ + 191, /* (83) cmd ::= DROP VIEW ifexists fullname */ + 191, /* (84) cmd ::= select */ + 205, /* (85) select ::= WITH wqlist selectnowith */ + 205, /* (86) select ::= WITH RECURSIVE wqlist selectnowith */ + 205, /* (87) select ::= selectnowith */ + 240, /* (88) selectnowith ::= selectnowith multiselect_op oneselect */ + 243, /* (89) multiselect_op ::= UNION */ + 243, /* (90) multiselect_op 
::= UNION ALL */ + 243, /* (91) multiselect_op ::= EXCEPT|INTERSECT */ + 241, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ + 241, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ + 253, /* (94) values ::= VALUES LP nexprlist RP */ + 241, /* (95) oneselect ::= mvalues */ + 255, /* (96) mvalues ::= values COMMA LP nexprlist RP */ + 255, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ + 244, /* (98) distinct ::= DISTINCT */ + 244, /* (99) distinct ::= ALL */ + 244, /* (100) distinct ::= */ + 256, /* (101) sclp ::= */ + 245, /* (102) selcollist ::= sclp scanpt expr scanpt as */ + 245, /* (103) selcollist ::= sclp scanpt STAR */ + 245, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ + 257, /* (105) as ::= AS nm */ + 257, /* (106) as ::= */ + 246, /* (107) from ::= */ + 246, /* (108) from ::= FROM seltablist */ + 259, /* (109) stl_prefix ::= seltablist joinop */ + 259, /* (110) stl_prefix ::= */ + 258, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ + 258, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ + 258, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ + 258, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ + 258, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ + 201, /* (116) dbnm ::= */ + 201, /* (117) dbnm ::= DOT nm */ + 239, /* (118) fullname ::= nm */ + 239, /* (119) fullname ::= nm DOT nm */ + 264, /* (120) xfullname ::= nm */ + 264, /* (121) xfullname ::= nm DOT nm */ + 264, /* (122) xfullname ::= nm DOT nm AS nm */ + 264, /* (123) xfullname ::= nm AS nm */ + 260, /* (124) joinop ::= COMMA|JOIN */ + 260, /* (125) joinop ::= JOIN_KW JOIN */ + 260, /* (126) joinop ::= JOIN_KW nm JOIN */ + 260, /* (127) joinop ::= JOIN_KW nm nm JOIN */ + 261, /* (128) on_using ::= ON expr */ + 261, /* (129) on_using ::= USING LP idlist RP */ 
+ 261, /* (130) on_using ::= */ + 266, /* (131) indexed_opt ::= */ + 262, /* (132) indexed_by ::= INDEXED BY nm */ + 262, /* (133) indexed_by ::= NOT INDEXED */ + 250, /* (134) orderby_opt ::= */ + 250, /* (135) orderby_opt ::= ORDER BY sortlist */ + 232, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ + 232, /* (137) sortlist ::= expr sortorder nulls */ + 220, /* (138) sortorder ::= ASC */ + 220, /* (139) sortorder ::= DESC */ + 220, /* (140) sortorder ::= */ + 267, /* (141) nulls ::= NULLS FIRST */ + 267, /* (142) nulls ::= NULLS LAST */ + 267, /* (143) nulls ::= */ + 248, /* (144) groupby_opt ::= */ + 248, /* (145) groupby_opt ::= GROUP BY nexprlist */ + 249, /* (146) having_opt ::= */ + 249, /* (147) having_opt ::= HAVING expr */ + 251, /* (148) limit_opt ::= */ + 251, /* (149) limit_opt ::= LIMIT expr */ + 251, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ + 251, /* (151) limit_opt ::= LIMIT expr COMMA expr */ + 191, /* (152) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ + 247, /* (153) where_opt ::= */ + 247, /* (154) where_opt ::= WHERE expr */ + 269, /* (155) where_opt_ret ::= */ + 269, /* (156) where_opt_ret ::= WHERE expr */ + 269, /* (157) where_opt_ret ::= RETURNING selcollist */ + 269, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ + 191, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ + 270, /* (160) setlist ::= setlist COMMA nm EQ expr */ + 270, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ + 270, /* (162) setlist ::= nm EQ expr */ + 270, /* (163) setlist ::= LP idlist RP EQ expr */ + 191, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + 191, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 273, /* (166) upsert ::= */ + 273, /* (167) upsert ::= RETURNING selcollist */ + 273, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO 
UPDATE SET setlist where_opt upsert */ + 273, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + 273, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ + 273, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + 274, /* (172) returning ::= RETURNING selcollist */ + 271, /* (173) insert_cmd ::= INSERT orconf */ + 271, /* (174) insert_cmd ::= REPLACE */ + 272, /* (175) idlist_opt ::= */ + 272, /* (176) idlist_opt ::= LP idlist RP */ + 265, /* (177) idlist ::= idlist COMMA nm */ + 265, /* (178) idlist ::= nm */ + 218, /* (179) expr ::= LP expr RP */ + 218, /* (180) expr ::= ID|INDEXED|JOIN_KW */ + 218, /* (181) expr ::= nm DOT nm */ + 218, /* (182) expr ::= nm DOT nm DOT nm */ + 217, /* (183) term ::= NULL|FLOAT|BLOB */ + 217, /* (184) term ::= STRING */ + 217, /* (185) term ::= INTEGER */ + 218, /* (186) expr ::= VARIABLE */ + 218, /* (187) expr ::= expr COLLATE ID|STRING */ + 218, /* (188) expr ::= CAST LP expr AS typetoken RP */ + 218, /* (189) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + 218, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + 218, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + 218, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + 218, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + 218, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + 217, /* (195) term ::= CTIME_KW */ + 218, /* (196) expr ::= LP nexprlist COMMA expr RP */ + 218, /* (197) expr ::= expr AND expr */ + 218, /* (198) expr ::= expr OR expr */ + 218, /* (199) expr ::= expr LT|GT|GE|LE expr */ + 218, /* (200) expr ::= expr EQ|NE expr */ + 218, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 218, /* (202) expr ::= expr PLUS|MINUS expr */ + 218, /* (203) expr ::= expr STAR|SLASH|REM expr */ + 218, /* (204) expr ::= expr CONCAT expr */ + 276, /* (205) likeop ::= NOT 
LIKE_KW|MATCH */ + 218, /* (206) expr ::= expr likeop expr */ + 218, /* (207) expr ::= expr likeop expr ESCAPE expr */ + 218, /* (208) expr ::= expr ISNULL|NOTNULL */ + 218, /* (209) expr ::= expr NOT NULL */ + 218, /* (210) expr ::= expr IS expr */ + 218, /* (211) expr ::= expr IS NOT expr */ + 218, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ + 218, /* (213) expr ::= expr IS DISTINCT FROM expr */ + 218, /* (214) expr ::= NOT expr */ + 218, /* (215) expr ::= BITNOT expr */ + 218, /* (216) expr ::= PLUS|MINUS expr */ + 218, /* (217) expr ::= expr PTR expr */ + 277, /* (218) between_op ::= BETWEEN */ + 277, /* (219) between_op ::= NOT BETWEEN */ + 218, /* (220) expr ::= expr between_op expr AND expr */ + 278, /* (221) in_op ::= IN */ + 278, /* (222) in_op ::= NOT IN */ + 218, /* (223) expr ::= expr in_op LP exprlist RP */ + 218, /* (224) expr ::= LP select RP */ + 218, /* (225) expr ::= expr in_op LP select RP */ + 218, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ + 218, /* (227) expr ::= EXISTS LP select RP */ + 218, /* (228) expr ::= CASE case_operand case_exprlist case_else END */ + 281, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 281, /* (230) case_exprlist ::= WHEN expr THEN expr */ + 282, /* (231) case_else ::= ELSE expr */ + 282, /* (232) case_else ::= */ + 280, /* (233) case_operand ::= */ + 263, /* (234) exprlist ::= */ + 254, /* (235) nexprlist ::= nexprlist COMMA expr */ + 254, /* (236) nexprlist ::= expr */ + 279, /* (237) paren_exprlist ::= */ + 279, /* (238) paren_exprlist ::= LP exprlist RP */ + 191, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + 283, /* (240) uniqueflag ::= UNIQUE */ + 283, /* (241) uniqueflag ::= */ + 222, /* (242) eidlist_opt ::= */ + 222, /* (243) eidlist_opt ::= LP eidlist RP */ + 233, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ + 233, /* (245) eidlist ::= nm collate sortorder */ + 284, /* (246) collate ::= */ + 284, /* (247) 
collate ::= COLLATE ID|STRING */ + 191, /* (248) cmd ::= DROP INDEX ifexists fullname */ + 191, /* (249) cmd ::= VACUUM vinto */ + 191, /* (250) cmd ::= VACUUM nm vinto */ + 285, /* (251) vinto ::= INTO expr */ + 285, /* (252) vinto ::= */ + 191, /* (253) cmd ::= PRAGMA nm dbnm */ + 191, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ + 191, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + 191, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ + 191, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + 212, /* (258) plus_num ::= PLUS INTEGER|FLOAT */ + 213, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ + 191, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + 287, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + 289, /* (262) trigger_time ::= BEFORE|AFTER */ + 289, /* (263) trigger_time ::= INSTEAD OF */ + 289, /* (264) trigger_time ::= */ + 290, /* (265) trigger_event ::= DELETE|INSERT */ + 290, /* (266) trigger_event ::= UPDATE */ + 290, /* (267) trigger_event ::= UPDATE OF idlist */ + 292, /* (268) when_clause ::= */ + 292, /* (269) when_clause ::= WHEN expr */ + 288, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + 288, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ + 294, /* (272) trnm ::= nm DOT nm */ + 295, /* (273) tridxby ::= INDEXED BY nm */ + 295, /* (274) tridxby ::= NOT INDEXED */ + 293, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + 293, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + 293, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + 293, /* (278) trigger_cmd ::= scanpt select scanpt */ + 218, /* (279) expr ::= RAISE LP IGNORE RP */ + 218, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ + 237, /* (281) raisetype ::= ROLLBACK */ + 237, /* (282) raisetype ::= ABORT */ + 237, /* (283) raisetype ::= FAIL */ + 191, /* (284) 
cmd ::= DROP TRIGGER ifexists fullname */ + 191, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + 191, /* (286) cmd ::= DETACH database_kw_opt expr */ + 297, /* (287) key_opt ::= */ + 297, /* (288) key_opt ::= KEY expr */ + 191, /* (289) cmd ::= REINDEX */ + 191, /* (290) cmd ::= REINDEX nm dbnm */ + 191, /* (291) cmd ::= ANALYZE */ + 191, /* (292) cmd ::= ANALYZE nm dbnm */ + 191, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ + 191, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + 191, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + 298, /* (296) add_column_fullname ::= fullname */ + 191, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + 191, /* (298) cmd ::= create_vtab */ + 191, /* (299) cmd ::= create_vtab LP vtabarglist RP */ + 300, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 302, /* (301) vtabarg ::= */ + 303, /* (302) vtabargtoken ::= ANY */ + 303, /* (303) vtabargtoken ::= lp anylist RP */ + 304, /* (304) lp ::= LP */ + 268, /* (305) with ::= WITH wqlist */ + 268, /* (306) with ::= WITH RECURSIVE wqlist */ + 307, /* (307) wqas ::= AS */ + 307, /* (308) wqas ::= AS MATERIALIZED */ + 307, /* (309) wqas ::= AS NOT MATERIALIZED */ + 306, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ + 308, /* (311) withnm ::= nm */ + 242, /* (312) wqlist ::= wqitem */ + 242, /* (313) wqlist ::= wqlist COMMA wqitem */ + 309, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 310, /* (315) windowdefn ::= nm AS LP window RP */ + 311, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + 311, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 311, /* (318) window ::= ORDER BY sortlist frame_opt */ + 311, /* (319) window ::= nm ORDER BY sortlist frame_opt */ + 311, /* (320) window ::= nm frame_opt */ + 312, /* (321) frame_opt ::= */ + 312, /* (322) frame_opt ::= range_or_rows 
frame_bound_s frame_exclude_opt */ + 312, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 316, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ + 318, /* (325) frame_bound_s ::= frame_bound */ + 318, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ + 319, /* (327) frame_bound_e ::= frame_bound */ + 319, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 317, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ + 317, /* (330) frame_bound ::= CURRENT ROW */ + 320, /* (331) frame_exclude_opt ::= */ + 320, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 321, /* (333) frame_exclude ::= NO OTHERS */ + 321, /* (334) frame_exclude ::= CURRENT ROW */ + 321, /* (335) frame_exclude ::= GROUP|TIES */ + 252, /* (336) window_clause ::= WINDOW windowdefn_list */ + 275, /* (337) filter_over ::= filter_clause over_clause */ + 275, /* (338) filter_over ::= over_clause */ + 275, /* (339) filter_over ::= filter_clause */ + 315, /* (340) over_clause ::= OVER LP window RP */ + 315, /* (341) over_clause ::= OVER nm */ + 314, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ + 217, /* (343) term ::= QNUMBER */ + 186, /* (344) input ::= cmdlist */ + 187, /* (345) cmdlist ::= cmdlist ecmd */ + 187, /* (346) cmdlist ::= ecmd */ + 188, /* (347) ecmd ::= SEMI */ + 188, /* (348) ecmd ::= cmdx SEMI */ + 188, /* (349) ecmd ::= explain cmdx SEMI */ + 193, /* (350) trans_opt ::= */ + 193, /* (351) trans_opt ::= TRANSACTION */ + 193, /* (352) trans_opt ::= TRANSACTION nm */ + 195, /* (353) savepoint_opt ::= SAVEPOINT */ + 195, /* (354) savepoint_opt ::= */ + 191, /* (355) cmd ::= create_table create_table_args */ + 204, /* (356) table_option_set ::= table_option */ + 202, /* (357) columnlist ::= columnlist COMMA columnname carglist */ + 202, /* (358) columnlist ::= columnname carglist */ + 194, /* (359) nm ::= ID|INDEXED|JOIN_KW */ + 194, /* (360) nm ::= STRING */ + 209, /* (361) typetoken ::= typename */ + 210, /* (362) typename 
::= ID|STRING */ + 211, /* (363) signed ::= plus_num */ + 211, /* (364) signed ::= minus_num */ + 208, /* (365) carglist ::= carglist ccons */ + 208, /* (366) carglist ::= */ + 216, /* (367) ccons ::= NULL onconf */ + 216, /* (368) ccons ::= GENERATED ALWAYS AS generated */ + 216, /* (369) ccons ::= AS generated */ + 203, /* (370) conslist_opt ::= COMMA conslist */ + 229, /* (371) conslist ::= conslist tconscomma tcons */ + 229, /* (372) conslist ::= tcons */ + 230, /* (373) tconscomma ::= */ + 234, /* (374) defer_subclause_opt ::= defer_subclause */ + 236, /* (375) resolvetype ::= raisetype */ + 240, /* (376) selectnowith ::= oneselect */ + 241, /* (377) oneselect ::= values */ + 256, /* (378) sclp ::= selcollist COMMA */ + 257, /* (379) as ::= ID|STRING */ + 266, /* (380) indexed_opt ::= indexed_by */ + 274, /* (381) returning ::= */ + 218, /* (382) expr ::= term */ + 276, /* (383) likeop ::= LIKE_KW|MATCH */ + 280, /* (384) case_operand ::= expr */ + 263, /* (385) exprlist ::= nexprlist */ + 286, /* (386) nmnum ::= plus_num */ + 286, /* (387) nmnum ::= nm */ + 286, /* (388) nmnum ::= ON */ + 286, /* (389) nmnum ::= DELETE */ + 286, /* (390) nmnum ::= DEFAULT */ + 212, /* (391) plus_num ::= INTEGER|FLOAT */ + 291, /* (392) foreach_clause ::= */ + 291, /* (393) foreach_clause ::= FOR EACH ROW */ + 294, /* (394) trnm ::= nm */ + 295, /* (395) tridxby ::= */ + 296, /* (396) database_kw_opt ::= DATABASE */ + 296, /* (397) database_kw_opt ::= */ + 299, /* (398) kwcolumn_opt ::= */ + 299, /* (399) kwcolumn_opt ::= COLUMNKW */ + 301, /* (400) vtabarglist ::= vtabarg */ + 301, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ + 302, /* (402) vtabarg ::= vtabarg vtabargtoken */ + 305, /* (403) anylist ::= */ + 305, /* (404) anylist ::= anylist LP anylist RP */ + 305, /* (405) anylist ::= anylist ANY */ + 268, /* (406) with ::= */ + 309, /* (407) windowdefn_list ::= windowdefn */ + 311, /* (408) window ::= frame_opt */ }; /* For rule J, yyRuleInfoNRhs[J] contains the 
negative of the number @@ -174378,316 +175760,320 @@ static const signed char yyRuleInfoNRhs[] = { -9, /* (92) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ -10, /* (93) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ -4, /* (94) values ::= VALUES LP nexprlist RP */ - -5, /* (95) values ::= values COMMA LP nexprlist RP */ - -1, /* (96) distinct ::= DISTINCT */ - -1, /* (97) distinct ::= ALL */ - 0, /* (98) distinct ::= */ - 0, /* (99) sclp ::= */ - -5, /* (100) selcollist ::= sclp scanpt expr scanpt as */ - -3, /* (101) selcollist ::= sclp scanpt STAR */ - -5, /* (102) selcollist ::= sclp scanpt nm DOT STAR */ - -2, /* (103) as ::= AS nm */ - 0, /* (104) as ::= */ - 0, /* (105) from ::= */ - -2, /* (106) from ::= FROM seltablist */ - -2, /* (107) stl_prefix ::= seltablist joinop */ - 0, /* (108) stl_prefix ::= */ - -5, /* (109) seltablist ::= stl_prefix nm dbnm as on_using */ - -6, /* (110) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ - -8, /* (111) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ - -6, /* (112) seltablist ::= stl_prefix LP select RP as on_using */ - -6, /* (113) seltablist ::= stl_prefix LP seltablist RP as on_using */ - 0, /* (114) dbnm ::= */ - -2, /* (115) dbnm ::= DOT nm */ - -1, /* (116) fullname ::= nm */ - -3, /* (117) fullname ::= nm DOT nm */ - -1, /* (118) xfullname ::= nm */ - -3, /* (119) xfullname ::= nm DOT nm */ - -5, /* (120) xfullname ::= nm DOT nm AS nm */ - -3, /* (121) xfullname ::= nm AS nm */ - -1, /* (122) joinop ::= COMMA|JOIN */ - -2, /* (123) joinop ::= JOIN_KW JOIN */ - -3, /* (124) joinop ::= JOIN_KW nm JOIN */ - -4, /* (125) joinop ::= JOIN_KW nm nm JOIN */ - -2, /* (126) on_using ::= ON expr */ - -4, /* (127) on_using ::= USING LP idlist RP */ - 0, /* (128) on_using ::= */ - 0, /* (129) indexed_opt ::= */ - -3, /* (130) indexed_by ::= INDEXED BY nm */ - -2, /* 
(131) indexed_by ::= NOT INDEXED */ - 0, /* (132) orderby_opt ::= */ - -3, /* (133) orderby_opt ::= ORDER BY sortlist */ - -5, /* (134) sortlist ::= sortlist COMMA expr sortorder nulls */ - -3, /* (135) sortlist ::= expr sortorder nulls */ - -1, /* (136) sortorder ::= ASC */ - -1, /* (137) sortorder ::= DESC */ - 0, /* (138) sortorder ::= */ - -2, /* (139) nulls ::= NULLS FIRST */ - -2, /* (140) nulls ::= NULLS LAST */ - 0, /* (141) nulls ::= */ - 0, /* (142) groupby_opt ::= */ - -3, /* (143) groupby_opt ::= GROUP BY nexprlist */ - 0, /* (144) having_opt ::= */ - -2, /* (145) having_opt ::= HAVING expr */ - 0, /* (146) limit_opt ::= */ - -2, /* (147) limit_opt ::= LIMIT expr */ - -4, /* (148) limit_opt ::= LIMIT expr OFFSET expr */ - -4, /* (149) limit_opt ::= LIMIT expr COMMA expr */ - -8, /* (150) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ - 0, /* (151) where_opt ::= */ - -2, /* (152) where_opt ::= WHERE expr */ - 0, /* (153) where_opt_ret ::= */ - -2, /* (154) where_opt_ret ::= WHERE expr */ - -2, /* (155) where_opt_ret ::= RETURNING selcollist */ - -4, /* (156) where_opt_ret ::= WHERE expr RETURNING selcollist */ - -11, /* (157) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ - -5, /* (158) setlist ::= setlist COMMA nm EQ expr */ - -7, /* (159) setlist ::= setlist COMMA LP idlist RP EQ expr */ - -3, /* (160) setlist ::= nm EQ expr */ - -5, /* (161) setlist ::= LP idlist RP EQ expr */ - -7, /* (162) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - -8, /* (163) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ - 0, /* (164) upsert ::= */ - -2, /* (165) upsert ::= RETURNING selcollist */ - -12, /* (166) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ - -9, /* (167) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ - -5, /* (168) upsert ::= ON CONFLICT DO 
NOTHING returning */ - -8, /* (169) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ - -2, /* (170) returning ::= RETURNING selcollist */ - -2, /* (171) insert_cmd ::= INSERT orconf */ - -1, /* (172) insert_cmd ::= REPLACE */ - 0, /* (173) idlist_opt ::= */ - -3, /* (174) idlist_opt ::= LP idlist RP */ - -3, /* (175) idlist ::= idlist COMMA nm */ - -1, /* (176) idlist ::= nm */ - -3, /* (177) expr ::= LP expr RP */ - -1, /* (178) expr ::= ID|INDEXED|JOIN_KW */ - -3, /* (179) expr ::= nm DOT nm */ - -5, /* (180) expr ::= nm DOT nm DOT nm */ - -1, /* (181) term ::= NULL|FLOAT|BLOB */ - -1, /* (182) term ::= STRING */ - -1, /* (183) term ::= INTEGER */ - -1, /* (184) expr ::= VARIABLE */ - -3, /* (185) expr ::= expr COLLATE ID|STRING */ - -6, /* (186) expr ::= CAST LP expr AS typetoken RP */ - -5, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ - -8, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ - -4, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ - -6, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ - -9, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ - -5, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ - -1, /* (193) term ::= CTIME_KW */ - -5, /* (194) expr ::= LP nexprlist COMMA expr RP */ - -3, /* (195) expr ::= expr AND expr */ - -3, /* (196) expr ::= expr OR expr */ - -3, /* (197) expr ::= expr LT|GT|GE|LE expr */ - -3, /* (198) expr ::= expr EQ|NE expr */ - -3, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - -3, /* (200) expr ::= expr PLUS|MINUS expr */ - -3, /* (201) expr ::= expr STAR|SLASH|REM expr */ - -3, /* (202) expr ::= expr CONCAT expr */ - -2, /* (203) likeop ::= NOT LIKE_KW|MATCH */ - -3, /* (204) expr ::= expr likeop expr */ - -5, /* (205) expr ::= expr likeop expr ESCAPE expr */ - -2, /* (206) expr ::= expr ISNULL|NOTNULL */ - -3, /* (207) expr ::= expr NOT NULL */ - -3, /* 
(208) expr ::= expr IS expr */ - -4, /* (209) expr ::= expr IS NOT expr */ - -6, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */ - -5, /* (211) expr ::= expr IS DISTINCT FROM expr */ - -2, /* (212) expr ::= NOT expr */ - -2, /* (213) expr ::= BITNOT expr */ - -2, /* (214) expr ::= PLUS|MINUS expr */ - -3, /* (215) expr ::= expr PTR expr */ - -1, /* (216) between_op ::= BETWEEN */ - -2, /* (217) between_op ::= NOT BETWEEN */ - -5, /* (218) expr ::= expr between_op expr AND expr */ - -1, /* (219) in_op ::= IN */ - -2, /* (220) in_op ::= NOT IN */ - -5, /* (221) expr ::= expr in_op LP exprlist RP */ - -3, /* (222) expr ::= LP select RP */ - -5, /* (223) expr ::= expr in_op LP select RP */ - -5, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */ - -4, /* (225) expr ::= EXISTS LP select RP */ - -5, /* (226) expr ::= CASE case_operand case_exprlist case_else END */ - -5, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - -4, /* (228) case_exprlist ::= WHEN expr THEN expr */ - -2, /* (229) case_else ::= ELSE expr */ - 0, /* (230) case_else ::= */ - 0, /* (231) case_operand ::= */ - 0, /* (232) exprlist ::= */ - -3, /* (233) nexprlist ::= nexprlist COMMA expr */ - -1, /* (234) nexprlist ::= expr */ - 0, /* (235) paren_exprlist ::= */ - -3, /* (236) paren_exprlist ::= LP exprlist RP */ - -12, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - -1, /* (238) uniqueflag ::= UNIQUE */ - 0, /* (239) uniqueflag ::= */ - 0, /* (240) eidlist_opt ::= */ - -3, /* (241) eidlist_opt ::= LP eidlist RP */ - -5, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */ - -3, /* (243) eidlist ::= nm collate sortorder */ - 0, /* (244) collate ::= */ - -2, /* (245) collate ::= COLLATE ID|STRING */ - -4, /* (246) cmd ::= DROP INDEX ifexists fullname */ - -2, /* (247) cmd ::= VACUUM vinto */ - -3, /* (248) cmd ::= VACUUM nm vinto */ - -2, /* (249) vinto ::= INTO expr */ - 0, /* (250) vinto ::= */ - -3, /* (251) cmd ::= 
PRAGMA nm dbnm */ - -5, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */ - -6, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - -5, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */ - -6, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - -2, /* (256) plus_num ::= PLUS INTEGER|FLOAT */ - -2, /* (257) minus_num ::= MINUS INTEGER|FLOAT */ - -5, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - -11, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - -1, /* (260) trigger_time ::= BEFORE|AFTER */ - -2, /* (261) trigger_time ::= INSTEAD OF */ - 0, /* (262) trigger_time ::= */ - -1, /* (263) trigger_event ::= DELETE|INSERT */ - -1, /* (264) trigger_event ::= UPDATE */ - -3, /* (265) trigger_event ::= UPDATE OF idlist */ - 0, /* (266) when_clause ::= */ - -2, /* (267) when_clause ::= WHEN expr */ - -3, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - -2, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */ - -3, /* (270) trnm ::= nm DOT nm */ - -3, /* (271) tridxby ::= INDEXED BY nm */ - -2, /* (272) tridxby ::= NOT INDEXED */ - -9, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - -8, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - -6, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - -3, /* (276) trigger_cmd ::= scanpt select scanpt */ - -4, /* (277) expr ::= RAISE LP IGNORE RP */ - -6, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */ - -1, /* (279) raisetype ::= ROLLBACK */ - -1, /* (280) raisetype ::= ABORT */ - -1, /* (281) raisetype ::= FAIL */ - -4, /* (282) cmd ::= DROP TRIGGER ifexists fullname */ - -6, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - -3, /* (284) cmd ::= DETACH database_kw_opt expr */ - 0, /* (285) key_opt ::= */ - -2, /* (286) key_opt ::= KEY expr */ - -1, /* (287) cmd ::= REINDEX */ - -3, /* (288) cmd ::= 
REINDEX nm dbnm */ - -1, /* (289) cmd ::= ANALYZE */ - -3, /* (290) cmd ::= ANALYZE nm dbnm */ - -6, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */ - -7, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - -6, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ - -1, /* (294) add_column_fullname ::= fullname */ - -8, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - -1, /* (296) cmd ::= create_vtab */ - -4, /* (297) cmd ::= create_vtab LP vtabarglist RP */ - -8, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 0, /* (299) vtabarg ::= */ - -1, /* (300) vtabargtoken ::= ANY */ - -3, /* (301) vtabargtoken ::= lp anylist RP */ - -1, /* (302) lp ::= LP */ - -2, /* (303) with ::= WITH wqlist */ - -3, /* (304) with ::= WITH RECURSIVE wqlist */ - -1, /* (305) wqas ::= AS */ - -2, /* (306) wqas ::= AS MATERIALIZED */ - -3, /* (307) wqas ::= AS NOT MATERIALIZED */ - -6, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ - -1, /* (309) wqlist ::= wqitem */ - -3, /* (310) wqlist ::= wqlist COMMA wqitem */ - -3, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - -5, /* (312) windowdefn ::= nm AS LP window RP */ - -5, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - -6, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - -4, /* (315) window ::= ORDER BY sortlist frame_opt */ - -5, /* (316) window ::= nm ORDER BY sortlist frame_opt */ - -2, /* (317) window ::= nm frame_opt */ - 0, /* (318) frame_opt ::= */ - -3, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - -6, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - -1, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ - -1, /* (322) frame_bound_s ::= frame_bound */ - -2, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ - -1, /* (324) frame_bound_e ::= frame_bound */ - -2, /* (325) 
frame_bound_e ::= UNBOUNDED FOLLOWING */ - -2, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ - -2, /* (327) frame_bound ::= CURRENT ROW */ - 0, /* (328) frame_exclude_opt ::= */ - -2, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ - -2, /* (330) frame_exclude ::= NO OTHERS */ - -2, /* (331) frame_exclude ::= CURRENT ROW */ - -1, /* (332) frame_exclude ::= GROUP|TIES */ - -2, /* (333) window_clause ::= WINDOW windowdefn_list */ - -2, /* (334) filter_over ::= filter_clause over_clause */ - -1, /* (335) filter_over ::= over_clause */ - -1, /* (336) filter_over ::= filter_clause */ - -4, /* (337) over_clause ::= OVER LP window RP */ - -2, /* (338) over_clause ::= OVER nm */ - -5, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ - -1, /* (340) input ::= cmdlist */ - -2, /* (341) cmdlist ::= cmdlist ecmd */ - -1, /* (342) cmdlist ::= ecmd */ - -1, /* (343) ecmd ::= SEMI */ - -2, /* (344) ecmd ::= cmdx SEMI */ - -3, /* (345) ecmd ::= explain cmdx SEMI */ - 0, /* (346) trans_opt ::= */ - -1, /* (347) trans_opt ::= TRANSACTION */ - -2, /* (348) trans_opt ::= TRANSACTION nm */ - -1, /* (349) savepoint_opt ::= SAVEPOINT */ - 0, /* (350) savepoint_opt ::= */ - -2, /* (351) cmd ::= create_table create_table_args */ - -1, /* (352) table_option_set ::= table_option */ - -4, /* (353) columnlist ::= columnlist COMMA columnname carglist */ - -2, /* (354) columnlist ::= columnname carglist */ - -1, /* (355) nm ::= ID|INDEXED|JOIN_KW */ - -1, /* (356) nm ::= STRING */ - -1, /* (357) typetoken ::= typename */ - -1, /* (358) typename ::= ID|STRING */ - -1, /* (359) signed ::= plus_num */ - -1, /* (360) signed ::= minus_num */ - -2, /* (361) carglist ::= carglist ccons */ - 0, /* (362) carglist ::= */ - -2, /* (363) ccons ::= NULL onconf */ - -4, /* (364) ccons ::= GENERATED ALWAYS AS generated */ - -2, /* (365) ccons ::= AS generated */ - -2, /* (366) conslist_opt ::= COMMA conslist */ - -3, /* (367) conslist ::= conslist tconscomma tcons */ - -1, /* (368) conslist 
::= tcons */ - 0, /* (369) tconscomma ::= */ - -1, /* (370) defer_subclause_opt ::= defer_subclause */ - -1, /* (371) resolvetype ::= raisetype */ - -1, /* (372) selectnowith ::= oneselect */ - -1, /* (373) oneselect ::= values */ - -2, /* (374) sclp ::= selcollist COMMA */ - -1, /* (375) as ::= ID|STRING */ - -1, /* (376) indexed_opt ::= indexed_by */ - 0, /* (377) returning ::= */ - -1, /* (378) expr ::= term */ - -1, /* (379) likeop ::= LIKE_KW|MATCH */ - -1, /* (380) case_operand ::= expr */ - -1, /* (381) exprlist ::= nexprlist */ - -1, /* (382) nmnum ::= plus_num */ - -1, /* (383) nmnum ::= nm */ - -1, /* (384) nmnum ::= ON */ - -1, /* (385) nmnum ::= DELETE */ - -1, /* (386) nmnum ::= DEFAULT */ - -1, /* (387) plus_num ::= INTEGER|FLOAT */ - 0, /* (388) foreach_clause ::= */ - -3, /* (389) foreach_clause ::= FOR EACH ROW */ - -1, /* (390) trnm ::= nm */ - 0, /* (391) tridxby ::= */ - -1, /* (392) database_kw_opt ::= DATABASE */ - 0, /* (393) database_kw_opt ::= */ - 0, /* (394) kwcolumn_opt ::= */ - -1, /* (395) kwcolumn_opt ::= COLUMNKW */ - -1, /* (396) vtabarglist ::= vtabarg */ - -3, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ - -2, /* (398) vtabarg ::= vtabarg vtabargtoken */ - 0, /* (399) anylist ::= */ - -4, /* (400) anylist ::= anylist LP anylist RP */ - -2, /* (401) anylist ::= anylist ANY */ - 0, /* (402) with ::= */ - -1, /* (403) windowdefn_list ::= windowdefn */ - -1, /* (404) window ::= frame_opt */ + -1, /* (95) oneselect ::= mvalues */ + -5, /* (96) mvalues ::= values COMMA LP nexprlist RP */ + -5, /* (97) mvalues ::= mvalues COMMA LP nexprlist RP */ + -1, /* (98) distinct ::= DISTINCT */ + -1, /* (99) distinct ::= ALL */ + 0, /* (100) distinct ::= */ + 0, /* (101) sclp ::= */ + -5, /* (102) selcollist ::= sclp scanpt expr scanpt as */ + -3, /* (103) selcollist ::= sclp scanpt STAR */ + -5, /* (104) selcollist ::= sclp scanpt nm DOT STAR */ + -2, /* (105) as ::= AS nm */ + 0, /* (106) as ::= */ + 0, /* (107) from ::= */ + -2, /* 
(108) from ::= FROM seltablist */ + -2, /* (109) stl_prefix ::= seltablist joinop */ + 0, /* (110) stl_prefix ::= */ + -5, /* (111) seltablist ::= stl_prefix nm dbnm as on_using */ + -6, /* (112) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ + -8, /* (113) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ + -6, /* (114) seltablist ::= stl_prefix LP select RP as on_using */ + -6, /* (115) seltablist ::= stl_prefix LP seltablist RP as on_using */ + 0, /* (116) dbnm ::= */ + -2, /* (117) dbnm ::= DOT nm */ + -1, /* (118) fullname ::= nm */ + -3, /* (119) fullname ::= nm DOT nm */ + -1, /* (120) xfullname ::= nm */ + -3, /* (121) xfullname ::= nm DOT nm */ + -5, /* (122) xfullname ::= nm DOT nm AS nm */ + -3, /* (123) xfullname ::= nm AS nm */ + -1, /* (124) joinop ::= COMMA|JOIN */ + -2, /* (125) joinop ::= JOIN_KW JOIN */ + -3, /* (126) joinop ::= JOIN_KW nm JOIN */ + -4, /* (127) joinop ::= JOIN_KW nm nm JOIN */ + -2, /* (128) on_using ::= ON expr */ + -4, /* (129) on_using ::= USING LP idlist RP */ + 0, /* (130) on_using ::= */ + 0, /* (131) indexed_opt ::= */ + -3, /* (132) indexed_by ::= INDEXED BY nm */ + -2, /* (133) indexed_by ::= NOT INDEXED */ + 0, /* (134) orderby_opt ::= */ + -3, /* (135) orderby_opt ::= ORDER BY sortlist */ + -5, /* (136) sortlist ::= sortlist COMMA expr sortorder nulls */ + -3, /* (137) sortlist ::= expr sortorder nulls */ + -1, /* (138) sortorder ::= ASC */ + -1, /* (139) sortorder ::= DESC */ + 0, /* (140) sortorder ::= */ + -2, /* (141) nulls ::= NULLS FIRST */ + -2, /* (142) nulls ::= NULLS LAST */ + 0, /* (143) nulls ::= */ + 0, /* (144) groupby_opt ::= */ + -3, /* (145) groupby_opt ::= GROUP BY nexprlist */ + 0, /* (146) having_opt ::= */ + -2, /* (147) having_opt ::= HAVING expr */ + 0, /* (148) limit_opt ::= */ + -2, /* (149) limit_opt ::= LIMIT expr */ + -4, /* (150) limit_opt ::= LIMIT expr OFFSET expr */ + -4, /* (151) limit_opt ::= LIMIT expr COMMA expr */ + -8, /* (152) cmd ::= with DELETE FROM 
xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ + 0, /* (153) where_opt ::= */ + -2, /* (154) where_opt ::= WHERE expr */ + 0, /* (155) where_opt_ret ::= */ + -2, /* (156) where_opt_ret ::= WHERE expr */ + -2, /* (157) where_opt_ret ::= RETURNING selcollist */ + -4, /* (158) where_opt_ret ::= WHERE expr RETURNING selcollist */ + -11, /* (159) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ + -5, /* (160) setlist ::= setlist COMMA nm EQ expr */ + -7, /* (161) setlist ::= setlist COMMA LP idlist RP EQ expr */ + -3, /* (162) setlist ::= nm EQ expr */ + -5, /* (163) setlist ::= LP idlist RP EQ expr */ + -7, /* (164) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + -8, /* (165) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 0, /* (166) upsert ::= */ + -2, /* (167) upsert ::= RETURNING selcollist */ + -12, /* (168) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + -9, /* (169) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + -5, /* (170) upsert ::= ON CONFLICT DO NOTHING returning */ + -8, /* (171) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + -2, /* (172) returning ::= RETURNING selcollist */ + -2, /* (173) insert_cmd ::= INSERT orconf */ + -1, /* (174) insert_cmd ::= REPLACE */ + 0, /* (175) idlist_opt ::= */ + -3, /* (176) idlist_opt ::= LP idlist RP */ + -3, /* (177) idlist ::= idlist COMMA nm */ + -1, /* (178) idlist ::= nm */ + -3, /* (179) expr ::= LP expr RP */ + -1, /* (180) expr ::= ID|INDEXED|JOIN_KW */ + -3, /* (181) expr ::= nm DOT nm */ + -5, /* (182) expr ::= nm DOT nm DOT nm */ + -1, /* (183) term ::= NULL|FLOAT|BLOB */ + -1, /* (184) term ::= STRING */ + -1, /* (185) term ::= INTEGER */ + -1, /* (186) expr ::= VARIABLE */ + -3, /* (187) expr ::= expr COLLATE ID|STRING */ + -6, /* (188) expr ::= CAST LP expr AS typetoken RP */ + -5, /* (189) expr 
::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + -8, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + -4, /* (191) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + -6, /* (192) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + -9, /* (193) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + -5, /* (194) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + -1, /* (195) term ::= CTIME_KW */ + -5, /* (196) expr ::= LP nexprlist COMMA expr RP */ + -3, /* (197) expr ::= expr AND expr */ + -3, /* (198) expr ::= expr OR expr */ + -3, /* (199) expr ::= expr LT|GT|GE|LE expr */ + -3, /* (200) expr ::= expr EQ|NE expr */ + -3, /* (201) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + -3, /* (202) expr ::= expr PLUS|MINUS expr */ + -3, /* (203) expr ::= expr STAR|SLASH|REM expr */ + -3, /* (204) expr ::= expr CONCAT expr */ + -2, /* (205) likeop ::= NOT LIKE_KW|MATCH */ + -3, /* (206) expr ::= expr likeop expr */ + -5, /* (207) expr ::= expr likeop expr ESCAPE expr */ + -2, /* (208) expr ::= expr ISNULL|NOTNULL */ + -3, /* (209) expr ::= expr NOT NULL */ + -3, /* (210) expr ::= expr IS expr */ + -4, /* (211) expr ::= expr IS NOT expr */ + -6, /* (212) expr ::= expr IS NOT DISTINCT FROM expr */ + -5, /* (213) expr ::= expr IS DISTINCT FROM expr */ + -2, /* (214) expr ::= NOT expr */ + -2, /* (215) expr ::= BITNOT expr */ + -2, /* (216) expr ::= PLUS|MINUS expr */ + -3, /* (217) expr ::= expr PTR expr */ + -1, /* (218) between_op ::= BETWEEN */ + -2, /* (219) between_op ::= NOT BETWEEN */ + -5, /* (220) expr ::= expr between_op expr AND expr */ + -1, /* (221) in_op ::= IN */ + -2, /* (222) in_op ::= NOT IN */ + -5, /* (223) expr ::= expr in_op LP exprlist RP */ + -3, /* (224) expr ::= LP select RP */ + -5, /* (225) expr ::= expr in_op LP select RP */ + -5, /* (226) expr ::= expr in_op nm dbnm paren_exprlist */ + -4, /* (227) expr ::= EXISTS LP select RP */ + -5, /* (228) expr ::= CASE 
case_operand case_exprlist case_else END */ + -5, /* (229) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + -4, /* (230) case_exprlist ::= WHEN expr THEN expr */ + -2, /* (231) case_else ::= ELSE expr */ + 0, /* (232) case_else ::= */ + 0, /* (233) case_operand ::= */ + 0, /* (234) exprlist ::= */ + -3, /* (235) nexprlist ::= nexprlist COMMA expr */ + -1, /* (236) nexprlist ::= expr */ + 0, /* (237) paren_exprlist ::= */ + -3, /* (238) paren_exprlist ::= LP exprlist RP */ + -12, /* (239) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + -1, /* (240) uniqueflag ::= UNIQUE */ + 0, /* (241) uniqueflag ::= */ + 0, /* (242) eidlist_opt ::= */ + -3, /* (243) eidlist_opt ::= LP eidlist RP */ + -5, /* (244) eidlist ::= eidlist COMMA nm collate sortorder */ + -3, /* (245) eidlist ::= nm collate sortorder */ + 0, /* (246) collate ::= */ + -2, /* (247) collate ::= COLLATE ID|STRING */ + -4, /* (248) cmd ::= DROP INDEX ifexists fullname */ + -2, /* (249) cmd ::= VACUUM vinto */ + -3, /* (250) cmd ::= VACUUM nm vinto */ + -2, /* (251) vinto ::= INTO expr */ + 0, /* (252) vinto ::= */ + -3, /* (253) cmd ::= PRAGMA nm dbnm */ + -5, /* (254) cmd ::= PRAGMA nm dbnm EQ nmnum */ + -6, /* (255) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + -5, /* (256) cmd ::= PRAGMA nm dbnm EQ minus_num */ + -6, /* (257) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + -2, /* (258) plus_num ::= PLUS INTEGER|FLOAT */ + -2, /* (259) minus_num ::= MINUS INTEGER|FLOAT */ + -5, /* (260) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + -11, /* (261) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + -1, /* (262) trigger_time ::= BEFORE|AFTER */ + -2, /* (263) trigger_time ::= INSTEAD OF */ + 0, /* (264) trigger_time ::= */ + -1, /* (265) trigger_event ::= DELETE|INSERT */ + -1, /* (266) trigger_event ::= UPDATE */ + -3, /* (267) trigger_event ::= UPDATE OF idlist */ + 0, /* (268) 
when_clause ::= */ + -2, /* (269) when_clause ::= WHEN expr */ + -3, /* (270) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + -2, /* (271) trigger_cmd_list ::= trigger_cmd SEMI */ + -3, /* (272) trnm ::= nm DOT nm */ + -3, /* (273) tridxby ::= INDEXED BY nm */ + -2, /* (274) tridxby ::= NOT INDEXED */ + -9, /* (275) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + -8, /* (276) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + -6, /* (277) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + -3, /* (278) trigger_cmd ::= scanpt select scanpt */ + -4, /* (279) expr ::= RAISE LP IGNORE RP */ + -6, /* (280) expr ::= RAISE LP raisetype COMMA nm RP */ + -1, /* (281) raisetype ::= ROLLBACK */ + -1, /* (282) raisetype ::= ABORT */ + -1, /* (283) raisetype ::= FAIL */ + -4, /* (284) cmd ::= DROP TRIGGER ifexists fullname */ + -6, /* (285) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + -3, /* (286) cmd ::= DETACH database_kw_opt expr */ + 0, /* (287) key_opt ::= */ + -2, /* (288) key_opt ::= KEY expr */ + -1, /* (289) cmd ::= REINDEX */ + -3, /* (290) cmd ::= REINDEX nm dbnm */ + -1, /* (291) cmd ::= ANALYZE */ + -3, /* (292) cmd ::= ANALYZE nm dbnm */ + -6, /* (293) cmd ::= ALTER TABLE fullname RENAME TO nm */ + -7, /* (294) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + -6, /* (295) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + -1, /* (296) add_column_fullname ::= fullname */ + -8, /* (297) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + -1, /* (298) cmd ::= create_vtab */ + -4, /* (299) cmd ::= create_vtab LP vtabarglist RP */ + -8, /* (300) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 0, /* (301) vtabarg ::= */ + -1, /* (302) vtabargtoken ::= ANY */ + -3, /* (303) vtabargtoken ::= lp anylist RP */ + -1, /* (304) lp ::= LP */ + -2, /* (305) with ::= WITH wqlist */ + -3, /* (306) with ::= WITH 
RECURSIVE wqlist */ + -1, /* (307) wqas ::= AS */ + -2, /* (308) wqas ::= AS MATERIALIZED */ + -3, /* (309) wqas ::= AS NOT MATERIALIZED */ + -6, /* (310) wqitem ::= withnm eidlist_opt wqas LP select RP */ + -1, /* (311) withnm ::= nm */ + -1, /* (312) wqlist ::= wqitem */ + -3, /* (313) wqlist ::= wqlist COMMA wqitem */ + -3, /* (314) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + -5, /* (315) windowdefn ::= nm AS LP window RP */ + -5, /* (316) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + -6, /* (317) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + -4, /* (318) window ::= ORDER BY sortlist frame_opt */ + -5, /* (319) window ::= nm ORDER BY sortlist frame_opt */ + -2, /* (320) window ::= nm frame_opt */ + 0, /* (321) frame_opt ::= */ + -3, /* (322) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + -6, /* (323) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + -1, /* (324) range_or_rows ::= RANGE|ROWS|GROUPS */ + -1, /* (325) frame_bound_s ::= frame_bound */ + -2, /* (326) frame_bound_s ::= UNBOUNDED PRECEDING */ + -1, /* (327) frame_bound_e ::= frame_bound */ + -2, /* (328) frame_bound_e ::= UNBOUNDED FOLLOWING */ + -2, /* (329) frame_bound ::= expr PRECEDING|FOLLOWING */ + -2, /* (330) frame_bound ::= CURRENT ROW */ + 0, /* (331) frame_exclude_opt ::= */ + -2, /* (332) frame_exclude_opt ::= EXCLUDE frame_exclude */ + -2, /* (333) frame_exclude ::= NO OTHERS */ + -2, /* (334) frame_exclude ::= CURRENT ROW */ + -1, /* (335) frame_exclude ::= GROUP|TIES */ + -2, /* (336) window_clause ::= WINDOW windowdefn_list */ + -2, /* (337) filter_over ::= filter_clause over_clause */ + -1, /* (338) filter_over ::= over_clause */ + -1, /* (339) filter_over ::= filter_clause */ + -4, /* (340) over_clause ::= OVER LP window RP */ + -2, /* (341) over_clause ::= OVER nm */ + -5, /* (342) filter_clause ::= FILTER LP WHERE expr RP */ + -1, /* (343) term ::= QNUMBER */ + -1, /* (344) 
input ::= cmdlist */ + -2, /* (345) cmdlist ::= cmdlist ecmd */ + -1, /* (346) cmdlist ::= ecmd */ + -1, /* (347) ecmd ::= SEMI */ + -2, /* (348) ecmd ::= cmdx SEMI */ + -3, /* (349) ecmd ::= explain cmdx SEMI */ + 0, /* (350) trans_opt ::= */ + -1, /* (351) trans_opt ::= TRANSACTION */ + -2, /* (352) trans_opt ::= TRANSACTION nm */ + -1, /* (353) savepoint_opt ::= SAVEPOINT */ + 0, /* (354) savepoint_opt ::= */ + -2, /* (355) cmd ::= create_table create_table_args */ + -1, /* (356) table_option_set ::= table_option */ + -4, /* (357) columnlist ::= columnlist COMMA columnname carglist */ + -2, /* (358) columnlist ::= columnname carglist */ + -1, /* (359) nm ::= ID|INDEXED|JOIN_KW */ + -1, /* (360) nm ::= STRING */ + -1, /* (361) typetoken ::= typename */ + -1, /* (362) typename ::= ID|STRING */ + -1, /* (363) signed ::= plus_num */ + -1, /* (364) signed ::= minus_num */ + -2, /* (365) carglist ::= carglist ccons */ + 0, /* (366) carglist ::= */ + -2, /* (367) ccons ::= NULL onconf */ + -4, /* (368) ccons ::= GENERATED ALWAYS AS generated */ + -2, /* (369) ccons ::= AS generated */ + -2, /* (370) conslist_opt ::= COMMA conslist */ + -3, /* (371) conslist ::= conslist tconscomma tcons */ + -1, /* (372) conslist ::= tcons */ + 0, /* (373) tconscomma ::= */ + -1, /* (374) defer_subclause_opt ::= defer_subclause */ + -1, /* (375) resolvetype ::= raisetype */ + -1, /* (376) selectnowith ::= oneselect */ + -1, /* (377) oneselect ::= values */ + -2, /* (378) sclp ::= selcollist COMMA */ + -1, /* (379) as ::= ID|STRING */ + -1, /* (380) indexed_opt ::= indexed_by */ + 0, /* (381) returning ::= */ + -1, /* (382) expr ::= term */ + -1, /* (383) likeop ::= LIKE_KW|MATCH */ + -1, /* (384) case_operand ::= expr */ + -1, /* (385) exprlist ::= nexprlist */ + -1, /* (386) nmnum ::= plus_num */ + -1, /* (387) nmnum ::= nm */ + -1, /* (388) nmnum ::= ON */ + -1, /* (389) nmnum ::= DELETE */ + -1, /* (390) nmnum ::= DEFAULT */ + -1, /* (391) plus_num ::= INTEGER|FLOAT */ + 0, /* (392) 
foreach_clause ::= */ + -3, /* (393) foreach_clause ::= FOR EACH ROW */ + -1, /* (394) trnm ::= nm */ + 0, /* (395) tridxby ::= */ + -1, /* (396) database_kw_opt ::= DATABASE */ + 0, /* (397) database_kw_opt ::= */ + 0, /* (398) kwcolumn_opt ::= */ + -1, /* (399) kwcolumn_opt ::= COLUMNKW */ + -1, /* (400) vtabarglist ::= vtabarg */ + -3, /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ + -2, /* (402) vtabarg ::= vtabarg vtabargtoken */ + 0, /* (403) anylist ::= */ + -4, /* (404) anylist ::= anylist LP anylist RP */ + -2, /* (405) anylist ::= anylist ANY */ + 0, /* (406) with ::= */ + -1, /* (407) windowdefn_list ::= windowdefn */ + -1, /* (408) window ::= frame_opt */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -174739,16 +176125,16 @@ static YYACTIONTYPE yy_reduce( { sqlite3FinishCoding(pParse); } break; case 3: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy394);} +{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy144);} break; case 4: /* transtype ::= */ -{yymsp[1].minor.yy394 = TK_DEFERRED;} +{yymsp[1].minor.yy144 = TK_DEFERRED;} break; case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); - case 321: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==321); -{yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/} + case 324: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==324); +{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9); @@ -174771,7 +176157,7 @@ static YYACTIONTYPE yy_reduce( break; case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy394,0,0,yymsp[-2].minor.yy394); + 
sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy144,0,0,yymsp[-2].minor.yy144); } break; case 14: /* createkw ::= CREATE */ @@ -174783,40 +176169,40 @@ static YYACTIONTYPE yy_reduce( case 62: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==62); case 72: /* defer_subclause_opt ::= */ yytestcase(yyruleno==72); case 81: /* ifexists ::= */ yytestcase(yyruleno==81); - case 98: /* distinct ::= */ yytestcase(yyruleno==98); - case 244: /* collate ::= */ yytestcase(yyruleno==244); -{yymsp[1].minor.yy394 = 0;} + case 100: /* distinct ::= */ yytestcase(yyruleno==100); + case 246: /* collate ::= */ yytestcase(yyruleno==246); +{yymsp[1].minor.yy144 = 0;} break; case 16: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy394 = 1;} +{yymsp[-2].minor.yy144 = 1;} break; case 17: /* temp ::= TEMP */ -{yymsp[0].minor.yy394 = pParse->db->init.busy==0;} +{yymsp[0].minor.yy144 = pParse->db->init.busy==0;} break; case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_option_set */ { - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy285,0); + sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy391,0); } break; case 20: /* create_table_args ::= AS select */ { - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy47); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy47); + sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy555); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); } break; case 21: /* table_option_set ::= */ -{yymsp[1].minor.yy285 = 0;} +{yymsp[1].minor.yy391 = 0;} break; case 22: /* table_option_set ::= table_option_set COMMA table_option */ -{yylhsminor.yy285 = yymsp[-2].minor.yy285|yymsp[0].minor.yy285;} - yymsp[-2].minor.yy285 = yylhsminor.yy285; +{yylhsminor.yy391 = yymsp[-2].minor.yy391|yymsp[0].minor.yy391;} + yymsp[-2].minor.yy391 = yylhsminor.yy391; break; case 23: /* table_option ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && 
sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yymsp[-1].minor.yy285 = TF_WithoutRowid | TF_NoVisibleRowid; + yymsp[-1].minor.yy391 = TF_WithoutRowid | TF_NoVisibleRowid; }else{ - yymsp[-1].minor.yy285 = 0; + yymsp[-1].minor.yy391 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } @@ -174824,20 +176210,20 @@ static YYACTIONTYPE yy_reduce( case 24: /* table_option ::= nm */ { if( yymsp[0].minor.yy0.n==6 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"strict",6)==0 ){ - yylhsminor.yy285 = TF_Strict; + yylhsminor.yy391 = TF_Strict; }else{ - yylhsminor.yy285 = 0; + yylhsminor.yy391 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } - yymsp[0].minor.yy285 = yylhsminor.yy285; + yymsp[0].minor.yy391 = yylhsminor.yy391; break; case 25: /* columnname ::= nm typetoken */ {sqlite3AddColumn(pParse,yymsp[-1].minor.yy0,yymsp[0].minor.yy0);} break; case 26: /* typetoken ::= */ case 65: /* conslist_opt ::= */ yytestcase(yyruleno==65); - case 104: /* as ::= */ yytestcase(yyruleno==104); + case 106: /* as ::= */ yytestcase(yyruleno==106); {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = 0;} break; case 27: /* typetoken ::= typename LP signed RP */ @@ -174856,7 +176242,7 @@ static YYACTIONTYPE yy_reduce( case 30: /* scanpt ::= */ { assert( yyLookahead!=YYNOCODE ); - yymsp[1].minor.yy522 = yyLookaheadToken.z; + yymsp[1].minor.yy168 = yyLookaheadToken.z; } break; case 31: /* scantok ::= */ @@ -174870,17 +176256,17 @@ static YYACTIONTYPE yy_reduce( {pParse->constraintName = yymsp[0].minor.yy0;} break; case 33: /* ccons ::= DEFAULT scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy528,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 34: /* ccons ::= DEFAULT LP expr RP */ 
-{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy528,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} +{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} break; case 35: /* ccons ::= DEFAULT PLUS scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy528,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy454,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 36: /* ccons ::= DEFAULT MINUS scantok term */ { - Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy528, 0); + Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy454, 0); sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]); } break; @@ -174895,151 +176281,151 @@ static YYACTIONTYPE yy_reduce( } break; case 38: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy394);} +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy144);} break; case 39: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy394,yymsp[0].minor.yy394,yymsp[-2].minor.yy394);} +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy144,yymsp[0].minor.yy144,yymsp[-2].minor.yy144);} break; case 40: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy394,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy144,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 41: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy528,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy454,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} break; case 42: /* ccons ::= REFERENCES nm eidlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy322,yymsp[0].minor.yy394);} 
+{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy14,yymsp[0].minor.yy144);} break; case 43: /* ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy394);} +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy144);} break; case 44: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; case 45: /* generated ::= LP expr RP */ -{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy528,0);} +{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy454,0);} break; case 46: /* generated ::= LP expr RP ID */ -{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy528,&yymsp[0].minor.yy0);} +{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy454,&yymsp[0].minor.yy0);} break; case 48: /* autoinc ::= AUTOINCR */ -{yymsp[0].minor.yy394 = 1;} +{yymsp[0].minor.yy144 = 1;} break; case 49: /* refargs ::= */ -{ yymsp[1].minor.yy394 = OE_None*0x0101; /* EV: R-19803-45884 */} +{ yymsp[1].minor.yy144 = OE_None*0x0101; /* EV: R-19803-45884 */} break; case 50: /* refargs ::= refargs refarg */ -{ yymsp[-1].minor.yy394 = (yymsp[-1].minor.yy394 & ~yymsp[0].minor.yy231.mask) | yymsp[0].minor.yy231.value; } +{ yymsp[-1].minor.yy144 = (yymsp[-1].minor.yy144 & ~yymsp[0].minor.yy383.mask) | yymsp[0].minor.yy383.value; } break; case 51: /* refarg ::= MATCH nm */ -{ yymsp[-1].minor.yy231.value = 0; yymsp[-1].minor.yy231.mask = 0x000000; } +{ yymsp[-1].minor.yy383.value = 0; yymsp[-1].minor.yy383.mask = 0x000000; } break; case 52: /* refarg ::= ON INSERT refact */ -{ yymsp[-2].minor.yy231.value = 0; yymsp[-2].minor.yy231.mask = 0x000000; } +{ yymsp[-2].minor.yy383.value = 0; yymsp[-2].minor.yy383.mask = 0x000000; } break; case 53: /* refarg ::= ON DELETE refact */ -{ yymsp[-2].minor.yy231.value = yymsp[0].minor.yy394; yymsp[-2].minor.yy231.mask = 0x0000ff; } +{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144; yymsp[-2].minor.yy383.mask = 0x0000ff; } break; case 54: /* refarg ::= ON UPDATE refact */ -{ yymsp[-2].minor.yy231.value = 
yymsp[0].minor.yy394<<8; yymsp[-2].minor.yy231.mask = 0x00ff00; } +{ yymsp[-2].minor.yy383.value = yymsp[0].minor.yy144<<8; yymsp[-2].minor.yy383.mask = 0x00ff00; } break; case 55: /* refact ::= SET NULL */ -{ yymsp[-1].minor.yy394 = OE_SetNull; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy144 = OE_SetNull; /* EV: R-33326-45252 */} break; case 56: /* refact ::= SET DEFAULT */ -{ yymsp[-1].minor.yy394 = OE_SetDflt; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy144 = OE_SetDflt; /* EV: R-33326-45252 */} break; case 57: /* refact ::= CASCADE */ -{ yymsp[0].minor.yy394 = OE_Cascade; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy144 = OE_Cascade; /* EV: R-33326-45252 */} break; case 58: /* refact ::= RESTRICT */ -{ yymsp[0].minor.yy394 = OE_Restrict; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy144 = OE_Restrict; /* EV: R-33326-45252 */} break; case 59: /* refact ::= NO ACTION */ -{ yymsp[-1].minor.yy394 = OE_None; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy144 = OE_None; /* EV: R-33326-45252 */} break; case 60: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ -{yymsp[-2].minor.yy394 = 0;} +{yymsp[-2].minor.yy144 = 0;} break; case 61: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ case 76: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==76); - case 171: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==171); -{yymsp[-1].minor.yy394 = yymsp[0].minor.yy394;} + case 173: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==173); +{yymsp[-1].minor.yy144 = yymsp[0].minor.yy144;} break; case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80); - case 217: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==217); - case 220: /* in_op ::= NOT IN */ yytestcase(yyruleno==220); - case 245: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==245); -{yymsp[-1].minor.yy394 = 1;} + case 219: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==219); + case 222: /* 
in_op ::= NOT IN */ yytestcase(yyruleno==222); + case 247: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==247); +{yymsp[-1].minor.yy144 = 1;} break; case 64: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ -{yymsp[-1].minor.yy394 = 0;} +{yymsp[-1].minor.yy144 = 0;} break; case 66: /* tconscomma ::= COMMA */ {pParse->constraintName.n = 0;} break; case 68: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy322,yymsp[0].minor.yy394,yymsp[-2].minor.yy394,0);} +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy14,yymsp[0].minor.yy144,yymsp[-2].minor.yy144,0);} break; case 69: /* tcons ::= UNIQUE LP sortlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy322,yymsp[0].minor.yy394,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy14,yymsp[0].minor.yy144,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 70: /* tcons ::= CHECK LP expr RP onconf */ -{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy528,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy454,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} break; case 71: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ { - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy322, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy322, yymsp[-1].minor.yy394); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy394); + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy14, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[-1].minor.yy144); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy144); } break; case 73: /* onconf ::= */ case 75: /* orconf ::= */ yytestcase(yyruleno==75); -{yymsp[1].minor.yy394 = OE_Default;} +{yymsp[1].minor.yy144 = OE_Default;} break; case 74: /* onconf ::= ON CONFLICT resolvetype */ -{yymsp[-2].minor.yy394 = yymsp[0].minor.yy394;} +{yymsp[-2].minor.yy144 = yymsp[0].minor.yy144;} break; case 77: /* resolvetype ::= IGNORE */ 
-{yymsp[0].minor.yy394 = OE_Ignore;} +{yymsp[0].minor.yy144 = OE_Ignore;} break; case 78: /* resolvetype ::= REPLACE */ - case 172: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==172); -{yymsp[0].minor.yy394 = OE_Replace;} + case 174: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==174); +{yymsp[0].minor.yy144 = OE_Replace;} break; case 79: /* cmd ::= DROP TABLE ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy131, 0, yymsp[-1].minor.yy394); + sqlite3DropTable(pParse, yymsp[0].minor.yy203, 0, yymsp[-1].minor.yy144); } break; case 82: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ { - sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy322, yymsp[0].minor.yy47, yymsp[-7].minor.yy394, yymsp[-5].minor.yy394); + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[0].minor.yy555, yymsp[-7].minor.yy144, yymsp[-5].minor.yy144); } break; case 83: /* cmd ::= DROP VIEW ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy131, 1, yymsp[-1].minor.yy394); + sqlite3DropTable(pParse, yymsp[0].minor.yy203, 1, yymsp[-1].minor.yy144); } break; case 84: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, yymsp[0].minor.yy47, &dest); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy47); + sqlite3Select(pParse, yymsp[0].minor.yy555, &dest); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy555); } break; case 85: /* select ::= WITH wqlist selectnowith */ -{yymsp[-2].minor.yy47 = attachWithToSelect(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy521);} +{yymsp[-2].minor.yy555 = attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} break; case 86: /* select ::= WITH RECURSIVE wqlist selectnowith */ -{yymsp[-3].minor.yy47 = attachWithToSelect(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy521);} +{yymsp[-3].minor.yy555 = 
attachWithToSelect(pParse,yymsp[0].minor.yy555,yymsp[-1].minor.yy59);} break; case 87: /* select ::= selectnowith */ { - Select *p = yymsp[0].minor.yy47; + Select *p = yymsp[0].minor.yy555; if( p ){ parserDoubleLinkSelect(pParse, p); } @@ -175047,8 +176433,8 @@ static YYACTIONTYPE yy_reduce( break; case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */ { - Select *pRhs = yymsp[0].minor.yy47; - Select *pLhs = yymsp[-2].minor.yy47; + Select *pRhs = yymsp[0].minor.yy555; + Select *pLhs = yymsp[-2].minor.yy555; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; @@ -175058,148 +176444,145 @@ static YYACTIONTYPE yy_reduce( pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0); } if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy394; + pRhs->op = (u8)yymsp[-1].minor.yy144; pRhs->pPrior = pLhs; if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; pRhs->selFlags &= ~SF_MultiValue; - if( yymsp[-1].minor.yy394!=TK_ALL ) pParse->hasCompound = 1; + if( yymsp[-1].minor.yy144!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } - yymsp[-2].minor.yy47 = pRhs; + yymsp[-2].minor.yy555 = pRhs; } break; case 89: /* multiselect_op ::= UNION */ case 91: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==91); -{yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-OP*/} +{yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-OP*/} break; case 90: /* multiselect_op ::= UNION ALL */ -{yymsp[-1].minor.yy394 = TK_ALL;} +{yymsp[-1].minor.yy144 = TK_ALL;} break; case 92: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { - yymsp[-8].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy322,yymsp[-5].minor.yy131,yymsp[-4].minor.yy528,yymsp[-3].minor.yy322,yymsp[-2].minor.yy528,yymsp[-1].minor.yy322,yymsp[-7].minor.yy394,yymsp[0].minor.yy528); + yymsp[-8].minor.yy555 = 
sqlite3SelectNew(pParse,yymsp[-6].minor.yy14,yymsp[-5].minor.yy203,yymsp[-4].minor.yy454,yymsp[-3].minor.yy14,yymsp[-2].minor.yy454,yymsp[-1].minor.yy14,yymsp[-7].minor.yy144,yymsp[0].minor.yy454); } break; case 93: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ { - yymsp[-9].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy322,yymsp[-6].minor.yy131,yymsp[-5].minor.yy528,yymsp[-4].minor.yy322,yymsp[-3].minor.yy528,yymsp[-1].minor.yy322,yymsp[-8].minor.yy394,yymsp[0].minor.yy528); - if( yymsp[-9].minor.yy47 ){ - yymsp[-9].minor.yy47->pWinDefn = yymsp[-2].minor.yy41; + yymsp[-9].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy14,yymsp[-6].minor.yy203,yymsp[-5].minor.yy454,yymsp[-4].minor.yy14,yymsp[-3].minor.yy454,yymsp[-1].minor.yy14,yymsp[-8].minor.yy144,yymsp[0].minor.yy454); + if( yymsp[-9].minor.yy555 ){ + yymsp[-9].minor.yy555->pWinDefn = yymsp[-2].minor.yy211; }else{ - sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy41); + sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy211); } } break; case 94: /* values ::= VALUES LP nexprlist RP */ { - yymsp[-3].minor.yy47 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy322,0,0,0,0,0,SF_Values,0); + yymsp[-3].minor.yy555 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values,0); } break; - case 95: /* values ::= values COMMA LP nexprlist RP */ + case 95: /* oneselect ::= mvalues */ { - Select *pRight, *pLeft = yymsp[-4].minor.yy47; - pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy322,0,0,0,0,0,SF_Values|SF_MultiValue,0); - if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue; - if( pRight ){ - pRight->op = TK_ALL; - pRight->pPrior = pLeft; - yymsp[-4].minor.yy47 = pRight; - }else{ - yymsp[-4].minor.yy47 = pLeft; - } + sqlite3MultiValuesEnd(pParse, yymsp[0].minor.yy555); +} + break; + case 96: /* mvalues ::= values COMMA LP nexprlist RP */ + case 97: /* mvalues ::= mvalues COMMA LP nexprlist RP */ 
yytestcase(yyruleno==97); +{ + yymsp[-4].minor.yy555 = sqlite3MultiValues(pParse, yymsp[-4].minor.yy555, yymsp[-1].minor.yy14); } break; - case 96: /* distinct ::= DISTINCT */ -{yymsp[0].minor.yy394 = SF_Distinct;} + case 98: /* distinct ::= DISTINCT */ +{yymsp[0].minor.yy144 = SF_Distinct;} break; - case 97: /* distinct ::= ALL */ -{yymsp[0].minor.yy394 = SF_All;} + case 99: /* distinct ::= ALL */ +{yymsp[0].minor.yy144 = SF_All;} break; - case 99: /* sclp ::= */ - case 132: /* orderby_opt ::= */ yytestcase(yyruleno==132); - case 142: /* groupby_opt ::= */ yytestcase(yyruleno==142); - case 232: /* exprlist ::= */ yytestcase(yyruleno==232); - case 235: /* paren_exprlist ::= */ yytestcase(yyruleno==235); - case 240: /* eidlist_opt ::= */ yytestcase(yyruleno==240); -{yymsp[1].minor.yy322 = 0;} + case 101: /* sclp ::= */ + case 134: /* orderby_opt ::= */ yytestcase(yyruleno==134); + case 144: /* groupby_opt ::= */ yytestcase(yyruleno==144); + case 234: /* exprlist ::= */ yytestcase(yyruleno==234); + case 237: /* paren_exprlist ::= */ yytestcase(yyruleno==237); + case 242: /* eidlist_opt ::= */ yytestcase(yyruleno==242); +{yymsp[1].minor.yy14 = 0;} break; - case 100: /* selcollist ::= sclp scanpt expr scanpt as */ + case 102: /* selcollist ::= sclp scanpt expr scanpt as */ { - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy322, yymsp[-2].minor.yy528); - if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy322, &yymsp[0].minor.yy0, 1); - sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy322,yymsp[-3].minor.yy522,yymsp[-1].minor.yy522); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[-2].minor.yy454); + if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[0].minor.yy0, 1); + sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy14,yymsp[-3].minor.yy168,yymsp[-1].minor.yy168); } break; - case 101: /* selcollist ::= sclp scanpt STAR */ + case 103: /* selcollist 
::= sclp scanpt STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); sqlite3ExprSetErrorOffset(p, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); - yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy322, p); + yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy14, p); } break; - case 102: /* selcollist ::= sclp scanpt nm DOT STAR */ + case 104: /* selcollist ::= sclp scanpt nm DOT STAR */ { Expr *pRight, *pLeft, *pDot; pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); sqlite3ExprSetErrorOffset(pRight, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0); pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, pDot); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, pDot); } break; - case 103: /* as ::= AS nm */ - case 115: /* dbnm ::= DOT nm */ yytestcase(yyruleno==115); - case 256: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==256); - case 257: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==257); + case 105: /* as ::= AS nm */ + case 117: /* dbnm ::= DOT nm */ yytestcase(yyruleno==117); + case 258: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==258); + case 259: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==259); {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;} break; - case 105: /* from ::= */ - case 108: /* stl_prefix ::= */ yytestcase(yyruleno==108); -{yymsp[1].minor.yy131 = 0;} + case 107: /* from ::= */ + case 110: /* stl_prefix ::= */ yytestcase(yyruleno==110); +{yymsp[1].minor.yy203 = 0;} break; - case 106: /* from ::= FROM seltablist */ + case 108: /* from ::= FROM seltablist */ { - yymsp[-1].minor.yy131 = yymsp[0].minor.yy131; - sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy131); + yymsp[-1].minor.yy203 = yymsp[0].minor.yy203; + sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy203); } break; - case 107: /* 
stl_prefix ::= seltablist joinop */ + case 109: /* stl_prefix ::= seltablist joinop */ { - if( ALWAYS(yymsp[-1].minor.yy131 && yymsp[-1].minor.yy131->nSrc>0) ) yymsp[-1].minor.yy131->a[yymsp[-1].minor.yy131->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy394; + if( ALWAYS(yymsp[-1].minor.yy203 && yymsp[-1].minor.yy203->nSrc>0) ) yymsp[-1].minor.yy203->a[yymsp[-1].minor.yy203->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy144; } break; - case 109: /* seltablist ::= stl_prefix nm dbnm as on_using */ + case 111: /* seltablist ::= stl_prefix nm dbnm as on_using */ { - yymsp[-4].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy131,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561); + yymsp[-4].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy203,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); } break; - case 110: /* seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ + case 112: /* seltablist ::= stl_prefix nm dbnm as indexed_by on_using */ { - yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy561); - sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy131, &yymsp[-1].minor.yy0); + yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy269); + sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy203, &yymsp[-1].minor.yy0); } break; - case 111: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ + case 113: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */ { - yymsp[-7].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy131,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561); - sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy131, yymsp[-3].minor.yy322); + 
yymsp[-7].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy203,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); + sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy203, yymsp[-3].minor.yy14); } break; - case 112: /* seltablist ::= stl_prefix LP select RP as on_using */ + case 114: /* seltablist ::= stl_prefix LP select RP as on_using */ { - yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy47,&yymsp[0].minor.yy561); + yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy555,&yymsp[0].minor.yy269); } break; - case 113: /* seltablist ::= stl_prefix LP seltablist RP as on_using */ + case 115: /* seltablist ::= stl_prefix LP seltablist RP as on_using */ { - if( yymsp[-5].minor.yy131==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy561.pOn==0 && yymsp[0].minor.yy561.pUsing==0 ){ - yymsp[-5].minor.yy131 = yymsp[-3].minor.yy131; - }else if( ALWAYS(yymsp[-3].minor.yy131!=0) && yymsp[-3].minor.yy131->nSrc==1 ){ - yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561); - if( yymsp[-5].minor.yy131 ){ - SrcItem *pNew = &yymsp[-5].minor.yy131->a[yymsp[-5].minor.yy131->nSrc-1]; - SrcItem *pOld = yymsp[-3].minor.yy131->a; + if( yymsp[-5].minor.yy203==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy269.pOn==0 && yymsp[0].minor.yy269.pUsing==0 ){ + yymsp[-5].minor.yy203 = yymsp[-3].minor.yy203; + }else if( ALWAYS(yymsp[-3].minor.yy203!=0) && yymsp[-3].minor.yy203->nSrc==1 ){ + yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy269); + if( yymsp[-5].minor.yy203 ){ + SrcItem *pNew = &yymsp[-5].minor.yy203->a[yymsp[-5].minor.yy203->nSrc-1]; + SrcItem *pOld = yymsp[-3].minor.yy203->a; pNew->zName = pOld->zName; pNew->zDatabase = 
pOld->zDatabase; pNew->pSelect = pOld->pSelect; @@ -175215,159 +176598,159 @@ static YYACTIONTYPE yy_reduce( pOld->zName = pOld->zDatabase = 0; pOld->pSelect = 0; } - sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy131); + sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy203); }else{ Select *pSubquery; - sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy131); - pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy131,0,0,0,0,SF_NestedFrom,0); - yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy561); + sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy203); + pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy203,0,0,0,0,SF_NestedFrom,0); + yymsp[-5].minor.yy203 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy203,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy269); } } break; - case 114: /* dbnm ::= */ - case 129: /* indexed_opt ::= */ yytestcase(yyruleno==129); + case 116: /* dbnm ::= */ + case 131: /* indexed_opt ::= */ yytestcase(yyruleno==131); {yymsp[1].minor.yy0.z=0; yymsp[1].minor.yy0.n=0;} break; - case 116: /* fullname ::= nm */ + case 118: /* fullname ::= nm */ { - yylhsminor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); - if( IN_RENAME_OBJECT && yylhsminor.yy131 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy131->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); + if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy131 = yylhsminor.yy131; + yymsp[0].minor.yy203 = yylhsminor.yy203; break; - case 117: /* fullname ::= nm DOT nm */ + case 119: /* fullname ::= nm DOT nm */ { - yylhsminor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); - if( IN_RENAME_OBJECT && yylhsminor.yy131 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy131->a[0].zName, 
&yymsp[0].minor.yy0); + yylhsminor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); + if( IN_RENAME_OBJECT && yylhsminor.yy203 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy203->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy131 = yylhsminor.yy131; + yymsp[-2].minor.yy203 = yylhsminor.yy203; break; - case 118: /* xfullname ::= nm */ -{yymsp[0].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} + case 120: /* xfullname ::= nm */ +{yymsp[0].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} break; - case 119: /* xfullname ::= nm DOT nm */ -{yymsp[-2].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 121: /* xfullname ::= nm DOT nm */ +{yymsp[-2].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 120: /* xfullname ::= nm DOT nm AS nm */ + case 122: /* xfullname ::= nm DOT nm AS nm */ { - yymsp[-4].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ - if( yymsp[-4].minor.yy131 ) yymsp[-4].minor.yy131->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-4].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ + if( yymsp[-4].minor.yy203 ) yymsp[-4].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; - case 121: /* xfullname ::= nm AS nm */ + case 123: /* xfullname ::= nm AS nm */ { - yymsp[-2].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ - if( yymsp[-2].minor.yy131 ) yymsp[-2].minor.yy131->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-2].minor.yy203 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ + if( yymsp[-2].minor.yy203 ) 
yymsp[-2].minor.yy203->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; - case 122: /* joinop ::= COMMA|JOIN */ -{ yymsp[0].minor.yy394 = JT_INNER; } + case 124: /* joinop ::= COMMA|JOIN */ +{ yymsp[0].minor.yy144 = JT_INNER; } break; - case 123: /* joinop ::= JOIN_KW JOIN */ -{yymsp[-1].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} + case 125: /* joinop ::= JOIN_KW JOIN */ +{yymsp[-1].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} break; - case 124: /* joinop ::= JOIN_KW nm JOIN */ -{yymsp[-2].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} + case 126: /* joinop ::= JOIN_KW nm JOIN */ +{yymsp[-2].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} break; - case 125: /* joinop ::= JOIN_KW nm nm JOIN */ -{yymsp[-3].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} + case 127: /* joinop ::= JOIN_KW nm nm JOIN */ +{yymsp[-3].minor.yy144 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} break; - case 126: /* on_using ::= ON expr */ -{yymsp[-1].minor.yy561.pOn = yymsp[0].minor.yy528; yymsp[-1].minor.yy561.pUsing = 0;} + case 128: /* on_using ::= ON expr */ +{yymsp[-1].minor.yy269.pOn = yymsp[0].minor.yy454; yymsp[-1].minor.yy269.pUsing = 0;} break; - case 127: /* on_using ::= USING LP idlist RP */ -{yymsp[-3].minor.yy561.pOn = 0; yymsp[-3].minor.yy561.pUsing = yymsp[-1].minor.yy254;} + case 129: /* on_using ::= USING LP idlist RP */ +{yymsp[-3].minor.yy269.pOn = 0; yymsp[-3].minor.yy269.pUsing = yymsp[-1].minor.yy132;} break; - case 128: /* on_using ::= */ -{yymsp[1].minor.yy561.pOn = 0; yymsp[1].minor.yy561.pUsing = 0;} + case 130: /* on_using ::= */ +{yymsp[1].minor.yy269.pOn = 0; yymsp[1].minor.yy269.pUsing = 0;} break; - case 130: 
/* indexed_by ::= INDEXED BY nm */ + case 132: /* indexed_by ::= INDEXED BY nm */ {yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;} break; - case 131: /* indexed_by ::= NOT INDEXED */ + case 133: /* indexed_by ::= NOT INDEXED */ {yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;} break; - case 133: /* orderby_opt ::= ORDER BY sortlist */ - case 143: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==143); -{yymsp[-2].minor.yy322 = yymsp[0].minor.yy322;} + case 135: /* orderby_opt ::= ORDER BY sortlist */ + case 145: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==145); +{yymsp[-2].minor.yy14 = yymsp[0].minor.yy14;} break; - case 134: /* sortlist ::= sortlist COMMA expr sortorder nulls */ + case 136: /* sortlist ::= sortlist COMMA expr sortorder nulls */ { - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322,yymsp[-2].minor.yy528); - sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy322,yymsp[-1].minor.yy394,yymsp[0].minor.yy394); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14,yymsp[-2].minor.yy454); + sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144); } break; - case 135: /* sortlist ::= expr sortorder nulls */ + case 137: /* sortlist ::= expr sortorder nulls */ { - yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy528); /*A-overwrites-Y*/ - sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy322,yymsp[-1].minor.yy394,yymsp[0].minor.yy394); + yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy454); /*A-overwrites-Y*/ + sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy14,yymsp[-1].minor.yy144,yymsp[0].minor.yy144); } break; - case 136: /* sortorder ::= ASC */ -{yymsp[0].minor.yy394 = SQLITE_SO_ASC;} + case 138: /* sortorder ::= ASC */ +{yymsp[0].minor.yy144 = SQLITE_SO_ASC;} break; - case 137: /* sortorder ::= DESC */ -{yymsp[0].minor.yy394 = SQLITE_SO_DESC;} + case 139: /* sortorder ::= DESC */ +{yymsp[0].minor.yy144 = 
SQLITE_SO_DESC;} break; - case 138: /* sortorder ::= */ - case 141: /* nulls ::= */ yytestcase(yyruleno==141); -{yymsp[1].minor.yy394 = SQLITE_SO_UNDEFINED;} + case 140: /* sortorder ::= */ + case 143: /* nulls ::= */ yytestcase(yyruleno==143); +{yymsp[1].minor.yy144 = SQLITE_SO_UNDEFINED;} break; - case 139: /* nulls ::= NULLS FIRST */ -{yymsp[-1].minor.yy394 = SQLITE_SO_ASC;} + case 141: /* nulls ::= NULLS FIRST */ +{yymsp[-1].minor.yy144 = SQLITE_SO_ASC;} break; - case 140: /* nulls ::= NULLS LAST */ -{yymsp[-1].minor.yy394 = SQLITE_SO_DESC;} + case 142: /* nulls ::= NULLS LAST */ +{yymsp[-1].minor.yy144 = SQLITE_SO_DESC;} break; - case 144: /* having_opt ::= */ - case 146: /* limit_opt ::= */ yytestcase(yyruleno==146); - case 151: /* where_opt ::= */ yytestcase(yyruleno==151); - case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153); - case 230: /* case_else ::= */ yytestcase(yyruleno==230); - case 231: /* case_operand ::= */ yytestcase(yyruleno==231); - case 250: /* vinto ::= */ yytestcase(yyruleno==250); -{yymsp[1].minor.yy528 = 0;} + case 146: /* having_opt ::= */ + case 148: /* limit_opt ::= */ yytestcase(yyruleno==148); + case 153: /* where_opt ::= */ yytestcase(yyruleno==153); + case 155: /* where_opt_ret ::= */ yytestcase(yyruleno==155); + case 232: /* case_else ::= */ yytestcase(yyruleno==232); + case 233: /* case_operand ::= */ yytestcase(yyruleno==233); + case 252: /* vinto ::= */ yytestcase(yyruleno==252); +{yymsp[1].minor.yy454 = 0;} break; - case 145: /* having_opt ::= HAVING expr */ - case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152); - case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154); - case 229: /* case_else ::= ELSE expr */ yytestcase(yyruleno==229); - case 249: /* vinto ::= INTO expr */ yytestcase(yyruleno==249); -{yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;} + case 147: /* having_opt ::= HAVING expr */ + case 154: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==154); + case 156: /* 
where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==156); + case 231: /* case_else ::= ELSE expr */ yytestcase(yyruleno==231); + case 251: /* vinto ::= INTO expr */ yytestcase(yyruleno==251); +{yymsp[-1].minor.yy454 = yymsp[0].minor.yy454;} break; - case 147: /* limit_opt ::= LIMIT expr */ -{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy528,0);} + case 149: /* limit_opt ::= LIMIT expr */ +{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,0);} break; - case 148: /* limit_opt ::= LIMIT expr OFFSET expr */ -{yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} + case 150: /* limit_opt ::= LIMIT expr OFFSET expr */ +{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} break; - case 149: /* limit_opt ::= LIMIT expr COMMA expr */ -{yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy528,yymsp[-2].minor.yy528);} + case 151: /* limit_opt ::= LIMIT expr COMMA expr */ +{yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy454,yymsp[-2].minor.yy454);} break; - case 150: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ + case 152: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy131, &yymsp[-3].minor.yy0); + sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy203, &yymsp[-3].minor.yy0); #ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT - if( yymsp[-1].minor.yy322 || yymsp[0].minor.yy528 ){ - updateDeleteLimitError(pParse,yymsp[-1].minor.yy322,yymsp[0].minor.yy528); - yymsp[-1].minor.yy322 = 0; - yymsp[0].minor.yy528 = 0; + if( yymsp[-1].minor.yy14 || yymsp[0].minor.yy454 ){ + updateDeleteLimitError(pParse,yymsp[-1].minor.yy14,yymsp[0].minor.yy454); + yymsp[-1].minor.yy14 = 0; + yymsp[0].minor.yy454 = 0; } #endif - 
sqlite3DeleteFrom(pParse,yymsp[-4].minor.yy131,yymsp[-2].minor.yy528,yymsp[-1].minor.yy322,yymsp[0].minor.yy528); + sqlite3DeleteFrom(pParse,yymsp[-4].minor.yy203,yymsp[-2].minor.yy454,yymsp[-1].minor.yy14,yymsp[0].minor.yy454); } break; - case 155: /* where_opt_ret ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy322); yymsp[-1].minor.yy528 = 0;} + case 157: /* where_opt_ret ::= RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-1].minor.yy454 = 0;} break; - case 156: /* where_opt_ret ::= WHERE expr RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy322); yymsp[-3].minor.yy528 = yymsp[-2].minor.yy528;} + case 158: /* where_opt_ret ::= WHERE expr RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy14); yymsp[-3].minor.yy454 = yymsp[-2].minor.yy454;} break; - case 157: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ + case 159: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-7].minor.yy131, &yymsp[-6].minor.yy0); - if( yymsp[-3].minor.yy131 ){ - SrcList *pFromClause = yymsp[-3].minor.yy131; + sqlite3SrcListIndexedBy(pParse, yymsp[-7].minor.yy203, &yymsp[-6].minor.yy0); + if( yymsp[-3].minor.yy203 ){ + SrcList *pFromClause = yymsp[-3].minor.yy203; if( pFromClause->nSrc>1 ){ Select *pSubquery; Token as; @@ -175376,100 +176759,100 @@ static YYACTIONTYPE yy_reduce( as.z = 0; pFromClause = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&as,pSubquery,0); } - yymsp[-7].minor.yy131 = sqlite3SrcListAppendList(pParse, yymsp[-7].minor.yy131, pFromClause); + yymsp[-7].minor.yy203 = sqlite3SrcListAppendList(pParse, yymsp[-7].minor.yy203, pFromClause); } - sqlite3ExprListCheckLength(pParse,yymsp[-4].minor.yy322,"set list"); + sqlite3ExprListCheckLength(pParse,yymsp[-4].minor.yy14,"set list"); #ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT 
- if( yymsp[-1].minor.yy322 || yymsp[0].minor.yy528 ){ - updateDeleteLimitError(pParse,yymsp[-1].minor.yy322,yymsp[0].minor.yy528); - yymsp[-1].minor.yy322 = 0; - yymsp[0].minor.yy528 = 0; + if( yymsp[-1].minor.yy14 || yymsp[0].minor.yy454 ){ + updateDeleteLimitError(pParse,yymsp[-1].minor.yy14,yymsp[0].minor.yy454); + yymsp[-1].minor.yy14 = 0; + yymsp[0].minor.yy454 = 0; } #endif - sqlite3Update(pParse,yymsp[-7].minor.yy131,yymsp[-4].minor.yy322,yymsp[-2].minor.yy528,yymsp[-8].minor.yy394,yymsp[-1].minor.yy322,yymsp[0].minor.yy528,0); + sqlite3Update(pParse,yymsp[-7].minor.yy203,yymsp[-4].minor.yy14,yymsp[-2].minor.yy454,yymsp[-8].minor.yy144,yymsp[-1].minor.yy14,yymsp[0].minor.yy454,0); } break; - case 158: /* setlist ::= setlist COMMA nm EQ expr */ + case 160: /* setlist ::= setlist COMMA nm EQ expr */ { - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy322, yymsp[0].minor.yy528); - sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, 1); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[0].minor.yy454); + sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, 1); } break; - case 159: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ + case 161: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ { - yymsp[-6].minor.yy322 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy322, yymsp[-3].minor.yy254, yymsp[0].minor.yy528); + yymsp[-6].minor.yy14 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy14, yymsp[-3].minor.yy132, yymsp[0].minor.yy454); } break; - case 160: /* setlist ::= nm EQ expr */ + case 162: /* setlist ::= nm EQ expr */ { - yylhsminor.yy322 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy528); - sqlite3ExprListSetName(pParse, yylhsminor.yy322, &yymsp[-2].minor.yy0, 1); + yylhsminor.yy14 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy454); + sqlite3ExprListSetName(pParse, yylhsminor.yy14, &yymsp[-2].minor.yy0, 1); } - 
yymsp[-2].minor.yy322 = yylhsminor.yy322; + yymsp[-2].minor.yy14 = yylhsminor.yy14; break; - case 161: /* setlist ::= LP idlist RP EQ expr */ + case 163: /* setlist ::= LP idlist RP EQ expr */ { - yymsp[-4].minor.yy322 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy254, yymsp[0].minor.yy528); + yymsp[-4].minor.yy14 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy132, yymsp[0].minor.yy454); } break; - case 162: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + case 164: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ { - sqlite3Insert(pParse, yymsp[-3].minor.yy131, yymsp[-1].minor.yy47, yymsp[-2].minor.yy254, yymsp[-5].minor.yy394, yymsp[0].minor.yy444); + sqlite3Insert(pParse, yymsp[-3].minor.yy203, yymsp[-1].minor.yy555, yymsp[-2].minor.yy132, yymsp[-5].minor.yy144, yymsp[0].minor.yy122); } break; - case 163: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + case 165: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ { - sqlite3Insert(pParse, yymsp[-4].minor.yy131, 0, yymsp[-3].minor.yy254, yymsp[-6].minor.yy394, 0); + sqlite3Insert(pParse, yymsp[-4].minor.yy203, 0, yymsp[-3].minor.yy132, yymsp[-6].minor.yy144, 0); } break; - case 164: /* upsert ::= */ -{ yymsp[1].minor.yy444 = 0; } + case 166: /* upsert ::= */ +{ yymsp[1].minor.yy122 = 0; } break; - case 165: /* upsert ::= RETURNING selcollist */ -{ yymsp[-1].minor.yy444 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy322); } + case 167: /* upsert ::= RETURNING selcollist */ +{ yymsp[-1].minor.yy122 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy14); } break; - case 166: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ -{ yymsp[-11].minor.yy444 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy322,yymsp[-6].minor.yy528,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528,yymsp[0].minor.yy444);} + case 168: /* upsert ::= ON CONFLICT LP sortlist RP 
where_opt DO UPDATE SET setlist where_opt upsert */ +{ yymsp[-11].minor.yy122 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy14,yymsp[-6].minor.yy454,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,yymsp[0].minor.yy122);} break; - case 167: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ -{ yymsp[-8].minor.yy444 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy322,yymsp[-3].minor.yy528,0,0,yymsp[0].minor.yy444); } + case 169: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ +{ yymsp[-8].minor.yy122 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy14,yymsp[-3].minor.yy454,0,0,yymsp[0].minor.yy122); } break; - case 168: /* upsert ::= ON CONFLICT DO NOTHING returning */ -{ yymsp[-4].minor.yy444 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } + case 170: /* upsert ::= ON CONFLICT DO NOTHING returning */ +{ yymsp[-4].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } break; - case 169: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ -{ yymsp[-7].minor.yy444 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528,0);} + case 171: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ +{ yymsp[-7].minor.yy122 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454,0);} break; - case 170: /* returning ::= RETURNING selcollist */ -{sqlite3AddReturning(pParse,yymsp[0].minor.yy322);} + case 172: /* returning ::= RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy14);} break; - case 173: /* idlist_opt ::= */ -{yymsp[1].minor.yy254 = 0;} + case 175: /* idlist_opt ::= */ +{yymsp[1].minor.yy132 = 0;} break; - case 174: /* idlist_opt ::= LP idlist RP */ -{yymsp[-2].minor.yy254 = yymsp[-1].minor.yy254;} + case 176: /* idlist_opt ::= LP idlist RP */ +{yymsp[-2].minor.yy132 = yymsp[-1].minor.yy132;} break; - case 175: /* idlist ::= idlist COMMA nm */ -{yymsp[-2].minor.yy254 = 
sqlite3IdListAppend(pParse,yymsp[-2].minor.yy254,&yymsp[0].minor.yy0);} + case 177: /* idlist ::= idlist COMMA nm */ +{yymsp[-2].minor.yy132 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy132,&yymsp[0].minor.yy0);} break; - case 176: /* idlist ::= nm */ -{yymsp[0].minor.yy254 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} + case 178: /* idlist ::= nm */ +{yymsp[0].minor.yy132 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} break; - case 177: /* expr ::= LP expr RP */ -{yymsp[-2].minor.yy528 = yymsp[-1].minor.yy528;} + case 179: /* expr ::= LP expr RP */ +{yymsp[-2].minor.yy454 = yymsp[-1].minor.yy454;} break; - case 178: /* expr ::= ID|INDEXED|JOIN_KW */ -{yymsp[0].minor.yy528=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 180: /* expr ::= ID|INDEXED|JOIN_KW */ +{yymsp[0].minor.yy454=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 179: /* expr ::= nm DOT nm */ + case 181: /* expr ::= nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); - yylhsminor.yy528 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); + yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); } - yymsp[-2].minor.yy528 = yylhsminor.yy528; + yymsp[-2].minor.yy454 = yylhsminor.yy454; break; - case 180: /* expr ::= nm DOT nm DOT nm */ + case 182: /* expr ::= nm DOT nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-4].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); @@ -175478,27 +176861,27 @@ static YYACTIONTYPE yy_reduce( if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, 0, temp1); } - yylhsminor.yy528 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); + yylhsminor.yy454 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); } - yymsp[-4].minor.yy528 = yylhsminor.yy528; + yymsp[-4].minor.yy454 = yylhsminor.yy454; break; - case 181: /* term ::= NULL|FLOAT|BLOB */ - case 182: /* term ::= STRING 
*/ yytestcase(yyruleno==182); -{yymsp[0].minor.yy528=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 183: /* term ::= NULL|FLOAT|BLOB */ + case 184: /* term ::= STRING */ yytestcase(yyruleno==184); +{yymsp[0].minor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 183: /* term ::= INTEGER */ + case 185: /* term ::= INTEGER */ { - yylhsminor.yy528 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); - if( yylhsminor.yy528 ) yylhsminor.yy528->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); + yylhsminor.yy454 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); + if( yylhsminor.yy454 ) yylhsminor.yy454->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); } - yymsp[0].minor.yy528 = yylhsminor.yy528; + yymsp[0].minor.yy454 = yylhsminor.yy454; break; - case 184: /* expr ::= VARIABLE */ + case 186: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n; - yymsp[0].minor.yy528 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); - sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy528, n); + yymsp[0].minor.yy454 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); + sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy454, n); }else{ /* When doing a nested parse, one can include terms in an expression ** that look like this: #1 #2 ... 
These terms refer to registers @@ -175507,194 +176890,203 @@ static YYACTIONTYPE yy_reduce( assert( t.n>=2 ); if( pParse->nested==0 ){ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); - yymsp[0].minor.yy528 = 0; + yymsp[0].minor.yy454 = 0; }else{ - yymsp[0].minor.yy528 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); - if( yymsp[0].minor.yy528 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy528->iTable); + yymsp[0].minor.yy454 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); + if( yymsp[0].minor.yy454 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy454->iTable); } } } break; - case 185: /* expr ::= expr COLLATE ID|STRING */ + case 187: /* expr ::= expr COLLATE ID|STRING */ { - yymsp[-2].minor.yy528 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy528, &yymsp[0].minor.yy0, 1); + yymsp[-2].minor.yy454 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy454, &yymsp[0].minor.yy0, 1); } break; - case 186: /* expr ::= CAST LP expr AS typetoken RP */ + case 188: /* expr ::= CAST LP expr AS typetoken RP */ { - yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); - sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy528, yymsp[-3].minor.yy528, 0); + yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); + sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy454, yymsp[-3].minor.yy454, 0); } break; - case 187: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + case 189: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy394); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy144); } - yymsp[-4].minor.yy528 = yylhsminor.yy528; + yymsp[-4].minor.yy454 = yylhsminor.yy454; break; - case 188: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + case 190: /* expr ::= ID|INDEXED|JOIN_KW LP 
distinct exprlist ORDER BY sortlist RP */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy322, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy394); - sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-1].minor.yy322); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy14, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy144); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-1].minor.yy14); } - yymsp[-7].minor.yy528 = yylhsminor.yy528; + yymsp[-7].minor.yy454 = yylhsminor.yy454; break; - case 189: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + case 191: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } - yymsp[-3].minor.yy528 = yylhsminor.yy528; + yymsp[-3].minor.yy454 = yylhsminor.yy454; break; - case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + case 192: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy322, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy394); - sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy14, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy144); + sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); } - yymsp[-5].minor.yy528 = yylhsminor.yy528; + yymsp[-5].minor.yy454 = yylhsminor.yy454; break; - case 191: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + case 193: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy322, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy394); - sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); - sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, 
yymsp[-2].minor.yy322); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy14, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy144); + sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy454, yymsp[-2].minor.yy14); } - yymsp[-8].minor.yy528 = yylhsminor.yy528; + yymsp[-8].minor.yy454 = yylhsminor.yy454; break; - case 192: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + case 194: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); - sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); + sqlite3WindowAttach(pParse, yylhsminor.yy454, yymsp[0].minor.yy211); } - yymsp[-4].minor.yy528 = yylhsminor.yy528; + yymsp[-4].minor.yy454 = yylhsminor.yy454; break; - case 193: /* term ::= CTIME_KW */ + case 195: /* term ::= CTIME_KW */ { - yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); } - yymsp[0].minor.yy528 = yylhsminor.yy528; + yymsp[0].minor.yy454 = yylhsminor.yy454; break; - case 194: /* expr ::= LP nexprlist COMMA expr RP */ + case 196: /* expr ::= LP nexprlist COMMA expr RP */ { - ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528); - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); - if( yymsp[-4].minor.yy528 ){ - yymsp[-4].minor.yy528->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); + if( yymsp[-4].minor.yy454 ){ + yymsp[-4].minor.yy454->x.pList = pList; if( ALWAYS(pList->nExpr) ){ - yymsp[-4].minor.yy528->flags |= pList->a[0].pExpr->flags & EP_Propagate; + yymsp[-4].minor.yy454->flags |= pList->a[0].pExpr->flags & 
EP_Propagate; } }else{ sqlite3ExprListDelete(pParse->db, pList); } } break; - case 195: /* expr ::= expr AND expr */ -{yymsp[-2].minor.yy528=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} + case 197: /* expr ::= expr AND expr */ +{yymsp[-2].minor.yy454=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} break; - case 196: /* expr ::= expr OR expr */ - case 197: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==197); - case 198: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==198); - case 199: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==199); - case 200: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==200); - case 201: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==201); - case 202: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==202); -{yymsp[-2].minor.yy528=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} + case 198: /* expr ::= expr OR expr */ + case 199: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==199); + case 200: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==200); + case 201: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==201); + case 202: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==202); + case 203: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==203); + case 204: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==204); +{yymsp[-2].minor.yy454=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy454,yymsp[0].minor.yy454);} break; - case 203: /* likeop ::= NOT LIKE_KW|MATCH */ + case 205: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} break; - case 204: /* expr ::= expr likeop expr */ + case 206: /* expr ::= expr likeop expr */ { ExprList *pList; int bNot = yymsp[-1].minor.yy0.n & 0x80000000; yymsp[-1].minor.yy0.n &= 0x7fffffff; - 
pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy528); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy528); - yymsp[-2].minor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); - if( bNot ) yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy528, 0); - if( yymsp[-2].minor.yy528 ) yymsp[-2].minor.yy528->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy454); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy454); + yymsp[-2].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + if( bNot ) yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy454, 0); + if( yymsp[-2].minor.yy454 ) yymsp[-2].minor.yy454->flags |= EP_InfixFunc; } break; - case 205: /* expr ::= expr likeop expr ESCAPE expr */ + case 207: /* expr ::= expr likeop expr ESCAPE expr */ { ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000; yymsp[-3].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy528); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528); - yymsp[-4].minor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); - if( bNot ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); - if( yymsp[-4].minor.yy528 ) yymsp[-4].minor.yy528->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy454); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454); + yymsp[-4].minor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); + if( bNot ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); + if( yymsp[-4].minor.yy454 ) yymsp[-4].minor.yy454->flags |= EP_InfixFunc; } break; - case 206: /* expr ::= expr ISNULL|NOTNULL */ 
-{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy528,0);} + case 208: /* expr ::= expr ISNULL|NOTNULL */ +{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy454,0);} break; - case 207: /* expr ::= expr NOT NULL */ -{yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy528,0);} + case 209: /* expr ::= expr NOT NULL */ +{yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy454,0);} break; - case 208: /* expr ::= expr IS expr */ + case 210: /* expr ::= expr IS expr */ { - yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy528,yymsp[0].minor.yy528); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-2].minor.yy528, TK_ISNULL); + yymsp[-2].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy454,yymsp[0].minor.yy454); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-2].minor.yy454, TK_ISNULL); } break; - case 209: /* expr ::= expr IS NOT expr */ + case 211: /* expr ::= expr IS NOT expr */ { - yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy528,yymsp[0].minor.yy528); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-3].minor.yy528, TK_NOTNULL); + yymsp[-3].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy454,yymsp[0].minor.yy454); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-3].minor.yy454, TK_NOTNULL); } break; - case 210: /* expr ::= expr IS NOT DISTINCT FROM expr */ + case 212: /* expr ::= expr IS NOT DISTINCT FROM expr */ { - yymsp[-5].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy528,yymsp[0].minor.yy528); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-5].minor.yy528, TK_ISNULL); + yymsp[-5].minor.yy454 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy454,yymsp[0].minor.yy454); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-5].minor.yy454, TK_ISNULL); } break; - case 211: /* expr ::= expr IS DISTINCT FROM expr */ + case 213: /* expr ::= expr 
IS DISTINCT FROM expr */ { - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy528,yymsp[0].minor.yy528); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-4].minor.yy528, TK_NOTNULL); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy454,yymsp[0].minor.yy454); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy454, yymsp[-4].minor.yy454, TK_NOTNULL); } break; - case 212: /* expr ::= NOT expr */ - case 213: /* expr ::= BITNOT expr */ yytestcase(yyruleno==213); -{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy528, 0);/*A-overwrites-B*/} + case 214: /* expr ::= NOT expr */ + case 215: /* expr ::= BITNOT expr */ yytestcase(yyruleno==215); +{yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy454, 0);/*A-overwrites-B*/} break; - case 214: /* expr ::= PLUS|MINUS expr */ + case 216: /* expr ::= PLUS|MINUS expr */ { - yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? 
TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy528, 0); - /*A-overwrites-B*/ + Expr *p = yymsp[0].minor.yy454; + u8 op = yymsp[-1].major + (TK_UPLUS-TK_PLUS); + assert( TK_UPLUS>TK_PLUS ); + assert( TK_UMINUS == TK_MINUS + (TK_UPLUS - TK_PLUS) ); + if( p && p->op==TK_UPLUS ){ + p->op = op; + yymsp[-1].minor.yy454 = p; + }else{ + yymsp[-1].minor.yy454 = sqlite3PExpr(pParse, op, p, 0); + /*A-overwrites-B*/ + } } break; - case 215: /* expr ::= expr PTR expr */ + case 217: /* expr ::= expr PTR expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy528); - pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy528); - yylhsminor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy454); + pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy454); + yylhsminor.yy454 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); } - yymsp[-2].minor.yy528 = yylhsminor.yy528; + yymsp[-2].minor.yy454 = yylhsminor.yy454; break; - case 216: /* between_op ::= BETWEEN */ - case 219: /* in_op ::= IN */ yytestcase(yyruleno==219); -{yymsp[0].minor.yy394 = 0;} + case 218: /* between_op ::= BETWEEN */ + case 221: /* in_op ::= IN */ yytestcase(yyruleno==221); +{yymsp[0].minor.yy144 = 0;} break; - case 218: /* expr ::= expr between_op expr AND expr */ + case 220: /* expr ::= expr between_op expr AND expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528); - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy528, 0); - if( yymsp[-4].minor.yy528 ){ - yymsp[-4].minor.yy528->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy454); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy454, 0); + if( yymsp[-4].minor.yy454 ){ + 
yymsp[-4].minor.yy454->x.pList = pList; }else{ sqlite3ExprListDelete(pParse->db, pList); } - if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); + if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); } break; - case 221: /* expr ::= expr in_op LP exprlist RP */ + case 223: /* expr ::= expr in_op LP exprlist RP */ { - if( yymsp[-1].minor.yy322==0 ){ + if( yymsp[-1].minor.yy14==0 ){ /* Expressions of the form ** ** expr1 IN () @@ -175703,208 +177095,208 @@ static YYACTIONTYPE yy_reduce( ** simplify to constants 0 (false) and 1 (true), respectively, ** regardless of the value of expr1. */ - sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy528); - yymsp[-4].minor.yy528 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy394 ? "true" : "false"); - if( yymsp[-4].minor.yy528 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy528); - }else{ - Expr *pRHS = yymsp[-1].minor.yy322->a[0].pExpr; - if( yymsp[-1].minor.yy322->nExpr==1 && sqlite3ExprIsConstant(pRHS) && yymsp[-4].minor.yy528->op!=TK_VECTOR ){ - yymsp[-1].minor.yy322->a[0].pExpr = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); + sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy454); + yymsp[-4].minor.yy454 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy144 ? 
"true" : "false"); + if( yymsp[-4].minor.yy454 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy454); + }else{ + Expr *pRHS = yymsp[-1].minor.yy14->a[0].pExpr; + if( yymsp[-1].minor.yy14->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy454->op!=TK_VECTOR ){ + yymsp[-1].minor.yy14->a[0].pExpr = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0); - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy528, pRHS); - }else if( yymsp[-1].minor.yy322->nExpr==1 && pRHS->op==TK_SELECT ){ - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pRHS->x.pSelect); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy454, pRHS); + }else if( yymsp[-1].minor.yy14->nExpr==1 && pRHS->op==TK_SELECT ){ + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pRHS->x.pSelect); pRHS->x.pSelect = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); - }else{ - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); - if( yymsp[-4].minor.yy528==0 ){ - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); - }else if( yymsp[-4].minor.yy528->pLeft->op==TK_VECTOR ){ - int nExpr = yymsp[-4].minor.yy528->pLeft->x.pList->nExpr; - Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy322); + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); + }else{ + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); + if( yymsp[-4].minor.yy454==0 ){ + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); + }else if( yymsp[-4].minor.yy454->pLeft->op==TK_VECTOR ){ + int nExpr = yymsp[-4].minor.yy454->pLeft->x.pList->nExpr; + Select *pSelectRHS = sqlite3ExprListToValues(pParse, nExpr, yymsp[-1].minor.yy14); if( pSelectRHS ){ 
parserDoubleLinkSelect(pParse, pSelectRHS); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pSelectRHS); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelectRHS); } }else{ - yymsp[-4].minor.yy528->x.pList = yymsp[-1].minor.yy322; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy528); + yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy14; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454); } } - if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); + if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); } } break; - case 222: /* expr ::= LP select RP */ + case 224: /* expr ::= LP select RP */ { - yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); - sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy528, yymsp[-1].minor.yy47); + yymsp[-2].minor.yy454 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy454, yymsp[-1].minor.yy555); } break; - case 223: /* expr ::= expr in_op LP select RP */ + case 225: /* expr ::= expr in_op LP select RP */ { - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, yymsp[-1].minor.yy47); - if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, yymsp[-1].minor.yy555); + if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); } break; - case 224: /* expr ::= expr in_op nm dbnm paren_exprlist */ + case 226: /* expr ::= expr in_op nm dbnm paren_exprlist */ { SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0); - if( 
yymsp[0].minor.yy322 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy322); - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); - sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pSelect); - if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); + if( yymsp[0].minor.yy14 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy14); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy454, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy454, pSelect); + if( yymsp[-3].minor.yy144 ) yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy454, 0); } break; - case 225: /* expr ::= EXISTS LP select RP */ + case 227: /* expr ::= EXISTS LP select RP */ { Expr *p; - p = yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); - sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy47); + p = yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); + sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy555); } break; - case 226: /* expr ::= CASE case_operand case_exprlist case_else END */ + case 228: /* expr ::= CASE case_operand case_exprlist case_else END */ { - yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy528, 0); - if( yymsp[-4].minor.yy528 ){ - yymsp[-4].minor.yy528->x.pList = yymsp[-1].minor.yy528 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[-1].minor.yy528) : yymsp[-2].minor.yy322; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy528); + yymsp[-4].minor.yy454 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy454, 0); + if( yymsp[-4].minor.yy454 ){ + yymsp[-4].minor.yy454->x.pList = yymsp[-1].minor.yy454 ? 
sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[-1].minor.yy454) : yymsp[-2].minor.yy14; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy454); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy322); - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy528); + sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy14); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); } } break; - case 227: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ + case 229: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy528); - yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[0].minor.yy528); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[-2].minor.yy454); + yymsp[-4].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[0].minor.yy454); } break; - case 228: /* case_exprlist ::= WHEN expr THEN expr */ + case 230: /* case_exprlist ::= WHEN expr THEN expr */ { - yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); - yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, yymsp[0].minor.yy528); + yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy454); + yymsp[-3].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14, yymsp[0].minor.yy454); } break; - case 233: /* nexprlist ::= nexprlist COMMA expr */ -{yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy528);} + case 235: /* nexprlist ::= nexprlist COMMA expr */ +{yymsp[-2].minor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[0].minor.yy454);} break; - case 234: /* nexprlist ::= expr */ -{yymsp[0].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy528); /*A-overwrites-Y*/} + case 236: /* nexprlist ::= expr */ +{yymsp[0].minor.yy14 = 
sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy454); /*A-overwrites-Y*/} break; - case 236: /* paren_exprlist ::= LP exprlist RP */ - case 241: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==241); -{yymsp[-2].minor.yy322 = yymsp[-1].minor.yy322;} + case 238: /* paren_exprlist ::= LP exprlist RP */ + case 243: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==243); +{yymsp[-2].minor.yy14 = yymsp[-1].minor.yy14;} break; - case 237: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + case 239: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ { sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, - sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy322, yymsp[-10].minor.yy394, - &yymsp[-11].minor.yy0, yymsp[0].minor.yy528, SQLITE_SO_ASC, yymsp[-8].minor.yy394, SQLITE_IDXTYPE_APPDEF); + sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy14, yymsp[-10].minor.yy144, + &yymsp[-11].minor.yy0, yymsp[0].minor.yy454, SQLITE_SO_ASC, yymsp[-8].minor.yy144, SQLITE_IDXTYPE_APPDEF); if( IN_RENAME_OBJECT && pParse->pNewIndex ){ sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0); } } break; - case 238: /* uniqueflag ::= UNIQUE */ - case 280: /* raisetype ::= ABORT */ yytestcase(yyruleno==280); -{yymsp[0].minor.yy394 = OE_Abort;} + case 240: /* uniqueflag ::= UNIQUE */ + case 282: /* raisetype ::= ABORT */ yytestcase(yyruleno==282); +{yymsp[0].minor.yy144 = OE_Abort;} break; - case 239: /* uniqueflag ::= */ -{yymsp[1].minor.yy394 = OE_None;} + case 241: /* uniqueflag ::= */ +{yymsp[1].minor.yy144 = OE_None;} break; - case 242: /* eidlist ::= eidlist COMMA nm collate sortorder */ + case 244: /* eidlist ::= eidlist COMMA nm collate sortorder */ { - yymsp[-4].minor.yy322 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); + 
yymsp[-4].minor.yy14 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy14, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); } break; - case 243: /* eidlist ::= nm collate sortorder */ + case 245: /* eidlist ::= nm collate sortorder */ { - yymsp[-2].minor.yy322 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); /*A-overwrites-Y*/ + yymsp[-2].minor.yy14 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy144, yymsp[0].minor.yy144); /*A-overwrites-Y*/ } break; - case 246: /* cmd ::= DROP INDEX ifexists fullname */ -{sqlite3DropIndex(pParse, yymsp[0].minor.yy131, yymsp[-1].minor.yy394);} + case 248: /* cmd ::= DROP INDEX ifexists fullname */ +{sqlite3DropIndex(pParse, yymsp[0].minor.yy203, yymsp[-1].minor.yy144);} break; - case 247: /* cmd ::= VACUUM vinto */ -{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy528);} + case 249: /* cmd ::= VACUUM vinto */ +{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy454);} break; - case 248: /* cmd ::= VACUUM nm vinto */ -{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy528);} + case 250: /* cmd ::= VACUUM nm vinto */ +{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy454);} break; - case 251: /* cmd ::= PRAGMA nm dbnm */ + case 253: /* cmd ::= PRAGMA nm dbnm */ {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} break; - case 252: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ + case 254: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);} break; - case 253: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ + case 255: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);} break; - case 254: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ + case 256: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);} break; 
- case 255: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ + case 257: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);} break; - case 258: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + case 260: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ { Token all; all.z = yymsp[-3].minor.yy0.z; all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; - sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy33, &all); + sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy427, &all); } break; - case 259: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + case 261: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ { - sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy394, yymsp[-4].minor.yy180.a, yymsp[-4].minor.yy180.b, yymsp[-2].minor.yy131, yymsp[0].minor.yy528, yymsp[-10].minor.yy394, yymsp[-8].minor.yy394); + sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy144, yymsp[-4].minor.yy286.a, yymsp[-4].minor.yy286.b, yymsp[-2].minor.yy203, yymsp[0].minor.yy454, yymsp[-10].minor.yy144, yymsp[-8].minor.yy144); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ } break; - case 260: /* trigger_time ::= BEFORE|AFTER */ -{ yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/ } + case 262: /* trigger_time ::= BEFORE|AFTER */ +{ yymsp[0].minor.yy144 = yymsp[0].major; /*A-overwrites-X*/ } break; - case 261: /* trigger_time ::= INSTEAD OF */ -{ yymsp[-1].minor.yy394 = TK_INSTEAD;} + case 263: /* trigger_time ::= INSTEAD OF */ +{ yymsp[-1].minor.yy144 = TK_INSTEAD;} break; - case 262: /* trigger_time ::= */ -{ yymsp[1].minor.yy394 = TK_BEFORE; } + case 264: /* trigger_time 
::= */ +{ yymsp[1].minor.yy144 = TK_BEFORE; } break; - case 263: /* trigger_event ::= DELETE|INSERT */ - case 264: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==264); -{yymsp[0].minor.yy180.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy180.b = 0;} + case 265: /* trigger_event ::= DELETE|INSERT */ + case 266: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==266); +{yymsp[0].minor.yy286.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy286.b = 0;} break; - case 265: /* trigger_event ::= UPDATE OF idlist */ -{yymsp[-2].minor.yy180.a = TK_UPDATE; yymsp[-2].minor.yy180.b = yymsp[0].minor.yy254;} + case 267: /* trigger_event ::= UPDATE OF idlist */ +{yymsp[-2].minor.yy286.a = TK_UPDATE; yymsp[-2].minor.yy286.b = yymsp[0].minor.yy132;} break; - case 266: /* when_clause ::= */ - case 285: /* key_opt ::= */ yytestcase(yyruleno==285); -{ yymsp[1].minor.yy528 = 0; } + case 268: /* when_clause ::= */ + case 287: /* key_opt ::= */ yytestcase(yyruleno==287); +{ yymsp[1].minor.yy454 = 0; } break; - case 267: /* when_clause ::= WHEN expr */ - case 286: /* key_opt ::= KEY expr */ yytestcase(yyruleno==286); -{ yymsp[-1].minor.yy528 = yymsp[0].minor.yy528; } + case 269: /* when_clause ::= WHEN expr */ + case 288: /* key_opt ::= KEY expr */ yytestcase(yyruleno==288); +{ yymsp[-1].minor.yy454 = yymsp[0].minor.yy454; } break; - case 268: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + case 270: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ { - assert( yymsp[-2].minor.yy33!=0 ); - yymsp[-2].minor.yy33->pLast->pNext = yymsp[-1].minor.yy33; - yymsp[-2].minor.yy33->pLast = yymsp[-1].minor.yy33; + assert( yymsp[-2].minor.yy427!=0 ); + yymsp[-2].minor.yy427->pLast->pNext = yymsp[-1].minor.yy427; + yymsp[-2].minor.yy427->pLast = yymsp[-1].minor.yy427; } break; - case 269: /* trigger_cmd_list ::= trigger_cmd SEMI */ + case 271: /* trigger_cmd_list ::= trigger_cmd SEMI */ { - assert( yymsp[-1].minor.yy33!=0 ); - yymsp[-1].minor.yy33->pLast = 
yymsp[-1].minor.yy33; + assert( yymsp[-1].minor.yy427!=0 ); + yymsp[-1].minor.yy427->pLast = yymsp[-1].minor.yy427; } break; - case 270: /* trnm ::= nm DOT nm */ + case 272: /* trnm ::= nm DOT nm */ { yymsp[-2].minor.yy0 = yymsp[0].minor.yy0; sqlite3ErrorMsg(pParse, @@ -175912,367 +177304,377 @@ static YYACTIONTYPE yy_reduce( "statements within triggers"); } break; - case 271: /* tridxby ::= INDEXED BY nm */ + case 273: /* tridxby ::= INDEXED BY nm */ { sqlite3ErrorMsg(pParse, "the INDEXED BY clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 272: /* tridxby ::= NOT INDEXED */ + case 274: /* tridxby ::= NOT INDEXED */ { sqlite3ErrorMsg(pParse, "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 273: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ -{yylhsminor.yy33 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy131, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528, yymsp[-7].minor.yy394, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy522);} - yymsp[-8].minor.yy33 = yylhsminor.yy33; + case 275: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ +{yylhsminor.yy427 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy203, yymsp[-3].minor.yy14, yymsp[-1].minor.yy454, yymsp[-7].minor.yy144, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy168);} + yymsp[-8].minor.yy427 = yylhsminor.yy427; break; - case 274: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + case 276: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ { - yylhsminor.yy33 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy254,yymsp[-2].minor.yy47,yymsp[-6].minor.yy394,yymsp[-1].minor.yy444,yymsp[-7].minor.yy522,yymsp[0].minor.yy522);/*yylhsminor.yy33-overwrites-yymsp[-6].minor.yy394*/ + yylhsminor.yy427 = 
sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy132,yymsp[-2].minor.yy555,yymsp[-6].minor.yy144,yymsp[-1].minor.yy122,yymsp[-7].minor.yy168,yymsp[0].minor.yy168);/*yylhsminor.yy427-overwrites-yymsp[-6].minor.yy144*/ } - yymsp[-7].minor.yy33 = yylhsminor.yy33; + yymsp[-7].minor.yy427 = yylhsminor.yy427; break; - case 275: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ -{yylhsminor.yy33 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy528, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy522);} - yymsp[-5].minor.yy33 = yylhsminor.yy33; + case 277: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ +{yylhsminor.yy427 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy454, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy168);} + yymsp[-5].minor.yy427 = yylhsminor.yy427; break; - case 276: /* trigger_cmd ::= scanpt select scanpt */ -{yylhsminor.yy33 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy47, yymsp[-2].minor.yy522, yymsp[0].minor.yy522); /*yylhsminor.yy33-overwrites-yymsp[-1].minor.yy47*/} - yymsp[-2].minor.yy33 = yylhsminor.yy33; + case 278: /* trigger_cmd ::= scanpt select scanpt */ +{yylhsminor.yy427 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy555, yymsp[-2].minor.yy168, yymsp[0].minor.yy168); /*yylhsminor.yy427-overwrites-yymsp[-1].minor.yy555*/} + yymsp[-2].minor.yy427 = yylhsminor.yy427; break; - case 277: /* expr ::= RAISE LP IGNORE RP */ + case 279: /* expr ::= RAISE LP IGNORE RP */ { - yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); - if( yymsp[-3].minor.yy528 ){ - yymsp[-3].minor.yy528->affExpr = OE_Ignore; + yymsp[-3].minor.yy454 = sqlite3PExpr(pParse, TK_RAISE, 0, 0); + if( yymsp[-3].minor.yy454 ){ + yymsp[-3].minor.yy454->affExpr = OE_Ignore; } } break; - case 278: /* expr ::= RAISE LP raisetype COMMA nm RP */ + case 280: /* expr ::= RAISE LP raisetype COMMA nm RP */ { - yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, 
TK_RAISE, &yymsp[-1].minor.yy0, 1); - if( yymsp[-5].minor.yy528 ) { - yymsp[-5].minor.yy528->affExpr = (char)yymsp[-3].minor.yy394; + yymsp[-5].minor.yy454 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); + if( yymsp[-5].minor.yy454 ) { + yymsp[-5].minor.yy454->affExpr = (char)yymsp[-3].minor.yy144; } } break; - case 279: /* raisetype ::= ROLLBACK */ -{yymsp[0].minor.yy394 = OE_Rollback;} + case 281: /* raisetype ::= ROLLBACK */ +{yymsp[0].minor.yy144 = OE_Rollback;} break; - case 281: /* raisetype ::= FAIL */ -{yymsp[0].minor.yy394 = OE_Fail;} + case 283: /* raisetype ::= FAIL */ +{yymsp[0].minor.yy144 = OE_Fail;} break; - case 282: /* cmd ::= DROP TRIGGER ifexists fullname */ + case 284: /* cmd ::= DROP TRIGGER ifexists fullname */ { - sqlite3DropTrigger(pParse,yymsp[0].minor.yy131,yymsp[-1].minor.yy394); + sqlite3DropTrigger(pParse,yymsp[0].minor.yy203,yymsp[-1].minor.yy144); } break; - case 283: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + case 285: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { - sqlite3Attach(pParse, yymsp[-3].minor.yy528, yymsp[-1].minor.yy528, yymsp[0].minor.yy528); + sqlite3Attach(pParse, yymsp[-3].minor.yy454, yymsp[-1].minor.yy454, yymsp[0].minor.yy454); } break; - case 284: /* cmd ::= DETACH database_kw_opt expr */ + case 286: /* cmd ::= DETACH database_kw_opt expr */ { - sqlite3Detach(pParse, yymsp[0].minor.yy528); + sqlite3Detach(pParse, yymsp[0].minor.yy454); } break; - case 287: /* cmd ::= REINDEX */ + case 289: /* cmd ::= REINDEX */ {sqlite3Reindex(pParse, 0, 0);} break; - case 288: /* cmd ::= REINDEX nm dbnm */ + case 290: /* cmd ::= REINDEX nm dbnm */ {sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 289: /* cmd ::= ANALYZE */ + case 291: /* cmd ::= ANALYZE */ {sqlite3Analyze(pParse, 0, 0);} break; - case 290: /* cmd ::= ANALYZE nm dbnm */ + case 292: /* cmd ::= ANALYZE nm dbnm */ {sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} 
break; - case 291: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ + case 293: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { - sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy131,&yymsp[0].minor.yy0); + sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy203,&yymsp[0].minor.yy0); } break; - case 292: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + case 294: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ { yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n; sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0); } break; - case 293: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + case 295: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ { - sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy131, &yymsp[0].minor.yy0); + sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy203, &yymsp[0].minor.yy0); } break; - case 294: /* add_column_fullname ::= fullname */ + case 296: /* add_column_fullname ::= fullname */ { disableLookaside(pParse); - sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy131); + sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy203); } break; - case 295: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + case 297: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ { - sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy131, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy203, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 296: /* cmd ::= create_vtab */ + case 298: /* cmd ::= create_vtab */ {sqlite3VtabFinishParse(pParse,0);} break; - case 297: /* cmd ::= create_vtab LP vtabarglist RP */ + case 299: /* cmd ::= create_vtab LP vtabarglist RP */ {sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);} break; - case 298: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + case 300: /* create_vtab ::= createkw 
VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { - sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy394); + sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy144); } break; - case 299: /* vtabarg ::= */ + case 301: /* vtabarg ::= */ {sqlite3VtabArgInit(pParse);} break; - case 300: /* vtabargtoken ::= ANY */ - case 301: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==301); - case 302: /* lp ::= LP */ yytestcase(yyruleno==302); + case 302: /* vtabargtoken ::= ANY */ + case 303: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==303); + case 304: /* lp ::= LP */ yytestcase(yyruleno==304); {sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);} break; - case 303: /* with ::= WITH wqlist */ - case 304: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==304); -{ sqlite3WithPush(pParse, yymsp[0].minor.yy521, 1); } + case 305: /* with ::= WITH wqlist */ + case 306: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==306); +{ sqlite3WithPush(pParse, yymsp[0].minor.yy59, 1); } break; - case 305: /* wqas ::= AS */ -{yymsp[0].minor.yy516 = M10d_Any;} + case 307: /* wqas ::= AS */ +{yymsp[0].minor.yy462 = M10d_Any;} break; - case 306: /* wqas ::= AS MATERIALIZED */ -{yymsp[-1].minor.yy516 = M10d_Yes;} + case 308: /* wqas ::= AS MATERIALIZED */ +{yymsp[-1].minor.yy462 = M10d_Yes;} break; - case 307: /* wqas ::= AS NOT MATERIALIZED */ -{yymsp[-2].minor.yy516 = M10d_No;} + case 309: /* wqas ::= AS NOT MATERIALIZED */ +{yymsp[-2].minor.yy462 = M10d_No;} break; - case 308: /* wqitem ::= nm eidlist_opt wqas LP select RP */ + case 310: /* wqitem ::= withnm eidlist_opt wqas LP select RP */ { - yymsp[-5].minor.yy385 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy322, yymsp[-1].minor.yy47, yymsp[-3].minor.yy516); /*A-overwrites-X*/ + yymsp[-5].minor.yy67 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, 
yymsp[-1].minor.yy555, yymsp[-3].minor.yy462); /*A-overwrites-X*/ } break; - case 309: /* wqlist ::= wqitem */ + case 311: /* withnm ::= nm */ +{pParse->bHasWith = 1;} + break; + case 312: /* wqlist ::= wqitem */ { - yymsp[0].minor.yy521 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy385); /*A-overwrites-X*/ + yymsp[0].minor.yy59 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy67); /*A-overwrites-X*/ } break; - case 310: /* wqlist ::= wqlist COMMA wqitem */ + case 313: /* wqlist ::= wqlist COMMA wqitem */ { - yymsp[-2].minor.yy521 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy521, yymsp[0].minor.yy385); + yymsp[-2].minor.yy59 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy59, yymsp[0].minor.yy67); } break; - case 311: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ + case 314: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { - assert( yymsp[0].minor.yy41!=0 ); - sqlite3WindowChain(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy41); - yymsp[0].minor.yy41->pNextWin = yymsp[-2].minor.yy41; - yylhsminor.yy41 = yymsp[0].minor.yy41; + assert( yymsp[0].minor.yy211!=0 ); + sqlite3WindowChain(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy211); + yymsp[0].minor.yy211->pNextWin = yymsp[-2].minor.yy211; + yylhsminor.yy211 = yymsp[0].minor.yy211; } - yymsp[-2].minor.yy41 = yylhsminor.yy41; + yymsp[-2].minor.yy211 = yylhsminor.yy211; break; - case 312: /* windowdefn ::= nm AS LP window RP */ + case 315: /* windowdefn ::= nm AS LP window RP */ { - if( ALWAYS(yymsp[-1].minor.yy41) ){ - yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); + if( ALWAYS(yymsp[-1].minor.yy211) ){ + yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); } - yylhsminor.yy41 = yymsp[-1].minor.yy41; + yylhsminor.yy211 = yymsp[-1].minor.yy211; } - yymsp[-4].minor.yy41 = yylhsminor.yy41; + yymsp[-4].minor.yy211 = yylhsminor.yy211; break; - case 313: /* window ::= PARTITION BY nexprlist 
orderby_opt frame_opt */ + case 316: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { - yymsp[-4].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, 0); + yymsp[-4].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, 0); } break; - case 314: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + case 317: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { - yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, &yymsp[-5].minor.yy0); + yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, yymsp[-2].minor.yy14, yymsp[-1].minor.yy14, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy41 = yylhsminor.yy41; + yymsp[-5].minor.yy211 = yylhsminor.yy211; break; - case 315: /* window ::= ORDER BY sortlist frame_opt */ + case 318: /* window ::= ORDER BY sortlist frame_opt */ { - yymsp[-3].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, 0); + yymsp[-3].minor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, 0); } break; - case 316: /* window ::= nm ORDER BY sortlist frame_opt */ + case 319: /* window ::= nm ORDER BY sortlist frame_opt */ { - yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0); + yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy41 = yylhsminor.yy41; + yymsp[-4].minor.yy211 = yylhsminor.yy211; break; - case 317: /* window ::= nm frame_opt */ + case 320: /* window ::= nm frame_opt */ { - yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, 0, &yymsp[-1].minor.yy0); + yylhsminor.yy211 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy211, 0, 0, &yymsp[-1].minor.yy0); } - 
yymsp[-1].minor.yy41 = yylhsminor.yy41; + yymsp[-1].minor.yy211 = yylhsminor.yy211; break; - case 318: /* frame_opt ::= */ + case 321: /* frame_opt ::= */ { - yymsp[1].minor.yy41 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); + yymsp[1].minor.yy211 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; - case 319: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + case 322: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { - yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy394, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy516); + yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy144, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy462); } - yymsp[-2].minor.yy41 = yylhsminor.yy41; + yymsp[-2].minor.yy211 = yylhsminor.yy211; break; - case 320: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + case 323: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { - yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy394, yymsp[-3].minor.yy595.eType, yymsp[-3].minor.yy595.pExpr, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, yymsp[0].minor.yy516); + yylhsminor.yy211 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy144, yymsp[-3].minor.yy509.eType, yymsp[-3].minor.yy509.pExpr, yymsp[-1].minor.yy509.eType, yymsp[-1].minor.yy509.pExpr, yymsp[0].minor.yy462); } - yymsp[-5].minor.yy41 = yylhsminor.yy41; + yymsp[-5].minor.yy211 = yylhsminor.yy211; break; - case 322: /* frame_bound_s ::= frame_bound */ - case 324: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==324); -{yylhsminor.yy595 = yymsp[0].minor.yy595;} - yymsp[0].minor.yy595 = yylhsminor.yy595; + case 325: /* frame_bound_s ::= frame_bound */ + case 327: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==327); 
+{yylhsminor.yy509 = yymsp[0].minor.yy509;} + yymsp[0].minor.yy509 = yylhsminor.yy509; break; - case 323: /* frame_bound_s ::= UNBOUNDED PRECEDING */ - case 325: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==325); - case 327: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==327); -{yylhsminor.yy595.eType = yymsp[-1].major; yylhsminor.yy595.pExpr = 0;} - yymsp[-1].minor.yy595 = yylhsminor.yy595; + case 326: /* frame_bound_s ::= UNBOUNDED PRECEDING */ + case 328: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==328); + case 330: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==330); +{yylhsminor.yy509.eType = yymsp[-1].major; yylhsminor.yy509.pExpr = 0;} + yymsp[-1].minor.yy509 = yylhsminor.yy509; break; - case 326: /* frame_bound ::= expr PRECEDING|FOLLOWING */ -{yylhsminor.yy595.eType = yymsp[0].major; yylhsminor.yy595.pExpr = yymsp[-1].minor.yy528;} - yymsp[-1].minor.yy595 = yylhsminor.yy595; + case 329: /* frame_bound ::= expr PRECEDING|FOLLOWING */ +{yylhsminor.yy509.eType = yymsp[0].major; yylhsminor.yy509.pExpr = yymsp[-1].minor.yy454;} + yymsp[-1].minor.yy509 = yylhsminor.yy509; break; - case 328: /* frame_exclude_opt ::= */ -{yymsp[1].minor.yy516 = 0;} + case 331: /* frame_exclude_opt ::= */ +{yymsp[1].minor.yy462 = 0;} break; - case 329: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ -{yymsp[-1].minor.yy516 = yymsp[0].minor.yy516;} + case 332: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ +{yymsp[-1].minor.yy462 = yymsp[0].minor.yy462;} break; - case 330: /* frame_exclude ::= NO OTHERS */ - case 331: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==331); -{yymsp[-1].minor.yy516 = yymsp[-1].major; /*A-overwrites-X*/} + case 333: /* frame_exclude ::= NO OTHERS */ + case 334: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==334); +{yymsp[-1].minor.yy462 = yymsp[-1].major; /*A-overwrites-X*/} break; - case 332: /* frame_exclude ::= GROUP|TIES */ -{yymsp[0].minor.yy516 = yymsp[0].major; 
/*A-overwrites-X*/} + case 335: /* frame_exclude ::= GROUP|TIES */ +{yymsp[0].minor.yy462 = yymsp[0].major; /*A-overwrites-X*/} break; - case 333: /* window_clause ::= WINDOW windowdefn_list */ -{ yymsp[-1].minor.yy41 = yymsp[0].minor.yy41; } + case 336: /* window_clause ::= WINDOW windowdefn_list */ +{ yymsp[-1].minor.yy211 = yymsp[0].minor.yy211; } break; - case 334: /* filter_over ::= filter_clause over_clause */ + case 337: /* filter_over ::= filter_clause over_clause */ { - if( yymsp[0].minor.yy41 ){ - yymsp[0].minor.yy41->pFilter = yymsp[-1].minor.yy528; + if( yymsp[0].minor.yy211 ){ + yymsp[0].minor.yy211->pFilter = yymsp[-1].minor.yy454; }else{ - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy528); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy454); } - yylhsminor.yy41 = yymsp[0].minor.yy41; + yylhsminor.yy211 = yymsp[0].minor.yy211; } - yymsp[-1].minor.yy41 = yylhsminor.yy41; + yymsp[-1].minor.yy211 = yylhsminor.yy211; break; - case 335: /* filter_over ::= over_clause */ + case 338: /* filter_over ::= over_clause */ { - yylhsminor.yy41 = yymsp[0].minor.yy41; + yylhsminor.yy211 = yymsp[0].minor.yy211; } - yymsp[0].minor.yy41 = yylhsminor.yy41; + yymsp[0].minor.yy211 = yylhsminor.yy211; break; - case 336: /* filter_over ::= filter_clause */ + case 339: /* filter_over ::= filter_clause */ { - yylhsminor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yylhsminor.yy41 ){ - yylhsminor.yy41->eFrmType = TK_FILTER; - yylhsminor.yy41->pFilter = yymsp[0].minor.yy528; + yylhsminor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yylhsminor.yy211 ){ + yylhsminor.yy211->eFrmType = TK_FILTER; + yylhsminor.yy211->pFilter = yymsp[0].minor.yy454; }else{ - sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy528); + sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy454); } } - yymsp[0].minor.yy41 = yylhsminor.yy41; + yymsp[0].minor.yy211 = yylhsminor.yy211; break; - case 337: /* over_clause ::= OVER LP window RP */ + case 340: /* 
over_clause ::= OVER LP window RP */ { - yymsp[-3].minor.yy41 = yymsp[-1].minor.yy41; - assert( yymsp[-3].minor.yy41!=0 ); + yymsp[-3].minor.yy211 = yymsp[-1].minor.yy211; + assert( yymsp[-3].minor.yy211!=0 ); } break; - case 338: /* over_clause ::= OVER nm */ + case 341: /* over_clause ::= OVER nm */ { - yymsp[-1].minor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); - if( yymsp[-1].minor.yy41 ){ - yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); + yymsp[-1].minor.yy211 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); + if( yymsp[-1].minor.yy211 ){ + yymsp[-1].minor.yy211->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n); } } break; - case 339: /* filter_clause ::= FILTER LP WHERE expr RP */ -{ yymsp[-4].minor.yy528 = yymsp[-1].minor.yy528; } + case 342: /* filter_clause ::= FILTER LP WHERE expr RP */ +{ yymsp[-4].minor.yy454 = yymsp[-1].minor.yy454; } + break; + case 343: /* term ::= QNUMBER */ +{ + yylhsminor.yy454=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); + sqlite3DequoteNumber(pParse, yylhsminor.yy454); +} + yymsp[0].minor.yy454 = yylhsminor.yy454; break; default: - /* (340) input ::= cmdlist */ yytestcase(yyruleno==340); - /* (341) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==341); - /* (342) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=342); - /* (343) ecmd ::= SEMI */ yytestcase(yyruleno==343); - /* (344) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==344); - /* (345) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=345); - /* (346) trans_opt ::= */ yytestcase(yyruleno==346); - /* (347) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==347); - /* (348) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==348); - /* (349) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==349); - /* (350) savepoint_opt ::= */ yytestcase(yyruleno==350); - /* (351) cmd ::= create_table create_table_args */ 
yytestcase(yyruleno==351); - /* (352) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=352); - /* (353) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==353); - /* (354) columnlist ::= columnname carglist */ yytestcase(yyruleno==354); - /* (355) nm ::= ID|INDEXED|JOIN_KW */ yytestcase(yyruleno==355); - /* (356) nm ::= STRING */ yytestcase(yyruleno==356); - /* (357) typetoken ::= typename */ yytestcase(yyruleno==357); - /* (358) typename ::= ID|STRING */ yytestcase(yyruleno==358); - /* (359) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=359); - /* (360) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=360); - /* (361) carglist ::= carglist ccons */ yytestcase(yyruleno==361); - /* (362) carglist ::= */ yytestcase(yyruleno==362); - /* (363) ccons ::= NULL onconf */ yytestcase(yyruleno==363); - /* (364) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==364); - /* (365) ccons ::= AS generated */ yytestcase(yyruleno==365); - /* (366) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==366); - /* (367) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==367); - /* (368) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=368); - /* (369) tconscomma ::= */ yytestcase(yyruleno==369); - /* (370) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=370); - /* (371) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=371); - /* (372) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=372); - /* (373) oneselect ::= values */ yytestcase(yyruleno==373); - /* (374) sclp ::= selcollist COMMA */ yytestcase(yyruleno==374); - /* (375) as ::= ID|STRING */ yytestcase(yyruleno==375); - /* (376) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=376); - /* (377) returning ::= */ yytestcase(yyruleno==377); - /* (378) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=378); - /* (379) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==379); 
- /* (380) case_operand ::= expr */ yytestcase(yyruleno==380); - /* (381) exprlist ::= nexprlist */ yytestcase(yyruleno==381); - /* (382) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=382); - /* (383) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=383); - /* (384) nmnum ::= ON */ yytestcase(yyruleno==384); - /* (385) nmnum ::= DELETE */ yytestcase(yyruleno==385); - /* (386) nmnum ::= DEFAULT */ yytestcase(yyruleno==386); - /* (387) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==387); - /* (388) foreach_clause ::= */ yytestcase(yyruleno==388); - /* (389) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==389); - /* (390) trnm ::= nm */ yytestcase(yyruleno==390); - /* (391) tridxby ::= */ yytestcase(yyruleno==391); - /* (392) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==392); - /* (393) database_kw_opt ::= */ yytestcase(yyruleno==393); - /* (394) kwcolumn_opt ::= */ yytestcase(yyruleno==394); - /* (395) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==395); - /* (396) vtabarglist ::= vtabarg */ yytestcase(yyruleno==396); - /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==397); - /* (398) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==398); - /* (399) anylist ::= */ yytestcase(yyruleno==399); - /* (400) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==400); - /* (401) anylist ::= anylist ANY */ yytestcase(yyruleno==401); - /* (402) with ::= */ yytestcase(yyruleno==402); - /* (403) windowdefn_list ::= windowdefn (OPTIMIZED OUT) */ assert(yyruleno!=403); - /* (404) window ::= frame_opt (OPTIMIZED OUT) */ assert(yyruleno!=404); + /* (344) input ::= cmdlist */ yytestcase(yyruleno==344); + /* (345) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==345); + /* (346) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=346); + /* (347) ecmd ::= SEMI */ yytestcase(yyruleno==347); + /* (348) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==348); + /* (349) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ 
assert(yyruleno!=349); + /* (350) trans_opt ::= */ yytestcase(yyruleno==350); + /* (351) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==351); + /* (352) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==352); + /* (353) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==353); + /* (354) savepoint_opt ::= */ yytestcase(yyruleno==354); + /* (355) cmd ::= create_table create_table_args */ yytestcase(yyruleno==355); + /* (356) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=356); + /* (357) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==357); + /* (358) columnlist ::= columnname carglist */ yytestcase(yyruleno==358); + /* (359) nm ::= ID|INDEXED|JOIN_KW */ yytestcase(yyruleno==359); + /* (360) nm ::= STRING */ yytestcase(yyruleno==360); + /* (361) typetoken ::= typename */ yytestcase(yyruleno==361); + /* (362) typename ::= ID|STRING */ yytestcase(yyruleno==362); + /* (363) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=363); + /* (364) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=364); + /* (365) carglist ::= carglist ccons */ yytestcase(yyruleno==365); + /* (366) carglist ::= */ yytestcase(yyruleno==366); + /* (367) ccons ::= NULL onconf */ yytestcase(yyruleno==367); + /* (368) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==368); + /* (369) ccons ::= AS generated */ yytestcase(yyruleno==369); + /* (370) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==370); + /* (371) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==371); + /* (372) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=372); + /* (373) tconscomma ::= */ yytestcase(yyruleno==373); + /* (374) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=374); + /* (375) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=375); + /* (376) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=376); + /* (377) oneselect ::= values */ 
yytestcase(yyruleno==377); + /* (378) sclp ::= selcollist COMMA */ yytestcase(yyruleno==378); + /* (379) as ::= ID|STRING */ yytestcase(yyruleno==379); + /* (380) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=380); + /* (381) returning ::= */ yytestcase(yyruleno==381); + /* (382) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=382); + /* (383) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==383); + /* (384) case_operand ::= expr */ yytestcase(yyruleno==384); + /* (385) exprlist ::= nexprlist */ yytestcase(yyruleno==385); + /* (386) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=386); + /* (387) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=387); + /* (388) nmnum ::= ON */ yytestcase(yyruleno==388); + /* (389) nmnum ::= DELETE */ yytestcase(yyruleno==389); + /* (390) nmnum ::= DEFAULT */ yytestcase(yyruleno==390); + /* (391) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==391); + /* (392) foreach_clause ::= */ yytestcase(yyruleno==392); + /* (393) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==393); + /* (394) trnm ::= nm */ yytestcase(yyruleno==394); + /* (395) tridxby ::= */ yytestcase(yyruleno==395); + /* (396) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==396); + /* (397) database_kw_opt ::= */ yytestcase(yyruleno==397); + /* (398) kwcolumn_opt ::= */ yytestcase(yyruleno==398); + /* (399) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==399); + /* (400) vtabarglist ::= vtabarg */ yytestcase(yyruleno==400); + /* (401) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==401); + /* (402) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==402); + /* (403) anylist ::= */ yytestcase(yyruleno==403); + /* (404) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==404); + /* (405) anylist ::= anylist ANY */ yytestcase(yyruleno==405); + /* (406) with ::= */ yytestcase(yyruleno==406); + /* (407) windowdefn_list ::= windowdefn (OPTIMIZED OUT) */ assert(yyruleno!=407); + /* (408) window ::= frame_opt 
(OPTIMIZED OUT) */ assert(yyruleno!=408); break; /********** End reduce actions ************************************************/ }; @@ -176459,19 +177861,12 @@ SQLITE_PRIVATE void sqlite3Parser( (int)(yypParser->yytos - yypParser->yystack)); } #endif -#if YYSTACKDEPTH>0 if( yypParser->yytos>=yypParser->yystackEnd ){ - yyStackOverflow(yypParser); - break; - } -#else - if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ if( yyGrowStack(yypParser) ){ yyStackOverflow(yypParser); break; } } -#endif } yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor sqlite3ParserCTX_PARAM); }else if( yyact <= YY_MAX_SHIFTREDUCE ){ @@ -177542,27 +178937,58 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ *tokenType = TK_INTEGER; #ifndef SQLITE_OMIT_HEX_INTEGER if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){ - for(i=3; sqlite3Isxdigit(z[i]); i++){} - return i; - } + for(i=3; 1; i++){ + if( sqlite3Isxdigit(z[i])==0 ){ + if( z[i]==SQLITE_DIGIT_SEPARATOR ){ + *tokenType = TK_QNUMBER; + }else{ + break; + } + } + } + }else #endif - for(i=0; sqlite3Isdigit(z[i]); i++){} + { + for(i=0; 1; i++){ + if( sqlite3Isdigit(z[i])==0 ){ + if( z[i]==SQLITE_DIGIT_SEPARATOR ){ + *tokenType = TK_QNUMBER; + }else{ + break; + } + } + } #ifndef SQLITE_OMIT_FLOATING_POINT - if( z[i]=='.' ){ - i++; - while( sqlite3Isdigit(z[i]) ){ i++; } - *tokenType = TK_FLOAT; - } - if( (z[i]=='e' || z[i]=='E') && - ( sqlite3Isdigit(z[i+1]) - || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2])) - ) - ){ - i += 2; - while( sqlite3Isdigit(z[i]) ){ i++; } - *tokenType = TK_FLOAT; - } + if( z[i]=='.' 
){ + if( *tokenType==TK_INTEGER ) *tokenType = TK_FLOAT; + for(i++; 1; i++){ + if( sqlite3Isdigit(z[i])==0 ){ + if( z[i]==SQLITE_DIGIT_SEPARATOR ){ + *tokenType = TK_QNUMBER; + }else{ + break; + } + } + } + } + if( (z[i]=='e' || z[i]=='E') && + ( sqlite3Isdigit(z[i+1]) + || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2])) + ) + ){ + if( *tokenType==TK_INTEGER ) *tokenType = TK_FLOAT; + for(i+=2; 1; i++){ + if( sqlite3Isdigit(z[i])==0 ){ + if( z[i]==SQLITE_DIGIT_SEPARATOR ){ + *tokenType = TK_QNUMBER; + }else{ + break; + } + } + } + } #endif + } while( IdChar(z[i]) ){ *tokenType = TK_ILLEGAL; i++; @@ -177727,10 +179153,13 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ if( tokenType>=TK_WINDOW ){ assert( tokenType==TK_SPACE || tokenType==TK_OVER || tokenType==TK_FILTER || tokenType==TK_ILLEGAL || tokenType==TK_WINDOW + || tokenType==TK_QNUMBER ); #else if( tokenType>=TK_SPACE ){ - assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL ); + assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL + || tokenType==TK_QNUMBER + ); #endif /* SQLITE_OMIT_WINDOWFUNC */ if( AtomicLoad(&db->u1.isInterrupted) ){ pParse->rc = SQLITE_INTERRUPT; @@ -177763,7 +179192,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ assert( n==6 ); tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed); #endif /* SQLITE_OMIT_WINDOWFUNC */ - }else{ + }else if( tokenType!=TK_QNUMBER ){ Token x; x.z = zSql; x.n = n; @@ -188758,22 +190187,24 @@ static int fts3IntegrityMethod( char **pzErr /* Write error message here */ ){ Fts3Table *p = (Fts3Table*)pVtab; - int rc; + int rc = SQLITE_OK; int bOk = 0; UNUSED_PARAMETER(isQuick); rc = sqlite3Fts3IntegrityCheck(p, &bOk); - assert( rc!=SQLITE_CORRUPT_VTAB || bOk==0 ); - if( rc!=SQLITE_OK && rc!=SQLITE_CORRUPT_VTAB ){ + assert( rc!=SQLITE_CORRUPT_VTAB ); + if( rc==SQLITE_ERROR || (rc&0xFF)==SQLITE_CORRUPT ){ *pzErr = sqlite3_mprintf("unable to validate the inverted index for" " FTS%d table 
%s.%s: %s", p->bFts4 ? 4 : 3, zSchema, zTabname, sqlite3_errstr(rc)); - }else if( bOk==0 ){ + if( *pzErr ) rc = SQLITE_OK; + }else if( rc==SQLITE_OK && bOk==0 ){ *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s", p->bFts4 ? 4 : 3, zSchema, zTabname); + if( *pzErr==0 ) rc = SQLITE_NOMEM; } sqlite3Fts3SegmentsClose(p); - return SQLITE_OK; + return rc; } @@ -200435,7 +201866,12 @@ SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk){ sqlite3_finalize(pStmt); } - *pbOk = (rc==SQLITE_OK && cksum1==cksum2); + if( rc==SQLITE_CORRUPT_VTAB ){ + rc = SQLITE_OK; + *pbOk = 0; + }else{ + *pbOk = (rc==SQLITE_OK && cksum1==cksum2); + } return rc; } @@ -201341,7 +202777,7 @@ static void fts3SnippetDetails( } mCover |= mPhrase; - for(j=0; jnToken; j++){ + for(j=0; jnToken && jnSnippet; j++){ mHighlight |= (mPos>>j); } @@ -204002,7 +205438,6 @@ static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){ } } - /* Append formatted text (not to exceed N bytes) to the JsonString. */ static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){ @@ -204060,6 +205495,40 @@ static void jsonAppendSeparator(JsonString *p){ jsonAppendChar(p, ','); } +/* c is a control character. Append the canonical JSON representation +** of that control character to p. +** +** This routine assumes that the output buffer has already been enlarged +** sufficiently to hold the worst-case encoding plus a nul terminator. 
+*/ +static void jsonAppendControlChar(JsonString *p, u8 c){ + static const char aSpecial[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + assert( sizeof(aSpecial)==32 ); + assert( aSpecial['\b']=='b' ); + assert( aSpecial['\f']=='f' ); + assert( aSpecial['\n']=='n' ); + assert( aSpecial['\r']=='r' ); + assert( aSpecial['\t']=='t' ); + assert( c>=0 && cnUsed+7 <= p->nAlloc ); + if( aSpecial[c] ){ + p->zBuf[p->nUsed] = '\\'; + p->zBuf[p->nUsed+1] = aSpecial[c]; + p->nUsed += 2; + }else{ + p->zBuf[p->nUsed] = '\\'; + p->zBuf[p->nUsed+1] = 'u'; + p->zBuf[p->nUsed+2] = '0'; + p->zBuf[p->nUsed+3] = '0'; + p->zBuf[p->nUsed+4] = "0123456789abcdef"[c>>4]; + p->zBuf[p->nUsed+5] = "0123456789abcdef"[c&0xf]; + p->nUsed += 6; + } +} + /* Append the N-byte string in zIn to the end of the JsonString string ** under construction. Enclose the string in double-quotes ("...") and ** escape any double-quotes or backslash characters contained within the @@ -204119,35 +205588,14 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){ } c = z[0]; if( c=='"' || c=='\\' ){ - json_simple_escape: if( (p->nUsed+N+3 > p->nAlloc) && jsonStringGrow(p,N+3)!=0 ) return; p->zBuf[p->nUsed++] = '\\'; p->zBuf[p->nUsed++] = c; }else if( c=='\'' ){ p->zBuf[p->nUsed++] = c; }else{ - static const char aSpecial[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - assert( sizeof(aSpecial)==32 ); - assert( aSpecial['\b']=='b' ); - assert( aSpecial['\f']=='f' ); - assert( aSpecial['\n']=='n' ); - assert( aSpecial['\r']=='r' ); - assert( aSpecial['\t']=='t' ); - assert( c>=0 && cnUsed+N+7 > p->nAlloc) && jsonStringGrow(p,N+7)!=0 ) return; - p->zBuf[p->nUsed++] = '\\'; - p->zBuf[p->nUsed++] = 'u'; - p->zBuf[p->nUsed++] = '0'; - p->zBuf[p->nUsed++] = '0'; - p->zBuf[p->nUsed++] = "0123456789abcdef"[c>>4]; - p->zBuf[p->nUsed++] = "0123456789abcdef"[c&0xf]; + 
jsonAppendControlChar(p, c); } z++; N--; @@ -204848,7 +206296,10 @@ static u32 jsonbValidityCheck( if( !jsonIsOk[z[j]] && z[j]!='\'' ){ if( z[j]=='"' ){ if( x==JSONB_TEXTJ ) return j+1; - }else if( z[j]!='\\' || j+1>=k ){ + }else if( z[j]<=0x1f ){ + /* Control characters in JSON5 string literals are ok */ + if( x==JSONB_TEXTJ ) return j+1; + }else if( NEVER(z[j]!='\\') || j+1>=k ){ return j+1; }else if( strchr("\"\\/bfnrt",z[j+1])!=0 ){ j++; @@ -205143,9 +206594,14 @@ static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){ return -1; } }else if( c<=0x1f ){ - /* Control characters are not allowed in strings */ - pParse->iErr = j; - return -1; + if( c==0 ){ + pParse->iErr = j; + return -1; + } + /* Control characters are not allowed in canonical JSON string + ** literals, but are allowed in JSON5 string literals. */ + opcode = JSONB_TEXT5; + pParse->hasNonstd = 1; }else if( c=='"' ){ opcode = JSONB_TEXT5; } @@ -205361,6 +206817,7 @@ static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){ return i+4; } /* fall-through into the default case that checks for NaN */ + /* no break */ deliberate_fall_through } default: { u32 k; @@ -205629,7 +207086,7 @@ static u32 jsonTranslateBlobToText( zIn = (const char*)&pParse->aBlob[i+n]; jsonAppendChar(pOut, '"'); while( sz2>0 ){ - for(k=0; k0 ){ jsonAppendRawNZ(pOut, zIn, k); if( k>=sz2 ){ @@ -205644,6 +207101,13 @@ static u32 jsonTranslateBlobToText( sz2--; continue; } + if( zIn[0]<=0x1f ){ + if( pOut->nUsed+7>pOut->nAlloc && jsonStringGrow(pOut,7) ) break; + jsonAppendControlChar(pOut, zIn[0]); + zIn++; + sz2--; + continue; + } assert( zIn[0]=='\\' ); assert( sz2>=1 ); if( sz2<2 ){ @@ -205746,6 +207210,112 @@ static u32 jsonTranslateBlobToText( return i+n+sz; } +/* Context for recursion of json_pretty() +*/ +typedef struct JsonPretty JsonPretty; +struct JsonPretty { + JsonParse *pParse; /* The BLOB being rendered */ + JsonString *pOut; /* Generate pretty output into this string */ + const char *zIndent; /* Use this 
text for indentation */ + u32 szIndent; /* Bytes in zIndent[] */ + u32 nIndent; /* Current level of indentation */ +}; + +/* Append indentation to the pretty JSON under construction */ +static void jsonPrettyIndent(JsonPretty *pPretty){ + u32 jj; + for(jj=0; jjnIndent; jj++){ + jsonAppendRaw(pPretty->pOut, pPretty->zIndent, pPretty->szIndent); + } +} + +/* +** Translate the binary JSONB representation of JSON beginning at +** pParse->aBlob[i] into a JSON text string. Append the JSON +** text onto the end of pOut. Return the index in pParse->aBlob[] +** of the first byte past the end of the element that is translated. +** +** This is a variant of jsonTranslateBlobToText() that "pretty-prints" +** the output. Extra whitespace is inserted to make the JSON easier +** for humans to read. +** +** If an error is detected in the BLOB input, the pOut->eErr flag +** might get set to JSTRING_MALFORMED. But not all BLOB input errors +** are detected. So a malformed JSONB input might either result +** in an error, or in incorrect JSON. +** +** The pOut->eErr JSTRING_OOM flag is set on a OOM. 
+*/ +static u32 jsonTranslateBlobToPrettyText( + JsonPretty *pPretty, /* Pretty-printing context */ + u32 i /* Start rendering at this index */ +){ + u32 sz, n, j, iEnd; + const JsonParse *pParse = pPretty->pParse; + JsonString *pOut = pPretty->pOut; + n = jsonbPayloadSize(pParse, i, &sz); + if( n==0 ){ + pOut->eErr |= JSTRING_MALFORMED; + return pParse->nBlob+1; + } + switch( pParse->aBlob[i] & 0x0f ){ + case JSONB_ARRAY: { + j = i+n; + iEnd = j+sz; + jsonAppendChar(pOut, '['); + if( jnIndent++; + while( pOut->eErr==0 ){ + jsonPrettyIndent(pPretty); + j = jsonTranslateBlobToPrettyText(pPretty, j); + if( j>=iEnd ) break; + jsonAppendRawNZ(pOut, ",\n", 2); + } + jsonAppendChar(pOut, '\n'); + pPretty->nIndent--; + jsonPrettyIndent(pPretty); + } + jsonAppendChar(pOut, ']'); + i = iEnd; + break; + } + case JSONB_OBJECT: { + j = i+n; + iEnd = j+sz; + jsonAppendChar(pOut, '{'); + if( jnIndent++; + while( pOut->eErr==0 ){ + jsonPrettyIndent(pPretty); + j = jsonTranslateBlobToText(pParse, j, pOut); + if( j>iEnd ){ + pOut->eErr |= JSTRING_MALFORMED; + break; + } + jsonAppendRawNZ(pOut, ": ", 2); + j = jsonTranslateBlobToPrettyText(pPretty, j); + if( j>=iEnd ) break; + jsonAppendRawNZ(pOut, ",\n", 2); + } + jsonAppendChar(pOut, '\n'); + pPretty->nIndent--; + jsonPrettyIndent(pPretty); + } + jsonAppendChar(pOut, '}'); + i = iEnd; + break; + } + default: { + i = jsonTranslateBlobToText(pParse, i, pOut); + break; + } + } + return i; +} + + /* Return true if the input pJson ** ** For performance reasons, this routine does not do a detailed check of the @@ -206996,11 +208566,12 @@ static void jsonParseFunc( if( p==0 ) return; if( argc==1 ){ jsonDebugPrintBlob(p, 0, p->nBlob, 0, &out); - sqlite3_result_text64(ctx, out.zText, out.nChar, SQLITE_DYNAMIC, SQLITE_UTF8); + sqlite3_result_text64(ctx,out.zText,out.nChar,SQLITE_TRANSIENT,SQLITE_UTF8); }else{ jsonShowParse(p); } jsonParseFree(p); + sqlite3_str_reset(&out); } #endif /* SQLITE_DEBUG */ @@ -207099,13 +208670,6 @@ static void 
jsonArrayLengthFunc( jsonParseFree(p); } -/* True if the string is all digits */ -static int jsonAllDigits(const char *z, int n){ - int i; - for(i=0; i $[NUMBER] // Not PG. Purely for convenience */ jsonStringInit(&jx, ctx); - if( jsonAllDigits(zPath, nPath) ){ + if( sqlite3_value_type(argv[i])==SQLITE_INTEGER ){ jsonAppendRawNZ(&jx, "[", 1); jsonAppendRaw(&jx, zPath, nPath); jsonAppendRawNZ(&jx, "]", 2); @@ -207664,6 +209228,40 @@ static void jsonTypeFunc( jsonParseFree(p); } +/* +** json_pretty(JSON) +** json_pretty(JSON, INDENT) +** +** Return text that is a pretty-printed rendering of the input JSON. +** If the argument is not valid JSON, return NULL. +** +** The INDENT argument is text that is used for indentation. If omitted, +** it defaults to four spaces (the same as PostgreSQL). +*/ +static void jsonPrettyFunc( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv +){ + JsonString s; /* The output string */ + JsonPretty x; /* Pretty printing context */ + + memset(&x, 0, sizeof(x)); + x.pParse = jsonParseFuncArg(ctx, argv[0], 0); + if( x.pParse==0 ) return; + x.pOut = &s; + jsonStringInit(&s, ctx); + if( argc==1 || (x.zIndent = (const char*)sqlite3_value_text(argv[1]))==0 ){ + x.zIndent = " "; + x.szIndent = 4; + }else{ + x.szIndent = (u32)strlen(x.zIndent); + } + jsonTranslateBlobToPrettyText(&x, 0); + jsonReturnString(&s, 0, 0); + jsonParseFree(x.pParse); +} + /* ** json_valid(JSON) ** json_valid(JSON, FLAGS) @@ -208678,6 +210276,8 @@ SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){ JFUNCTION(jsonb_object, -1,0,1, 1,1,0, jsonObjectFunc), JFUNCTION(json_patch, 2,1,1, 0,0,0, jsonPatchFunc), JFUNCTION(jsonb_patch, 2,1,0, 0,1,0, jsonPatchFunc), + JFUNCTION(json_pretty, 1,1,0, 0,0,0, jsonPrettyFunc), + JFUNCTION(json_pretty, 2,1,0, 0,0,0, jsonPrettyFunc), JFUNCTION(json_quote, 1,0,1, 1,0,0, jsonQuoteFunc), JFUNCTION(json_remove, -1,1,1, 0,0,0, jsonRemoveFunc), JFUNCTION(jsonb_remove, -1,1,0, 0,1,0, jsonRemoveFunc), @@ -210577,6 +212177,8 @@ static 
int deserializeGeometry(sqlite3_value *pValue, RtreeConstraint *pCons){ return SQLITE_OK; } +SQLITE_PRIVATE int sqlite3IntFloatCompare(i64,double); + /* ** Rtree virtual table module xFilter method. */ @@ -210606,7 +212208,8 @@ static int rtreeFilter( i64 iNode = 0; int eType = sqlite3_value_numeric_type(argv[0]); if( eType==SQLITE_INTEGER - || (eType==SQLITE_FLOAT && sqlite3_value_double(argv[0])==iRowid) + || (eType==SQLITE_FLOAT + && 0==sqlite3IntFloatCompare(iRowid,sqlite3_value_double(argv[0]))) ){ rc = findLeafNode(pRtree, iRowid, &pLeaf, &iNode); }else{ @@ -211961,6 +213564,7 @@ static int rtreeUpdate( */ static int rtreeBeginTransaction(sqlite3_vtab *pVtab){ Rtree *pRtree = (Rtree *)pVtab; + assert( pRtree->inWrTrans==0 ); pRtree->inWrTrans = 1; return SQLITE_OK; } @@ -215515,7 +217119,7 @@ static void icuLoadCollation( UCollator *pUCollator; /* ICU library collation object */ int rc; /* Return code from sqlite3_create_collation_x() */ - assert(nArg==2); + assert(nArg==2 || nArg==3); (void)nArg; /* Unused parameter */ zLocale = (const char *)sqlite3_value_text(apArg[0]); zName = (const char *)sqlite3_value_text(apArg[1]); @@ -215530,7 +217134,39 @@ static void icuLoadCollation( return; } assert(p); - + if(nArg==3){ + const char *zOption = (const char*)sqlite3_value_text(apArg[2]); + static const struct { + const char *zName; + UColAttributeValue val; + } aStrength[] = { + { "PRIMARY", UCOL_PRIMARY }, + { "SECONDARY", UCOL_SECONDARY }, + { "TERTIARY", UCOL_TERTIARY }, + { "DEFAULT", UCOL_DEFAULT_STRENGTH }, + { "QUARTERNARY", UCOL_QUATERNARY }, + { "IDENTICAL", UCOL_IDENTICAL }, + }; + unsigned int i; + for(i=0; i=sizeof(aStrength)/sizeof(aStrength[0]) ){ + sqlite3_str *pStr = sqlite3_str_new(sqlite3_context_db_handle(p)); + sqlite3_str_appendf(pStr, + "unknown collation strength \"%s\" - should be one of:", + zOption); + for(i=0; ipTblIter, &p->zErrmsg); pIter->zTbl = 0; + pIter->zDataTbl = 0; }else{ pIter->zTbl = (const 
char*)sqlite3_column_text(pIter->pTblIter, 0); pIter->zDataTbl = (const char*)sqlite3_column_text(pIter->pTblIter,1); @@ -219483,7 +221122,7 @@ static i64 rbuShmChecksum(sqlite3rbu *p){ u32 volatile *ptr; p->rc = pDb->pMethods->xShmMap(pDb, 0, 32*1024, 0, (void volatile**)&ptr); if( p->rc==SQLITE_OK ){ - iRet = ((i64)ptr[10] << 32) + ptr[11]; + iRet = (i64)(((u64)ptr[10] << 32) + ptr[11]); } } return iRet; @@ -226954,14 +228593,14 @@ static int sessionChangesetNextOne( p->rc = sessionInputBuffer(&p->in, 2); if( p->rc!=SQLITE_OK ) return p->rc; + sessionDiscardData(&p->in); + p->in.iCurrent = p->in.iNext; + /* If the iterator is already at the end of the changeset, return DONE. */ if( p->in.iNext>=p->in.nData ){ return SQLITE_DONE; } - sessionDiscardData(&p->in); - p->in.iCurrent = p->in.iNext; - op = p->in.aData[p->in.iNext++]; while( op=='T' || op=='P' ){ if( pbNew ) *pbNew = 1; @@ -228696,6 +230335,7 @@ struct sqlite3_changegroup { int rc; /* Error code */ int bPatch; /* True to accumulate patchsets */ SessionTable *pList; /* List of tables in current patch */ + SessionBuffer rec; sqlite3 *db; /* Configured by changegroup_schema() */ char *zDb; /* Configured by changegroup_schema() */ @@ -228994,108 +230634,128 @@ static int sessionChangesetExtendRecord( } /* -** Add all changes in the changeset traversed by the iterator passed as -** the first argument to the changegroup hash tables. +** Locate or create a SessionTable object that may be used to add the +** change currently pointed to by iterator pIter to changegroup pGrp. +** If successful, set output variable (*ppTab) to point to the table +** object and return SQLITE_OK. Otherwise, if some error occurs, return +** an SQLite error code and leave (*ppTab) set to NULL. 
*/ -static int sessionChangesetToHash( - sqlite3_changeset_iter *pIter, /* Iterator to read from */ - sqlite3_changegroup *pGrp, /* Changegroup object to add changeset to */ - int bRebase /* True if hash table is for rebasing */ +static int sessionChangesetFindTable( + sqlite3_changegroup *pGrp, + const char *zTab, + sqlite3_changeset_iter *pIter, + SessionTable **ppTab ){ - u8 *aRec; - int nRec; int rc = SQLITE_OK; SessionTable *pTab = 0; - SessionBuffer rec = {0, 0, 0}; - - while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, 0) ){ - const char *zNew; - int nCol; - int op; - int iHash; - int bIndirect; - SessionChange *pChange; - SessionChange *pExist = 0; - SessionChange **pp; - - /* Ensure that only changesets, or only patchsets, but not a mixture - ** of both, are being combined. It is an error to try to combine a - ** changeset and a patchset. */ - if( pGrp->pList==0 ){ - pGrp->bPatch = pIter->bPatchset; - }else if( pIter->bPatchset!=pGrp->bPatch ){ - rc = SQLITE_ERROR; - break; - } + int nTab = (int)strlen(zTab); + u8 *abPK = 0; + int nCol = 0; - sqlite3changeset_op(pIter, &zNew, &nCol, &op, &bIndirect); - if( !pTab || sqlite3_stricmp(zNew, pTab->zName) ){ - /* Search the list for a matching table */ - int nNew = (int)strlen(zNew); - u8 *abPK; + *ppTab = 0; + sqlite3changeset_pk(pIter, &abPK, &nCol); - sqlite3changeset_pk(pIter, &abPK, 0); - for(pTab = pGrp->pList; pTab; pTab=pTab->pNext){ - if( 0==sqlite3_strnicmp(pTab->zName, zNew, nNew+1) ) break; - } - if( !pTab ){ - SessionTable **ppTab; + /* Search the list for an existing table */ + for(pTab = pGrp->pList; pTab; pTab=pTab->pNext){ + if( 0==sqlite3_strnicmp(pTab->zName, zTab, nTab+1) ) break; + } - pTab = sqlite3_malloc64(sizeof(SessionTable) + nCol + nNew+1); - if( !pTab ){ - rc = SQLITE_NOMEM; - break; - } - memset(pTab, 0, sizeof(SessionTable)); - pTab->nCol = nCol; - pTab->abPK = (u8*)&pTab[1]; - memcpy(pTab->abPK, abPK, nCol); - pTab->zName = (char*)&pTab->abPK[nCol]; - memcpy(pTab->zName, 
zNew, nNew+1); - - if( pGrp->db ){ - pTab->nCol = 0; - rc = sessionInitTable(0, pTab, pGrp->db, pGrp->zDb); - if( rc ){ - assert( pTab->azCol==0 ); - sqlite3_free(pTab); - break; - } - } + /* If one was not found above, create a new table now */ + if( !pTab ){ + SessionTable **ppNew; - /* The new object must be linked on to the end of the list, not - ** simply added to the start of it. This is to ensure that the - ** tables within the output of sqlite3changegroup_output() are in - ** the right order. */ - for(ppTab=&pGrp->pList; *ppTab; ppTab=&(*ppTab)->pNext); - *ppTab = pTab; - } + pTab = sqlite3_malloc64(sizeof(SessionTable) + nCol + nTab+1); + if( !pTab ){ + return SQLITE_NOMEM; + } + memset(pTab, 0, sizeof(SessionTable)); + pTab->nCol = nCol; + pTab->abPK = (u8*)&pTab[1]; + memcpy(pTab->abPK, abPK, nCol); + pTab->zName = (char*)&pTab->abPK[nCol]; + memcpy(pTab->zName, zTab, nTab+1); - if( !sessionChangesetCheckCompat(pTab, nCol, abPK) ){ - rc = SQLITE_SCHEMA; - break; + if( pGrp->db ){ + pTab->nCol = 0; + rc = sessionInitTable(0, pTab, pGrp->db, pGrp->zDb); + if( rc ){ + assert( pTab->azCol==0 ); + sqlite3_free(pTab); + return rc; } } - if( nColnCol ){ - assert( pGrp->db ); - rc = sessionChangesetExtendRecord(pGrp, pTab, nCol, op, aRec, nRec, &rec); - if( rc ) break; - aRec = rec.aBuf; - nRec = rec.nBuf; - } + /* The new object must be linked on to the end of the list, not + ** simply added to the start of it. This is to ensure that the + ** tables within the output of sqlite3changegroup_output() are in + ** the right order. */ + for(ppNew=&pGrp->pList; *ppNew; ppNew=&(*ppNew)->pNext); + *ppNew = pTab; + } - if( sessionGrowHash(0, pIter->bPatchset, pTab) ){ - rc = SQLITE_NOMEM; - break; - } + /* Check that the table is compatible. 
*/ + if( !sessionChangesetCheckCompat(pTab, nCol, abPK) ){ + rc = SQLITE_SCHEMA; + } + + *ppTab = pTab; + return rc; +} + +/* +** Add the change currently indicated by iterator pIter to the hash table +** belonging to changegroup pGrp. +*/ +static int sessionOneChangeToHash( + sqlite3_changegroup *pGrp, + sqlite3_changeset_iter *pIter, + int bRebase +){ + int rc = SQLITE_OK; + int nCol = 0; + int op = 0; + int iHash = 0; + int bIndirect = 0; + SessionChange *pChange = 0; + SessionChange *pExist = 0; + SessionChange **pp = 0; + SessionTable *pTab = 0; + u8 *aRec = &pIter->in.aData[pIter->in.iCurrent + 2]; + int nRec = (pIter->in.iNext - pIter->in.iCurrent) - 2; + + /* Ensure that only changesets, or only patchsets, but not a mixture + ** of both, are being combined. It is an error to try to combine a + ** changeset and a patchset. */ + if( pGrp->pList==0 ){ + pGrp->bPatch = pIter->bPatchset; + }else if( pIter->bPatchset!=pGrp->bPatch ){ + rc = SQLITE_ERROR; + } + + if( rc==SQLITE_OK ){ + const char *zTab = 0; + sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect); + rc = sessionChangesetFindTable(pGrp, zTab, pIter, &pTab); + } + + if( rc==SQLITE_OK && nColnCol ){ + SessionBuffer *pBuf = &pGrp->rec; + rc = sessionChangesetExtendRecord(pGrp, pTab, nCol, op, aRec, nRec, pBuf); + aRec = pBuf->aBuf; + nRec = pBuf->nBuf; + assert( pGrp->db ); + } + + if( rc==SQLITE_OK && sessionGrowHash(0, pIter->bPatchset, pTab) ){ + rc = SQLITE_NOMEM; + } + + if( rc==SQLITE_OK ){ + /* Search for existing entry. If found, remove it from the hash table. + ** Code below may link it back in. */ iHash = sessionChangeHash( pTab, (pIter->bPatchset && op==SQLITE_DELETE), aRec, pTab->nChange ); - - /* Search for existing entry. If found, remove it from the hash table. - ** Code below may link it back in. 
- */ for(pp=&pTab->apChange[iHash]; *pp; pp=&(*pp)->pNext){ int bPkOnly1 = 0; int bPkOnly2 = 0; @@ -229110,19 +230770,41 @@ static int sessionChangesetToHash( break; } } + } + if( rc==SQLITE_OK ){ rc = sessionChangeMerge(pTab, bRebase, pIter->bPatchset, pExist, op, bIndirect, aRec, nRec, &pChange ); - if( rc ) break; - if( pChange ){ - pChange->pNext = pTab->apChange[iHash]; - pTab->apChange[iHash] = pChange; - pTab->nEntry++; - } + } + if( rc==SQLITE_OK && pChange ){ + pChange->pNext = pTab->apChange[iHash]; + pTab->apChange[iHash] = pChange; + pTab->nEntry++; + } + + if( rc==SQLITE_OK ) rc = pIter->rc; + return rc; +} + +/* +** Add all changes in the changeset traversed by the iterator passed as +** the first argument to the changegroup hash tables. +*/ +static int sessionChangesetToHash( + sqlite3_changeset_iter *pIter, /* Iterator to read from */ + sqlite3_changegroup *pGrp, /* Changegroup object to add changeset to */ + int bRebase /* True if hash table is for rebasing */ +){ + u8 *aRec; + int nRec; + int rc = SQLITE_OK; + + while( SQLITE_ROW==(sessionChangesetNext(pIter, &aRec, &nRec, 0)) ){ + rc = sessionOneChangeToHash(pGrp, pIter, bRebase); + if( rc!=SQLITE_OK ) break; } - sqlite3_free(rec.aBuf); if( rc==SQLITE_OK ) rc = pIter->rc; return rc; } @@ -229250,6 +230932,23 @@ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup *pGrp, int nData, void return rc; } +/* +** Add a single change to a changeset-group. +*/ +SQLITE_API int sqlite3changegroup_add_change( + sqlite3_changegroup *pGrp, + sqlite3_changeset_iter *pIter +){ + if( pIter->in.iCurrent==pIter->in.iNext + || pIter->rc!=SQLITE_OK + || pIter->bInvert + ){ + /* Iterator does not point to any valid entry or is an INVERT iterator. */ + return SQLITE_ERROR; + } + return sessionOneChangeToHash(pGrp, pIter, 0); +} + /* ** Obtain a buffer containing a changeset representing the concatenation ** of all changesets added to the group so far. 
@@ -229299,6 +230998,7 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){ if( pGrp ){ sqlite3_free(pGrp->zDb); sessionDeleteTable(0, pGrp->pList); + sqlite3_free(pGrp->rec.aBuf); sqlite3_free(pGrp); } } @@ -229700,6 +231400,7 @@ SQLITE_API int sqlite3rebaser_rebase_strm( SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p){ if( p ){ sessionDeleteTable(0, p->grp.pList); + sqlite3_free(p->grp.rec.aBuf); sqlite3_free(p); } } @@ -229797,8 +231498,8 @@ struct Fts5PhraseIter { ** EXTENSION API FUNCTIONS ** ** xUserData(pFts): -** Return a copy of the context pointer the extension function was -** registered with. +** Return a copy of the pUserData pointer passed to the xCreateFunction() +** API when the extension function was registered. ** ** xColumnTotalSize(pFts, iCol, pnToken): ** If parameter iCol is less than zero, set output variable *pnToken @@ -231394,6 +233095,9 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); ** sqlite3Fts5ParserARG_STORE Code to store %extra_argument into fts5yypParser ** sqlite3Fts5ParserARG_FETCH Code to extract %extra_argument from fts5yypParser ** sqlite3Fts5ParserCTX_* As sqlite3Fts5ParserARG_ except for %extra_context +** fts5YYREALLOC Name of the realloc() function to use +** fts5YYFREE Name of the free() function to use +** fts5YYDYNSTACK True if stack space should be extended on heap ** fts5YYERRORSYMBOL is the code number of the error symbol. If not ** defined, then do no error processing. ** fts5YYNSTATE the combined number of states. 
@@ -231407,6 +233111,8 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); ** fts5YY_NO_ACTION The fts5yy_action[] code for no-op ** fts5YY_MIN_REDUCE Minimum value for reduce actions ** fts5YY_MAX_REDUCE Maximum value for reduce actions +** fts5YY_MIN_DSTRCTR Minimum symbol value that has a destructor +** fts5YY_MAX_DSTRCTR Maximum symbol value that has a destructor */ #ifndef INTERFACE # define INTERFACE 1 @@ -231433,6 +233139,9 @@ typedef union { #define sqlite3Fts5ParserARG_PARAM ,pParse #define sqlite3Fts5ParserARG_FETCH Fts5Parse *pParse=fts5yypParser->pParse; #define sqlite3Fts5ParserARG_STORE fts5yypParser->pParse=pParse; +#define fts5YYREALLOC realloc +#define fts5YYFREE free +#define fts5YYDYNSTACK 0 #define sqlite3Fts5ParserCTX_SDECL #define sqlite3Fts5ParserCTX_PDECL #define sqlite3Fts5ParserCTX_PARAM @@ -231450,6 +233159,8 @@ typedef union { #define fts5YY_NO_ACTION 82 #define fts5YY_MIN_REDUCE 83 #define fts5YY_MAX_REDUCE 110 +#define fts5YY_MIN_DSTRCTR 16 +#define fts5YY_MAX_DSTRCTR 24 /************* End control #defines *******************************************/ #define fts5YY_NLOOKAHEAD ((int)(sizeof(fts5yy_lookahead)/sizeof(fts5yy_lookahead[0]))) @@ -231465,6 +233176,22 @@ typedef union { # define fts5yytestcase(X) #endif +/* Macro to determine if stack space has the ability to grow using +** heap memory. +*/ +#if fts5YYSTACKDEPTH<=0 || fts5YYDYNSTACK +# define fts5YYGROWABLESTACK 1 +#else +# define fts5YYGROWABLESTACK 0 +#endif + +/* Guarantee a minimum number of initial stack slots. +*/ +#if fts5YYSTACKDEPTH<=0 +# undef fts5YYSTACKDEPTH +# define fts5YYSTACKDEPTH 2 /* Need a minimum stack size */ +#endif + /* Next are the tables used to determine what action to take based on the ** current state and lookahead token. 
These tables are used to implement @@ -231625,14 +233352,9 @@ struct fts5yyParser { #endif sqlite3Fts5ParserARG_SDECL /* A place to hold %extra_argument */ sqlite3Fts5ParserCTX_SDECL /* A place to hold %extra_context */ -#if fts5YYSTACKDEPTH<=0 - int fts5yystksz; /* Current side of the stack */ - fts5yyStackEntry *fts5yystack; /* The parser's stack */ - fts5yyStackEntry fts5yystk0; /* First stack entry */ -#else - fts5yyStackEntry fts5yystack[fts5YYSTACKDEPTH]; /* The parser's stack */ - fts5yyStackEntry *fts5yystackEnd; /* Last entry in the stack */ -#endif + fts5yyStackEntry *fts5yystackEnd; /* Last entry in the stack */ + fts5yyStackEntry *fts5yystack; /* The parser stack */ + fts5yyStackEntry fts5yystk0[fts5YYSTACKDEPTH]; /* Initial stack space */ }; typedef struct fts5yyParser fts5yyParser; @@ -231739,37 +233461,45 @@ static const char *const fts5yyRuleName[] = { #endif /* NDEBUG */ -#if fts5YYSTACKDEPTH<=0 +#if fts5YYGROWABLESTACK /* ** Try to increase the size of the parser stack. Return the number ** of errors. Return 0 on success. */ static int fts5yyGrowStack(fts5yyParser *p){ + int oldSize = 1 + (int)(p->fts5yystackEnd - p->fts5yystack); int newSize; int idx; fts5yyStackEntry *pNew; - newSize = p->fts5yystksz*2 + 100; - idx = p->fts5yytos ? 
(int)(p->fts5yytos - p->fts5yystack) : 0; - if( p->fts5yystack==&p->fts5yystk0 ){ - pNew = malloc(newSize*sizeof(pNew[0])); - if( pNew ) pNew[0] = p->fts5yystk0; + newSize = oldSize*2 + 100; + idx = (int)(p->fts5yytos - p->fts5yystack); + if( p->fts5yystack==p->fts5yystk0 ){ + pNew = fts5YYREALLOC(0, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; + memcpy(pNew, p->fts5yystack, oldSize*sizeof(pNew[0])); }else{ - pNew = realloc(p->fts5yystack, newSize*sizeof(pNew[0])); + pNew = fts5YYREALLOC(p->fts5yystack, newSize*sizeof(pNew[0])); + if( pNew==0 ) return 1; } - if( pNew ){ - p->fts5yystack = pNew; - p->fts5yytos = &p->fts5yystack[idx]; + p->fts5yystack = pNew; + p->fts5yytos = &p->fts5yystack[idx]; #ifndef NDEBUG - if( fts5yyTraceFILE ){ - fprintf(fts5yyTraceFILE,"%sStack grows from %d to %d entries.\n", - fts5yyTracePrompt, p->fts5yystksz, newSize); - } -#endif - p->fts5yystksz = newSize; + if( fts5yyTraceFILE ){ + fprintf(fts5yyTraceFILE,"%sStack grows from %d to %d entries.\n", + fts5yyTracePrompt, oldSize, newSize); } - return pNew==0; +#endif + p->fts5yystackEnd = &p->fts5yystack[newSize-1]; + return 0; } +#endif /* fts5YYGROWABLESTACK */ + +#if !fts5YYGROWABLESTACK +/* For builds that do no have a growable stack, fts5yyGrowStack always +** returns an error. 
+*/ +# define fts5yyGrowStack(X) 1 #endif /* Datatype of the argument to the memory allocated passed as the @@ -231789,24 +233519,14 @@ static void sqlite3Fts5ParserInit(void *fts5yypRawParser sqlite3Fts5ParserCTX_PD #ifdef fts5YYTRACKMAXSTACKDEPTH fts5yypParser->fts5yyhwm = 0; #endif -#if fts5YYSTACKDEPTH<=0 - fts5yypParser->fts5yytos = NULL; - fts5yypParser->fts5yystack = NULL; - fts5yypParser->fts5yystksz = 0; - if( fts5yyGrowStack(fts5yypParser) ){ - fts5yypParser->fts5yystack = &fts5yypParser->fts5yystk0; - fts5yypParser->fts5yystksz = 1; - } -#endif + fts5yypParser->fts5yystack = fts5yypParser->fts5yystk0; + fts5yypParser->fts5yystackEnd = &fts5yypParser->fts5yystack[fts5YYSTACKDEPTH-1]; #ifndef fts5YYNOERRORRECOVERY fts5yypParser->fts5yyerrcnt = -1; #endif fts5yypParser->fts5yytos = fts5yypParser->fts5yystack; fts5yypParser->fts5yystack[0].stateno = 0; fts5yypParser->fts5yystack[0].major = 0; -#if fts5YYSTACKDEPTH>0 - fts5yypParser->fts5yystackEnd = &fts5yypParser->fts5yystack[fts5YYSTACKDEPTH-1]; -#endif } #ifndef sqlite3Fts5Parser_ENGINEALWAYSONSTACK @@ -231920,9 +233640,26 @@ static void fts5yy_pop_parser_stack(fts5yyParser *pParser){ */ static void sqlite3Fts5ParserFinalize(void *p){ fts5yyParser *pParser = (fts5yyParser*)p; - while( pParser->fts5yytos>pParser->fts5yystack ) fts5yy_pop_parser_stack(pParser); -#if fts5YYSTACKDEPTH<=0 - if( pParser->fts5yystack!=&pParser->fts5yystk0 ) free(pParser->fts5yystack); + + /* In-lined version of calling fts5yy_pop_parser_stack() for each + ** element left in the stack */ + fts5yyStackEntry *fts5yytos = pParser->fts5yytos; + while( fts5yytos>pParser->fts5yystack ){ +#ifndef NDEBUG + if( fts5yyTraceFILE ){ + fprintf(fts5yyTraceFILE,"%sPopping %s\n", + fts5yyTracePrompt, + fts5yyTokenName[fts5yytos->major]); + } +#endif + if( fts5yytos->major>=fts5YY_MIN_DSTRCTR ){ + fts5yy_destructor(pParser, fts5yytos->major, &fts5yytos->minor); + } + fts5yytos--; + } + +#if fts5YYGROWABLESTACK + if( 
pParser->fts5yystack!=pParser->fts5yystk0 ) fts5YYFREE(pParser->fts5yystack); #endif } @@ -232149,25 +233886,19 @@ static void fts5yy_shift( assert( fts5yypParser->fts5yyhwm == (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack) ); } #endif -#if fts5YYSTACKDEPTH>0 - if( fts5yypParser->fts5yytos>fts5yypParser->fts5yystackEnd ){ - fts5yypParser->fts5yytos--; - fts5yyStackOverflow(fts5yypParser); - return; - } -#else - if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz] ){ + fts5yytos = fts5yypParser->fts5yytos; + if( fts5yytos>fts5yypParser->fts5yystackEnd ){ if( fts5yyGrowStack(fts5yypParser) ){ fts5yypParser->fts5yytos--; fts5yyStackOverflow(fts5yypParser); return; } + fts5yytos = fts5yypParser->fts5yytos; + assert( fts5yytos <= fts5yypParser->fts5yystackEnd ); } -#endif if( fts5yyNewState > fts5YY_MAX_SHIFT ){ fts5yyNewState += fts5YY_MIN_REDUCE - fts5YY_MIN_SHIFTREDUCE; } - fts5yytos = fts5yypParser->fts5yytos; fts5yytos->stateno = fts5yyNewState; fts5yytos->major = fts5yyMajor; fts5yytos->minor.fts5yy0 = fts5yyMinor; @@ -232604,19 +234335,12 @@ static void sqlite3Fts5Parser( (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)); } #endif -#if fts5YYSTACKDEPTH>0 if( fts5yypParser->fts5yytos>=fts5yypParser->fts5yystackEnd ){ - fts5yyStackOverflow(fts5yypParser); - break; - } -#else - if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz-1] ){ if( fts5yyGrowStack(fts5yypParser) ){ fts5yyStackOverflow(fts5yypParser); break; } } -#endif } fts5yyact = fts5yy_reduce(fts5yypParser,fts5yyruleno,fts5yymajor,fts5yyminor sqlite3Fts5ParserCTX_PARAM); }else if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){ @@ -235293,7 +237017,11 @@ static int sqlite3Fts5ExprNew( } sqlite3_free(sParse.apPhrase); - *pzErr = sParse.zErr; + if( 0==*pzErr ){ + *pzErr = sParse.zErr; + }else{ + sqlite3_free(sParse.zErr); + } return sParse.rc; } @@ -237421,6 +239149,7 @@ static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( 
assert( pRight->eType==FTS5_STRING || pRight->eType==FTS5_TERM || pRight->eType==FTS5_EOF + || (pRight->eType==FTS5_AND && pParse->bPhraseToAnd) ); if( pLeft->eType==FTS5_AND ){ @@ -249588,6 +251317,7 @@ static int fts5UpdateMethod( rc = SQLITE_ERROR; }else{ rc = fts5SpecialDelete(pTab, apVal); + bUpdateOrDelete = 1; } }else{ rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]); @@ -250762,14 +252492,16 @@ static int sqlite3Fts5GetTokenizer( if( pMod==0 ){ assert( nArg>0 ); rc = SQLITE_ERROR; - *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]); + if( pzErr ) *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]); }else{ rc = pMod->x.xCreate( pMod->pUserData, (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->pTok ); pConfig->pTokApi = &pMod->x; if( rc!=SQLITE_OK ){ - if( pzErr ) *pzErr = sqlite3_mprintf("error in tokenizer constructor"); + if( pzErr && rc!=SQLITE_NOMEM ){ + *pzErr = sqlite3_mprintf("error in tokenizer constructor"); + } }else{ pConfig->ePattern = sqlite3Fts5TokenizerPattern( pMod->x.xCreate, pConfig->pTok @@ -250828,7 +252560,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1e8355", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33", -1, SQLITE_TRANSIENT); } /* @@ -250863,18 +252595,25 @@ static int fts5IntegrityMethod( assert( pzErr!=0 && *pzErr==0 ); UNUSED_PARAM(isQuick); + assert( pTab->p.pConfig->pzErrmsg==0 ); + pTab->p.pConfig->pzErrmsg = pzErr; rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, 0); - if( (rc&0xff)==SQLITE_CORRUPT ){ - *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", - zSchema, zTabname); - }else if( rc!=SQLITE_OK ){ - *pzErr = sqlite3_mprintf("unable to validate the inverted index for" - " FTS5 table %s.%s: %s", - zSchema, zTabname, 
sqlite3_errstr(rc)); + if( *pzErr==0 && rc!=SQLITE_OK ){ + if( (rc&0xff)==SQLITE_CORRUPT ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", + zSchema, zTabname); + rc = (*pzErr) ? SQLITE_OK : SQLITE_NOMEM; + }else{ + *pzErr = sqlite3_mprintf("unable to validate the inverted index for" + " FTS5 table %s.%s: %s", + zSchema, zTabname, sqlite3_errstr(rc)); + } } + sqlite3Fts5IndexCloseReader(pTab->p.pIndex); + pTab->p.pConfig->pzErrmsg = 0; - return SQLITE_OK; + return rc; } static int fts5Init(sqlite3 *db){ @@ -252306,7 +254045,7 @@ static int fts5AsciiCreate( int i; memset(p, 0, sizeof(AsciiTokenizer)); memcpy(p->aTokenChar, aAsciiTokenChar, sizeof(aAsciiTokenChar)); - for(i=0; rc==SQLITE_OK && ibFold = 1; pNew->iFoldParam = 0; - for(i=0; rc==SQLITE_OK && iiFoldParam!=0 && pNew->bFold==0 ){ rc = SQLITE_ERROR; diff --git a/src/database/sqlite/sqlite3.h b/src/database/sqlite/sqlite3.h index 2618b37a7b89df..f64ca01727829c 100644 --- a/src/database/sqlite/sqlite3.h +++ b/src/database/sqlite/sqlite3.h @@ -146,9 +146,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.45.3" -#define SQLITE_VERSION_NUMBER 3045003 -#define SQLITE_SOURCE_ID "2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1e8355" +#define SQLITE_VERSION "3.46.1" +#define SQLITE_VERSION_NUMBER 3046001 +#define SQLITE_SOURCE_ID "2024-08-13 09:16:08 c9c2ab54ba1f5f46360f1b4f35d849cd3f080e6fc2b6c60e91b16c63f69a1e33" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -764,11 +764,11 @@ struct sqlite3_file { ** ** xLock() upgrades the database file lock. In other words, xLock() moves the ** database file lock in the direction NONE toward EXCLUSIVE. The argument to -** xLock() is always on of SHARED, RESERVED, PENDING, or EXCLUSIVE, never +** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never ** SQLITE_LOCK_NONE. 
If the database file lock is already at or above the ** requested lock, then the call to xLock() is a no-op. ** xUnlock() downgrades the database file lock to either SHARED or NONE. -* If the lock is already at or below the requested lock state, then the call +** If the lock is already at or below the requested lock state, then the call ** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, @@ -3305,8 +3305,8 @@ SQLITE_API int sqlite3_set_authorizer( #define SQLITE_RECURSIVE 33 /* NULL NULL */ /* -** CAPI3REF: Tracing And Profiling Functions -** METHOD: sqlite3 +** CAPI3REF: Deprecated Tracing And Profiling Functions +** DEPRECATED ** ** These routines are deprecated. Use the [sqlite3_trace_v2()] interface ** instead of the routines described here. @@ -6887,6 +6887,12 @@ SQLITE_API int sqlite3_autovacuum_pages( ** The exceptions defined in this paragraph might change in a future ** release of SQLite. ** +** Whether the update hook is invoked before or after the +** corresponding change is currently unspecified and may differ +** depending on the type of change. Do not rely on the order of the +** hook call with regards to the final result of the operation which +** triggers the hook. +** ** The update hook implementation must not do anything that will modify ** the database connection that invoked the update hook. Any actions ** to modify the database connection must be deferred until after the @@ -8357,7 +8363,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); ** The sqlite3_keyword_count() interface returns the number of distinct ** keywords understood by SQLite. ** -** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and +** The sqlite3_keyword_name(N,Z,L) interface finds the 0-based N-th keyword and ** makes *Z point to that keyword expressed as UTF8 and writes the number ** of bytes in the keyword into *L. 
The string that *Z points to is not ** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns @@ -9936,24 +9942,45 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); **

  • ** ^(If the sqlite3_vtab_distinct() interface returns 2, that means ** that the query planner does not need the rows returned in any particular -** order, as long as rows with the same values in all "aOrderBy" columns -** are adjacent.)^ ^(Furthermore, only a single row for each particular -** combination of values in the columns identified by the "aOrderBy" field -** needs to be returned.)^ ^It is always ok for two or more rows with the same -** values in all "aOrderBy" columns to be returned, as long as all such rows -** are adjacent. ^The virtual table may, if it chooses, omit extra rows -** that have the same value for all columns identified by "aOrderBy". -** ^However omitting the extra rows is optional. +** order, as long as rows with the same values in all columns identified +** by "aOrderBy" are adjacent.)^ ^(Furthermore, when two or more rows +** contain the same values for all columns identified by "colUsed", all but +** one such row may optionally be omitted from the result.)^ +** The virtual table is not required to omit rows that are duplicates +** over the "colUsed" columns, but if the virtual table can do that without +** too much extra effort, it could potentially help the query to run faster. ** This mode is used for a DISTINCT query. **

  • -** ^(If the sqlite3_vtab_distinct() interface returns 3, that means -** that the query planner needs only distinct rows but it does need the -** rows to be sorted.)^ ^The virtual table implementation is free to omit -** rows that are identical in all aOrderBy columns, if it wants to, but -** it is not required to omit any rows. This mode is used for queries +** ^(If the sqlite3_vtab_distinct() interface returns 3, that means the +** virtual table must return rows in the order defined by "aOrderBy" as +** if the sqlite3_vtab_distinct() interface had returned 0. However if +** two or more rows in the result have the same values for all columns +** identified by "colUsed", then all but one such row may optionally be +** omitted.)^ Like when the return value is 2, the virtual table +** is not required to omit rows that are duplicates over the "colUsed" +** columns, but if the virtual table can do that without +** too much extra effort, it could potentially help the query to run faster. +** This mode is used for queries ** that have both DISTINCT and ORDER BY clauses. ** ** +**

    The following table summarizes the conditions under which the +** virtual table is allowed to set the "orderByConsumed" flag based on +** the value returned by sqlite3_vtab_distinct(). This table is a +** restatement of the previous four paragraphs: +** +** +** +**
    sqlite3_vtab_distinct() return value +** Rows are returned in aOrderBy order +** Rows with the same value in all aOrderBy columns are adjacent +** Duplicates over all colUsed columns may be omitted +**
    0yesyesno +**
    1noyesno +**
    2noyesyes +**
    3yesyesyes +**
    +** ** ^For the purposes of comparing virtual table output values to see if the ** values are same value for sorting purposes, two NULL values are considered ** to be the same. In other words, the comparison operator is "IS" @@ -11998,6 +12025,30 @@ SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const c */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); +/* +** CAPI3REF: Add A Single Change To A Changegroup +** METHOD: sqlite3_changegroup +** +** This function adds the single change currently indicated by the iterator +** passed as the second argument to the changegroup object. The rules for +** adding the change are just as described for [sqlite3changegroup_add()]. +** +** If the change is successfully added to the changegroup, SQLITE_OK is +** returned. Otherwise, an SQLite error code is returned. +** +** The iterator must point to a valid entry when this function is called. +** If it does not, SQLITE_ERROR is returned and no change is added to the +** changegroup. Additionally, the iterator must not have been opened with +** the SQLITE_CHANGESETAPPLY_INVERT flag. In this case SQLITE_ERROR is also +** returned. +*/ +SQLITE_API int sqlite3changegroup_add_change( + sqlite3_changegroup*, + sqlite3_changeset_iter* +); + + + /* ** CAPI3REF: Obtain A Composite Changeset From A Changegroup ** METHOD: sqlite3_changegroup @@ -12802,8 +12853,8 @@ struct Fts5PhraseIter { ** EXTENSION API FUNCTIONS ** ** xUserData(pFts): -** Return a copy of the context pointer the extension function was -** registered with. +** Return a copy of the pUserData pointer passed to the xCreateFunction() +** API when the extension function was registered. 
** ** xColumnTotalSize(pFts, iCol, pnToken): ** If parameter iCol is less than zero, set output variable *pnToken diff --git a/src/database/sqlite/sqlite_functions.c b/src/database/sqlite/sqlite_functions.c index e62743f596a695..6166a51603a235 100644 --- a/src/database/sqlite/sqlite_functions.c +++ b/src/database/sqlite/sqlite_functions.c @@ -140,6 +140,10 @@ int configure_sqlite_database(sqlite3 *database, int target_version, const char if (init_database_batch(database, list, description)) return 1; + snprintfz(buf, sizeof(buf) - 1, "PRAGMA optimize=0x10002"); + if (init_database_batch(database, list, description)) + return 1; + return 0; } @@ -338,7 +342,6 @@ void sql_close_database(sqlite3 *database, const char *database_name) if (unlikely(!database)) return; - (void) db_execute(database, "PRAGMA analysis_limit=10000"); (void) db_execute(database, "PRAGMA optimize"); netdata_log_info("%s: Closing sqlite database", database_name); diff --git a/src/database/sqlite/sqlite_health.c b/src/database/sqlite/sqlite_health.c index 3a800d384253b1..c78018cda5a7b3 100644 --- a/src/database/sqlite/sqlite_health.c +++ b/src/database/sqlite/sqlite_health.c @@ -746,20 +746,20 @@ void sql_health_alarm_log_load(RRDHOST *host) * Store an alert config hash in the database */ #define SQL_STORE_ALERT_CONFIG_HASH \ - "insert or replace into alert_hash (hash_id, date_updated, alarm, template, " \ + "INSERT OR REPLACE INTO alert_hash (hash_id, date_updated, alarm, template, " \ "on_key, class, component, type, lookup, every, units, calc, " \ "green, red, warn, crit, exec, to_key, info, delay, options, repeat, host_labels, " \ "p_db_lookup_dimensions, p_db_lookup_method, p_db_lookup_options, p_db_lookup_after, " \ "p_db_lookup_before, p_update_every, source, chart_labels, summary, time_group_condition, " \ "time_group_value, dims_group, data_source) " \ - "values (@hash_id,UNIXEPOCH(),@alarm,@template," \ + "VALUES (@hash_id,UNIXEPOCH(),@alarm,@template," \ 
"@on_key,@class,@component,@type,@lookup,@every,@units,@calc," \ "@green,@red,@warn,@crit,@exec,@to_key,@info,@delay,@options,@repeat,@host_labels," \ "@p_db_lookup_dimensions,@p_db_lookup_method,@p_db_lookup_options,@p_db_lookup_after," \ "@p_db_lookup_before,@p_update_every,@source,@chart_labels,@summary, @time_group_condition, " \ "@time_group_value, @dims_group, @data_source)" -void sql_alert_store_config(RRD_ALERT_PROTOTYPE *ap __maybe_unused) +void sql_alert_store_config(RRD_ALERT_PROTOTYPE *ap) { static __thread sqlite3_stmt *res = NULL; int param = 0; @@ -767,7 +767,7 @@ void sql_alert_store_config(RRD_ALERT_PROTOTYPE *ap __maybe_unused) if (!PREPARE_COMPILED_STATEMENT(db_meta, SQL_STORE_ALERT_CONFIG_HASH, &res)) return; - BUFFER *buf = buffer_create(128, NULL); + CLEAN_BUFFER *buf = buffer_create(128, NULL); SQLITE_BIND_FAIL( done, sqlite3_bind_blob(res, ++param, &ap->config.hash_id, sizeof(ap->config.hash_id), SQLITE_STATIC)); @@ -833,7 +833,14 @@ void sql_alert_store_config(RRD_ALERT_PROTOTYPE *ap __maybe_unused) else SQLITE_BIND_FAIL(done, sqlite3_bind_null(res, ++param)); - SQLITE_BIND_FAIL(done, sqlite3_bind_int(res, ++param, ap->config.update_every)); + if (!ap->config.has_custom_repeat_config) + SQLITE_BIND_FAIL(done, sqlite3_bind_null(res, ++param)); + else { + char repeat[255]; + snprintfz(repeat, sizeof(repeat) - 1, "warning %us critical %us", ap->config.warn_repeat_every, ap->config.crit_repeat_every); + SQLITE_BIND_FAIL(done, sqlite3_bind_text(res, ++param, repeat, -1, SQLITE_STATIC)); + } + SQLITE_BIND_FAIL(done, SQLITE3_BIND_STRING_OR_NULL(res, ++param, ap->match.host_labels)); if (ap->config.after) { @@ -866,7 +873,6 @@ void sql_alert_store_config(RRD_ALERT_PROTOTYPE *ap __maybe_unused) error_report("Failed to store alert config, rc = %d", rc); done: - buffer_free(buf); REPORT_BIND_FAIL(res, param); SQLITE_RESET(res); } diff --git a/src/go/go.mod b/src/go/go.mod index b40e70d9944297..b5587778e96f11 100644 --- a/src/go/go.mod +++ 
b/src/go/go.mod @@ -12,7 +12,7 @@ require ( github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de github.com/axiomhq/hyperloglog v0.1.0 github.com/blang/semver/v4 v4.0.0 - github.com/bmatcuk/doublestar/v4 v4.6.1 + github.com/bmatcuk/doublestar/v4 v4.7.1 github.com/clbanning/rfile/v2 v2.0.0-20231024120205-ac3fca974b0e github.com/cloudflare/cfssl v1.6.5 github.com/coreos/go-systemd/v22 v22.5.0 @@ -41,12 +41,12 @@ require ( github.com/prometheus-community/pro-bing v0.4.1 github.com/prometheus/common v0.60.0 github.com/prometheus/prometheus v2.5.0+incompatible - github.com/redis/go-redis/v9 v9.6.1 - github.com/sijms/go-ora/v2 v2.8.20 + github.com/redis/go-redis/v9 v9.6.2 + github.com/sijms/go-ora/v2 v2.8.22 github.com/stretchr/testify v1.9.0 github.com/tidwall/gjson v1.18.0 github.com/valyala/fastjson v1.6.4 - github.com/vmware/govmomi v0.43.0 + github.com/vmware/govmomi v0.44.1 go.mongodb.org/mongo-driver v1.17.1 golang.org/x/net v0.30.0 golang.org/x/text v0.19.0 diff --git a/src/go/go.sum b/src/go/go.sum index bc1f41539e7314..5fe4842d12602e 100644 --- a/src/go/go.sum +++ b/src/go/go.sum @@ -32,8 +32,8 @@ github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2io github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= -github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= +github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert 
v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= @@ -340,8 +340,8 @@ github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJN github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/prometheus v0.50.1 h1:N2L+DYrxqPh4WZStU+o1p/gQlBaqFbcLBTjlp3vpdXw= github.com/prometheus/prometheus v0.50.1/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= -github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= -github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= +github.com/redis/go-redis/v9 v9.6.2 h1:w0uvkRbc9KpgD98zcvo5IrVUsn0lXpRMuhNgiHDJzdk= +github.com/redis/go-redis/v9 v9.6.2/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= @@ -355,8 +355,8 @@ github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9Nz github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/sijms/go-ora/v2 v2.8.20 h1:VeJ97pwuIesYCeMgFmw60IiYZDst98annQCtxbLP7qU= -github.com/sijms/go-ora/v2 v2.8.20/go.mod h1:EHxlY6x7y9HAsdfumurRfTd+v8NrEOTR3Xl4FWlH6xk= +github.com/sijms/go-ora/v2 v2.8.22 h1:3ABgRzVKxS439cEgSLjFKutIwOyhnyi4oOSBywEdOlU= +github.com/sijms/go-ora/v2 v2.8.22/go.mod h1:QgFInVi3ZWyqAiJwzBQA+nbKYKH77tdp1PYoCqhR2dU= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= 
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -391,8 +391,8 @@ github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= -github.com/vmware/govmomi v0.43.0 h1:7Kg3Bkdly+TrE67BYXzRq7ZrDnn7xqpKX95uEh2f9Go= -github.com/vmware/govmomi v0.43.0/go.mod h1:IOv5nTXCPqH9qVJAlRuAGffogaLsNs8aF+e7vLgsHJU= +github.com/vmware/govmomi v0.44.1 h1:Hbt0nvVY8fnp3DGRJHngLflTos/uRrW54lhmJ/zKZFc= +github.com/vmware/govmomi v0.44.1/go.mod h1:uoLVU9zlXC4p4GmLVG+ZJmBC0Gn3Q7mytOJvi39OhxA= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go index 17f08c5f5fa474..3ec61d8b872190 100644 --- a/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go +++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go @@ -131,7 +131,7 @@ func (d *Discoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup func (d *Discoverer) discoverLocalListeners(ctx context.Context, in chan<- []model.TargetGroup) error { bs, err := d.ll.discover(ctx) if err != nil { - if errors.Is(err, context.Canceled) { + if errors.Is(err, context.DeadlineExceeded) { // there is no point in continuing pointless attempts/use cpu // https://github.com/netdata/netdata/discussions/18751#discussioncomment-10908472 
if d.timeoutRuns++; d.timeoutRuns > 5 && d.successRuns == 0 { diff --git a/src/go/plugin/go.d/modules/apcupsd/apcupsd_test.go b/src/go/plugin/go.d/modules/apcupsd/apcupsd_test.go index aca6670aaf4d0b..fa189974f0ee58 100644 --- a/src/go/plugin/go.d/modules/apcupsd/apcupsd_test.go +++ b/src/go/plugin/go.d/modules/apcupsd/apcupsd_test.go @@ -144,7 +144,7 @@ func TestApcupsd_Collect(t *testing.T) { "input_voltage_max": 23920, "input_voltage_min": 23400, "itemp": 3279, - "load": 55, + "load": 5580, "load_percent": 930, "output_voltage": 23660, "output_voltage_nominal": 23000, diff --git a/src/go/plugin/go.d/modules/apcupsd/collect.go b/src/go/plugin/go.d/modules/apcupsd/collect.go index 88205bd66a7803..74d566e001b79a 100644 --- a/src/go/plugin/go.d/modules/apcupsd/collect.go +++ b/src/go/plugin/go.d/modules/apcupsd/collect.go @@ -101,7 +101,7 @@ func (a *Apcupsd) collectStatus(mx map[string]int64, resp []byte) error { mx["timeleft"] = int64(*st.timeleft * 60 * precision) // to seconds } if st.nompower != nil && st.loadpct != nil { - mx["load"] = int64(*st.nompower * *st.loadpct / 100) + mx["load"] = int64(*st.nompower * *st.loadpct) } if st.battdate != "" { if v, err := battdateSecondsAgo(st.battdate); err != nil { diff --git a/src/health/health.d/net.conf b/src/health/health.d/net.conf index 448a3733d5a3b2..609741acac88df 100644 --- a/src/health/health.d/net.conf +++ b/src/health/health.d/net.conf @@ -19,7 +19,7 @@ component: Network class: Workload type: System component: Network -host labels: _os=linux +host labels: _os=linux windows lookup: average -1m unaligned absolute of received calc: ($interface_speed > 0) ? ($this * 100 / ($interface_speed * 1000)) : ( nan ) units: % @@ -35,7 +35,7 @@ host labels: _os=linux class: Workload type: System component: Network -host labels: _os=linux +host labels: _os=linux windows lookup: average -1m unaligned absolute of sent calc: ($interface_speed > 0) ? 
($this * 100 / ($interface_speed * 1000)) : ( nan ) units: % @@ -214,7 +214,6 @@ host labels: _os=linux class: Workload type: System component: Network -host labels: _os=linux freebsd lookup: average -1m unaligned of received units: packets every: 10s @@ -225,7 +224,6 @@ host labels: _os=linux freebsd class: Workload type: System component: Network -host labels: _os=linux freebsd lookup: average -10s unaligned of received calc: $this * 100 / (($1m_received_packets_rate < 1000)?(1000):($1m_received_packets_rate)) every: 10s @@ -237,3 +235,21 @@ host labels: _os=linux freebsd info: Ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, \ compared to the rate over the last minute to: silent + +# ----------------------------------------------------------------------------- +# output queue length + + template: network_interface_output_queue_length + on: net.queue_length + class: Errors + type: System + component: Network +host labels: _os=windows + units: packets + every: 10s + warn: $length > 2 + delay: up 1m down 1m multiplier 1.5 max 1h + summary: System network interface ${label:device} output queue length + info: The Output Queue Length on interface ${label:device} should be zero, otherwise there are delays and bottlenecks. 
+ to: silent + diff --git a/src/health/rrdvar.c b/src/health/rrdvar.c index 30b8bffab25d52..5d6e3cf84bd875 100644 --- a/src/health/rrdvar.c +++ b/src/health/rrdvar.c @@ -93,7 +93,7 @@ void rrdvar_host_variable_set(RRDHOST *host, const RRDVAR_ACQUIRED *rva, NETDATA // CUSTOM CHART VARIABLES const RRDVAR_ACQUIRED *rrdvar_chart_variable_add_and_acquire(RRDSET *st, const char *name) { - if(unlikely(!st->rrdvars)) return NULL; + if(unlikely(!st || !st->rrdvars)) return NULL; STRING *name_string = rrdvar_name_to_string(name); const RRDVAR_ACQUIRED *rs = rrdvar_add_and_acquire(st->rrdvars, name_string, NAN); @@ -102,7 +102,7 @@ const RRDVAR_ACQUIRED *rrdvar_chart_variable_add_and_acquire(RRDSET *st, const c } void rrdvar_chart_variable_set(RRDSET *st, const RRDVAR_ACQUIRED *rva, NETDATA_DOUBLE value) { - if(unlikely(!st->rrdvars || !rva)) return; + if(unlikely(!st || !st->rrdvars || !rva)) return; RRDVAR *rv = dictionary_acquired_item_value((const DICTIONARY_ITEM *)rva); if(rv->value != value) { diff --git a/src/health/rrdvar.h b/src/health/rrdvar.h index 40af39d55c2996..f61b04b4a763e7 100644 --- a/src/health/rrdvar.h +++ b/src/health/rrdvar.h @@ -17,7 +17,7 @@ void rrdvar_host_variable_set(RRDHOST *host, const RRDVAR_ACQUIRED *rva, NETDATA int rrdvar_walkthrough_read(DICTIONARY *dict, int (*callback)(const DICTIONARY_ITEM *item, void *rrdvar, void *data), void *data); #define rrdvar_host_variable_release(host, rva) rrdvar_release((host)->rrdvars, rva) -#define rrdvar_chart_variable_release(st, rva) rrdvar_release((st)->rrdvars, rva) +#define rrdvar_chart_variable_release(st, rva) do { if(st) rrdvar_release((st)->rrdvars, rva); } while(0) void rrdvar_release(DICTIONARY *dict, const RRDVAR_ACQUIRED *rva); NETDATA_DOUBLE rrdvar2number(const RRDVAR_ACQUIRED *rva); diff --git a/src/libnetdata/aral/aral.c b/src/libnetdata/aral/aral.c index 9532d63dde8ace..fadb74b398883f 100644 --- a/src/libnetdata/aral/aral.c +++ b/src/libnetdata/aral/aral.c @@ -117,7 +117,7 @@ size_t 
aral_structures(ARAL *ar) { return aral_structures_from_stats(ar->stats); } -struct aral_statistics *aral_statistics(ARAL *ar) { +struct aral_statistics *aral_get_statistics(ARAL *ar) { return ar->stats; } diff --git a/src/libnetdata/aral/aral.h b/src/libnetdata/aral/aral.h index 1ff0672ebe6e6e..4cd21d17afbeaf 100644 --- a/src/libnetdata/aral/aral.h +++ b/src/libnetdata/aral/aral.h @@ -32,7 +32,7 @@ ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_ele size_t aral_element_size(ARAL *ar); size_t aral_overhead(ARAL *ar); size_t aral_structures(ARAL *ar); -struct aral_statistics *aral_statistics(ARAL *ar); +struct aral_statistics *aral_get_statistics(ARAL *ar); size_t aral_structures_from_stats(struct aral_statistics *stats); size_t aral_overhead_from_stats(struct aral_statistics *stats); diff --git a/src/libnetdata/buffer/buffer.h b/src/libnetdata/buffer/buffer.h index 962ba25839b540..01a24be35a8839 100644 --- a/src/libnetdata/buffer/buffer.h +++ b/src/libnetdata/buffer/buffer.h @@ -521,13 +521,13 @@ static inline size_t print_int64(char *dst, int64_t value) { #define UINT64_MAX_LENGTH (24) // 21 should be enough static inline void buffer_print_uint64(BUFFER *wb, uint64_t value) { - buffer_need_bytes(wb, 50); + buffer_need_bytes(wb, UINT64_MAX_LENGTH); wb->len += print_uint64(&wb->buffer[wb->len], value); buffer_overflow_check(wb); } static inline void buffer_print_int64(BUFFER *wb, int64_t value) { - buffer_need_bytes(wb, 50); + buffer_need_bytes(wb, UINT64_MAX_LENGTH); wb->len += print_int64(&wb->buffer[wb->len], value); buffer_overflow_check(wb); } diff --git a/src/libnetdata/facets/facets.c b/src/libnetdata/facets/facets.c index 0ea36403347335..50ddf79ef48f2f 100644 --- a/src/libnetdata/facets/facets.c +++ b/src/libnetdata/facets/facets.c @@ -2363,8 +2363,8 @@ void facets_accepted_parameters_to_json_array(FACETS *facets, BUFFER *wb, bool w if(with_keys) { FACET_KEY *k; - foreach_key_in_facets(facets, k){ - if (!k->values.enabled) + 
foreach_key_in_facets(facets, k) { + if (!k->values.enabled || k->options & FACET_KEY_OPTION_HIDDEN) continue; buffer_json_add_array_item_string(wb, facets_key_id(k)); @@ -2591,7 +2591,7 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) CLEAN_BUFFER *tb = buffer_create(0, NULL); FACET_KEY *k; foreach_key_in_facets(facets, k) { - if(!k->values.enabled) + if(!k->values.enabled || k->options & FACET_KEY_OPTION_HIDDEN) continue; facets_sort_and_reorder_values(k); @@ -2677,37 +2677,40 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) FACET_KEY *k; foreach_key_in_facets(facets, k) { - RRDF_FIELD_OPTIONS options = RRDF_FIELD_OPTS_WRAP; - RRDF_FIELD_VISUAL visual = (k->options & FACET_KEY_OPTION_RICH_TEXT) ? RRDF_FIELD_VISUAL_RICH : RRDF_FIELD_VISUAL_VALUE; - RRDF_FIELD_TRANSFORM transform = RRDF_FIELD_TRANSFORM_NONE; - - if (k->options & (FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_STICKY) || - ((facets->options & FACETS_OPTION_ALL_FACETS_VISIBLE) && k->values.enabled) || - simple_pattern_matches(facets->visible_keys, k->name)) - options |= RRDF_FIELD_OPTS_VISIBLE; - - if (k->options & FACET_KEY_OPTION_MAIN_TEXT) - options |= RRDF_FIELD_OPTS_FULL_WIDTH | RRDF_FIELD_OPTS_WRAP; - - if (k->options & FACET_KEY_OPTION_EXPANDED_FILTER) - options |= RRDF_FIELD_OPTS_EXPANDED_FILTER; - - if (k->options & FACET_KEY_OPTION_PRETTY_XML) - transform = RRDF_FIELD_TRANSFORM_XML; - - const char *key_id = facets_key_id(k); - - buffer_rrdf_table_add_field( - wb, field_id++, - key_id, k->name ? k->name : key_id, - RRDF_FIELD_TYPE_STRING, - visual, transform, 0, NULL, NAN, - RRDF_FIELD_SORT_FIXED, - NULL, - RRDF_FIELD_SUMMARY_COUNT, - (k->options & FACET_KEY_OPTION_NEVER_FACET) ? 
RRDF_FIELD_FILTER_NONE : RRDF_FIELD_FILTER_FACET, - options, FACET_VALUE_UNSET); - } + if(k->options & FACET_KEY_OPTION_HIDDEN) + continue; + + RRDF_FIELD_OPTIONS options = RRDF_FIELD_OPTS_WRAP; + RRDF_FIELD_VISUAL visual = (k->options & FACET_KEY_OPTION_RICH_TEXT) ? RRDF_FIELD_VISUAL_RICH : RRDF_FIELD_VISUAL_VALUE; + RRDF_FIELD_TRANSFORM transform = RRDF_FIELD_TRANSFORM_NONE; + + if (k->options & (FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_STICKY) || + ((facets->options & FACETS_OPTION_ALL_FACETS_VISIBLE) && k->values.enabled) || + simple_pattern_matches(facets->visible_keys, k->name)) + options |= RRDF_FIELD_OPTS_VISIBLE; + + if (k->options & FACET_KEY_OPTION_MAIN_TEXT) + options |= RRDF_FIELD_OPTS_FULL_WIDTH | RRDF_FIELD_OPTS_WRAP; + + if (k->options & FACET_KEY_OPTION_EXPANDED_FILTER) + options |= RRDF_FIELD_OPTS_EXPANDED_FILTER; + + if (k->options & FACET_KEY_OPTION_PRETTY_XML) + transform = RRDF_FIELD_TRANSFORM_XML; + + const char *key_id = facets_key_id(k); + + buffer_rrdf_table_add_field( + wb, field_id++, + key_id, k->name ? k->name : key_id, + RRDF_FIELD_TYPE_STRING, + visual, transform, 0, NULL, NAN, + RRDF_FIELD_SORT_FIXED, + NULL, + RRDF_FIELD_SUMMARY_COUNT, + (k->options & FACET_KEY_OPTION_NEVER_FACET) ? 
RRDF_FIELD_FILTER_NONE : RRDF_FIELD_FILTER_FACET, + options, FACET_VALUE_UNSET); + } foreach_key_in_facets_done(k); } buffer_json_object_close(wb); // columns @@ -2744,6 +2747,9 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) FACET_KEY *k; foreach_key_in_facets(facets, k) { + if(k->options & FACET_KEY_OPTION_HIDDEN) + continue; + FACET_ROW_KEY_VALUE *rkv = dictionary_get(row->dict, k->name); if(unlikely(k->dynamic.cb)) { @@ -2786,7 +2792,7 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) { FACET_KEY *k; foreach_key_in_facets(facets, k) { - if (!k->values.enabled) + if (!k->values.enabled || k->options & FACET_KEY_OPTION_HIDDEN) continue; if(unlikely(!first_histogram_hash)) diff --git a/src/libnetdata/facets/facets.h b/src/libnetdata/facets/facets.h index 5f14c4e9386856..1d2b89c2bfe4c5 100644 --- a/src/libnetdata/facets/facets.h +++ b/src/libnetdata/facets/facets.h @@ -37,6 +37,7 @@ typedef enum __attribute__((packed)) { FACET_KEY_OPTION_TRANSFORM_VIEW = (1 << 10), // when registering the transformation, do it only at the view, not on all data FACET_KEY_OPTION_EXPANDED_FILTER = (1 << 11), // the presentation should have this filter expanded by default FACET_KEY_OPTION_PRETTY_XML = (1 << 12), // instruct the UI to parse this as an XML document + FACET_KEY_OPTION_HIDDEN = (1 << 13), // do not include this field in the response } FACET_KEY_OPTIONS; typedef enum __attribute__((packed)) { diff --git a/src/libnetdata/facets/logs_query_status.h b/src/libnetdata/facets/logs_query_status.h index 45171304668f01..4fde249980e108 100644 --- a/src/libnetdata/facets/logs_query_status.h +++ b/src/libnetdata/facets/logs_query_status.h @@ -17,7 +17,6 @@ #define LQS_PARAMETER_IF_MODIFIED_SINCE "if_modified_since" #define LQS_PARAMETER_DATA_ONLY "data_only" #define LQS_PARAMETER_SOURCE "__logs_sources" // this must never conflict with user fields -#define LQS_PARAMETER_SOURCE_NAME "Logs Sources" // this is how it is 
shown to users #define LQS_PARAMETER_INFO "info" #define LQS_PARAMETER_SLICE "slice" #define LQS_PARAMETER_DELTA "delta" diff --git a/src/libnetdata/libnetdata.h b/src/libnetdata/libnetdata.h index d40987f3234da1..969e677cb77655 100644 --- a/src/libnetdata/libnetdata.h +++ b/src/libnetdata/libnetdata.h @@ -459,14 +459,14 @@ extern const char *netdata_configured_host_prefix; #include "datetime/rfc3339.h" #include "datetime/rfc7231.h" #include "completion/completion.h" -#include "log/log.h" +#include "libnetdata/log/nd_log.h" #include "spawn_server/spawn_server.h" #include "spawn_server/spawn_popen.h" #include "simple_pattern/simple_pattern.h" #include "socket/security.h" #include "socket/socket.h" #include "config/appconfig.h" -#include "log/journal.h" +#include "log/systemd-journal-helpers.h" #include "buffered_reader/buffered_reader.h" #include "procfile/procfile.h" #include "string/string.h" diff --git a/src/libnetdata/log/README.md b/src/libnetdata/log/README.md index 44ba99f8065904..c7a42f28b05afa 100644 --- a/src/libnetdata/log/README.md +++ b/src/libnetdata/log/README.md @@ -17,14 +17,15 @@ For each log source, Netdata supports the following output methods: - **off**, to disable this log source - **journal**, to send the logs to systemd-journal. +- **etw**, to send the logs to Event Tracing for Windows (ETW). +- **wel**, to send the logs to the Windows Event Log (WEL). - **syslog**, to send the logs to syslog. - **system**, to send the output to `stderr` or `stdout` depending on the log source. - **stdout**, to write the logs to Netdata's `stdout`. - **stderr**, to write the logs to Netdata's `stderr`. - **filename**, to send the logs to a file. -For `daemon` and `collector` the default is `journal` when systemd-journal is available. -To decide if systemd-journal is available, Netdata checks: +On Linux, when systemd-journal is available, the default is `journal` for `daemon` and `collector` and `filename` for the rest. 
To decide if systemd-journal is available, Netdata checks: 1. `stderr` is connected to systemd-journald 2. `/run/systemd/journal/socket` exists @@ -32,13 +33,16 @@ To decide if systemd-journal is available, Netdata checks: If any of the above is detected, Netdata will select `journal` for `daemon` and `collector` sources. -All other sources default to a file. +On Windows, the default is `etw` and if that is not available it falls back to `wel`. The availability of `etw` is decided at compile time. ## Log formats | Format | Description | |---------|--------------------------------------------------------------------------------------------------------| | journal | journald-specific log format. Automatically selected when logging to systemd-journal. | +| etw | Event Tracing for Windows specific format. Structured logging in Event Viewer. | +| wel | Windows Event Log specific format. Basic field-based logging in Event Viewer. | +| journal | journald-specific log format. Automatically selected when logging to systemd-journal. | | logfmt | logs data as a series of key/value pairs. The default when logging to any output other than `journal`. | | json | logs data in JSON format. | @@ -57,6 +61,9 @@ Each time Netdata logs, it assigns a priority to the log. It can be one of this | info | the default log level about information the user should know. | | debug | these are more verbose logs that can be ignored. | +For `etw` these are mapped to `Verbose`, `Informational`, `Warning`, `Error` and `Critical`. +For `wel` these are mapped to `Informational`, `Warning`, `Error`. + ## Logs Configuration In `netdata.conf`, there are the following settings: @@ -108,66 +115,69 @@ Sending a `SIGHUP` to Netdata, will instruct it to re-open all its log files.

    All fields exposed by Netdata -| journal | logfmt | json | Description | -|:--------------------------------------:|:------------------------------:|:------------------------------:|:---------------------------------------------------------------------------------------------------------:| -| `_SOURCE_REALTIME_TIMESTAMP` | `time` | `time` | the timestamp of the event | -| `SYSLOG_IDENTIFIER` | `comm` | `comm` | the program logging the event | -| `ND_LOG_SOURCE` | `source` | `source` | one of the [log sources](#log-sources) | -| `PRIORITY`
    numeric | `level`
    text | `level`
    numeric | one of the [log levels](#log-levels) | -| `ERRNO` | `errno` | `errno` | the numeric value of `errno` | -| `INVOCATION_ID` | - | - | a unique UUID of the Netdata session, reset on every Netdata restart, inherited by systemd when available | -| `CODE_LINE` | - | - | the line number of of the source code logging this event | -| `CODE_FILE` | - | - | the filename of the source code logging this event | -| `CODE_FUNCTION` | - | - | the function name of the source code logging this event | -| `TID` | `tid` | `tid` | the thread id of the thread logging this event | -| `THREAD_TAG` | `thread` | `thread` | the name of the thread logging this event | -| `MESSAGE_ID` | `msg_id` | `msg_id` | see [message IDs](#message-ids) | -| `ND_MODULE` | `module` | `module` | the Netdata module logging this event | -| `ND_NIDL_NODE` | `node` | `node` | the hostname of the node the event is related to | -| `ND_NIDL_INSTANCE` | `instance` | `instance` | the instance of the node the event is related to | -| `ND_NIDL_CONTEXT` | `context` | `context` | the context the event is related to (this is usually the chart name, as shown on netdata dashboards | -| `ND_NIDL_DIMENSION` | `dimension` | `dimension` | the dimension the event is related to | -| `ND_SRC_TRANSPORT` | `src_transport` | `src_transport` | when the event happened during a request, this is the request transport | -| `ND_SRC_IP` | `src_ip` | `src_ip` | when the event happened during an inbound request, this is the IP the request came from | -| `ND_SRC_PORT` | `src_port` | `src_port` | when the event happened during an inbound request, this is the port the request came from | -| `ND_SRC_FORWARDED_HOST` | `src_forwarded_host` | `src_forwarded_host` | the contents of the HTTP header `X-Forwarded-Host` | -| `ND_SRC_FORWARDED_FOR` | `src_forwarded_for` | `src_forwarded_for` | the contents of the HTTP header `X-Forwarded-For` | -| `ND_SRC_CAPABILITIES` | `src_capabilities` | `src_capabilities` | when the request came from a 
child, this is the communication capabilities of the child | -| `ND_DST_TRANSPORT` | `dst_transport` | `dst_transport` | when the event happened during an outbound request, this is the outbound request transport | -| `ND_DST_IP` | `dst_ip` | `dst_ip` | when the event happened during an outbound request, this is the IP the request destination | -| `ND_DST_PORT` | `dst_port` | `dst_port` | when the event happened during an outbound request, this is the port the request destination | -| `ND_DST_CAPABILITIES` | `dst_capabilities` | `dst_capabilities` | when the request goes to a parent, this is the communication capabilities of the parent | -| `ND_REQUEST_METHOD` | `req_method` | `req_method` | when the event happened during an inbound request, this is the method the request was received | -| `ND_RESPONSE_CODE` | `code` | `code` | when responding to a request, this this the response code | -| `ND_CONNECTION_ID` | `conn` | `conn` | when there is a connection id for an inbound connection, this is the connection id | -| `ND_TRANSACTION_ID` | `transaction` | `transaction` | the transaction id (UUID) of all API requests | -| `ND_RESPONSE_SENT_BYTES` | `sent_bytes` | `sent_bytes` | the bytes we sent to API responses | -| `ND_RESPONSE_SIZE_BYTES` | `size_bytes` | `size_bytes` | the uncompressed bytes of the API responses | -| `ND_RESPONSE_PREP_TIME_USEC` | `prep_ut` | `prep_ut` | the time needed to prepare a response | -| `ND_RESPONSE_SENT_TIME_USEC` | `sent_ut` | `sent_ut` | the time needed to send a response | -| `ND_RESPONSE_TOTAL_TIME_USEC` | `total_ut` | `total_ut` | the total time needed to complete a response | -| `ND_ALERT_ID` | `alert_id` | `alert_id` | the alert id this event is related to | -| `ND_ALERT_EVENT_ID` | `alert_event_id` | `alert_event_id` | a sequential number of the alert transition (per host) | -| `ND_ALERT_UNIQUE_ID` | `alert_unique_id` | `alert_unique_id` | a sequential number of the alert transition (per alert) | -| `ND_ALERT_TRANSITION_ID` | 
`alert_transition_id` | `alert_transition_id` | the unique UUID of this alert transition | -| `ND_ALERT_CONFIG` | `alert_config` | `alert_config` | the alert configuration hash (UUID) | -| `ND_ALERT_NAME` | `alert` | `alert` | the alert name | -| `ND_ALERT_CLASS` | `alert_class` | `alert_class` | the alert classification | -| `ND_ALERT_COMPONENT` | `alert_component` | `alert_component` | the alert component | -| `ND_ALERT_TYPE` | `alert_type` | `alert_type` | the alert type | -| `ND_ALERT_EXEC` | `alert_exec` | `alert_exec` | the alert notification program | -| `ND_ALERT_RECIPIENT` | `alert_recipient` | `alert_recipient` | the alert recipient(s) | -| `ND_ALERT_VALUE` | `alert_value` | `alert_value` | the current alert value | -| `ND_ALERT_VALUE_OLD` | `alert_value_old` | `alert_value_old` | the previous alert value | -| `ND_ALERT_STATUS` | `alert_status` | `alert_status` | the current alert status | -| `ND_ALERT_STATUS_OLD` | `alert_value_old` | `alert_value_old` | the previous alert value | -| `ND_ALERT_UNITS` | `alert_units` | `alert_units` | the units of the alert | -| `ND_ALERT_SUMMARY` | `alert_summary` | `alert_summary` | the summary text of the alert | -| `ND_ALERT_INFO` | `alert_info` | `alert_info` | the info text of the alert | -| `ND_ALERT_DURATION` | `alert_duration` | `alert_duration` | the duration the alert was in its previous state | -| `ND_ALERT_NOTIFICATION_TIMESTAMP_USEC` | `alert_notification_timestamp` | `alert_notification_timestamp` | the timestamp the notification delivery is scheduled | -| `ND_REQUEST` | `request` | `request` | the full request during which the event happened | -| `MESSAGE` | `msg` | `msg` | the event message | +| `journal` | `logfmt` and `json` | `etw` | `wel` | Description | +|:--------------------------------------:|:------------------------------:|:-----------------------------:|:-----:|:----------------------------------------------------------------------------------------------------------| +| 
`_SOURCE_REALTIME_TIMESTAMP` | `time` | `Timestamp` | 1 | the timestamp of the event | +| `SYSLOG_IDENTIFIER` | `comm` | `Program` | 2 | the program logging the event | +| `ND_LOG_SOURCE` | `source` | `NetdataLogSource` | 3 | one of the [log sources](#log-sources) | +| `PRIORITY`
    numeric | `level`
    text | `Level`
    text | 4 | one of the [log levels](#log-levels) | +| `ERRNO` | `errno` | `UnixErrno` | 5 | the numeric value of `errno` | +| `INVOCATION_ID` | - | `InvocationID` | 7 | a unique UUID of the Netdata session, reset on every Netdata restart, inherited by systemd when available | +| `CODE_LINE` | - | `CodeLine` | 8 | the line number of of the source code logging this event | +| `CODE_FILE` | - | `CodeFile` | 9 | the filename of the source code logging this event | +| `CODE_FUNCTION` | - | `CodeFunction` | 10 | the function name of the source code logging this event | +| `TID` | `tid` | `ThreadID` | 11 | the thread id of the thread logging this event | +| `THREAD_TAG` | `thread` | `ThreadName` | 12 | the name of the thread logging this event | +| `MESSAGE_ID` | `msg_id` | `MessageID` | 13 | see [message IDs](#message-ids) | +| `ND_MODULE` | `module` | `Module` | 14 | the Netdata module logging this event | +| `ND_NIDL_NODE` | `node` | `Node` | 15 | the hostname of the node the event is related to | +| `ND_NIDL_INSTANCE` | `instance` | `Instance` | 16 | the instance of the node the event is related to | +| `ND_NIDL_CONTEXT` | `context` | `Context` | 17 | the context the event is related to (this is usually the chart name, as shown on netdata dashboards | +| `ND_NIDL_DIMENSION` | `dimension` | `Dimension` | 18 | the dimension the event is related to | +| `ND_SRC_TRANSPORT` | `src_transport` | `SourceTransport` | 19 | when the event happened during a request, this is the request transport | +| `ND_SRC_IP` | `src_ip` | `SourceIP` | 24 | when the event happened during an inbound request, this is the IP the request came from | +| `ND_SRC_PORT` | `src_port` | `SourcePort` | 25 | when the event happened during an inbound request, this is the port the request came from | +| `ND_SRC_FORWARDED_HOST` | `src_forwarded_host` | `SourceForwardedHost` | 26 | the contents of the HTTP header `X-Forwarded-Host` | +| `ND_SRC_FORWARDED_FOR` | `src_forwarded_for` | `SourceForwardedFor` | 
27 | the contents of the HTTP header `X-Forwarded-For` | +| `ND_SRC_CAPABILITIES` | `src_capabilities` | `SourceCapabilities` | 28 | when the request came from a child, this is the communication capabilities of the child | +| `ND_DST_TRANSPORT` | `dst_transport` | `DestinationTransport` | 29 | when the event happened during an outbound request, this is the outbound request transport | +| `ND_DST_IP` | `dst_ip` | `DestinationIP` | 30 | when the event happened during an outbound request, this is the IP the request destination | +| `ND_DST_PORT` | `dst_port` | `DestinationPort` | 31 | when the event happened during an outbound request, this is the port the request destination | +| `ND_DST_CAPABILITIES` | `dst_capabilities` | `DestinationCapabilities` | 32 | when the request goes to a parent, this is the communication capabilities of the parent | +| `ND_REQUEST_METHOD` | `req_method` | `RequestMethod` | 33 | when the event happened during an inbound request, this is the method the request was received | +| `ND_RESPONSE_CODE` | `code` | `ResponseCode` | 34 | when responding to a request, this this the response code | +| `ND_CONNECTION_ID` | `conn` | `ConnectionID` | 35 | when there is a connection id for an inbound connection, this is the connection id | +| `ND_TRANSACTION_ID` | `transaction` | `TransactionID` | 36 | the transaction id (UUID) of all API requests | +| `ND_RESPONSE_SENT_BYTES` | `sent_bytes` | `ResponseSentBytes` | 37 | the bytes we sent to API responses | +| `ND_RESPONSE_SIZE_BYTES` | `size_bytes` | `ResponseSizeBytes` | 38 | the uncompressed bytes of the API responses | +| `ND_RESPONSE_PREP_TIME_USEC` | `prep_ut` | `ResponsePreparationTimeUsec` | 39 | the time needed to prepare a response | +| `ND_RESPONSE_SENT_TIME_USEC` | `sent_ut` | `ResponseSentTimeUsec` | 40 | the time needed to send a response | +| `ND_RESPONSE_TOTAL_TIME_USEC` | `total_ut` | `ResponseTotalTimeUsec` | 41 | the total time needed to complete a response | +| `ND_ALERT_ID` | 
`alert_id` | `AlertID` | 42 | the alert id this event is related to | +| `ND_ALERT_EVENT_ID` | `alert_event_id` | `AlertEventID` | 44 | a sequential number of the alert transition (per host) | +| `ND_ALERT_UNIQUE_ID` | `alert_unique_id` | `AlertUniqueID` | 43 | a sequential number of the alert transition (per alert) | +| `ND_ALERT_TRANSITION_ID` | `alert_transition_id` | `AlertTransitionID` | 45 | the unique UUID of this alert transition | +| `ND_ALERT_CONFIG` | `alert_config` | `AlertConfig` | 46 | the alert configuration hash (UUID) | +| `ND_ALERT_NAME` | `alert` | `AlertName` | 47 | the alert name | +| `ND_ALERT_CLASS` | `alert_class` | `AlertClass` | 48 | the alert classification | +| `ND_ALERT_COMPONENT` | `alert_component` | `AlertComponent` | 49 | the alert component | +| `ND_ALERT_TYPE` | `alert_type` | `AlertType` | 50 | the alert type | +| `ND_ALERT_EXEC` | `alert_exec` | `AlertExec` | 51 | the alert notification program | +| `ND_ALERT_RECIPIENT` | `alert_recipient` | `AlertRecipient` | 52 | the alert recipient(s) | +| `ND_ALERT_VALUE` | `alert_value` | `AlertValue` | 54 | the current alert value | +| `ND_ALERT_VALUE_OLD` | `alert_value_old` | `AlertOldValue` | 55 | the previous alert value | +| `ND_ALERT_STATUS` | `alert_status` | `AlertStatus` | 56 | the current alert status | +| `ND_ALERT_STATUS_OLD` | `alert_value_old` | `AlertOldStatus` | 57 | the previous alert status | +| `ND_ALERT_UNITS` | `alert_units` | `AlertUnits` | 59 | the units of the alert | +| `ND_ALERT_SUMMARY` | `alert_summary` | `AlertSummary` | 60 | the summary text of the alert | +| `ND_ALERT_INFO` | `alert_info` | `AlertInfo` | 61 | the info text of the alert | +| `ND_ALERT_DURATION` | `alert_duration` | `AlertDuration` | 53 | the duration the alert was in its previous state | +| `ND_ALERT_NOTIFICATION_TIMESTAMP_USEC` | `alert_notification_timestamp` | `AlertNotificationTimeUsec` | 62 | the timestamp the notification delivery is scheduled | +| `ND_REQUEST` | `request` | `Request` | 
63 | the full request during which the event happened | +| `MESSAGE` | `msg` | `Message` | 64 | the event message | + +For `wel` (Windows Event Logs), all logs have an array of 64 fields strings, and their index number provides their meaning. +For `etw` (Event Tracing for Windows), Netdata logs in a structured way, and field names are available.
    @@ -212,3 +222,117 @@ journalctl -u netdata --namespace=netdata # All netdata logs, the newest entries are displayed first journalctl -u netdata --namespace=netdata -r ``` + +## Using Event Tracing for Windows (ETW) + +ETW requires the publisher `Netdata` to be registered. Our Windows installer does this automatically. + +Registering the publisher is done via a manifest (`%SystemRoot%\System32\wevt_netdata_manifest.xml`) +and its messages resources DLL (`%SystemRoot%\System32\wevt_netdata.dll`). + +If needed, the publisher can be registered and unregistered manually using these commands: + +```bat +REM register the Netdata publisher +wevtutil im "%SystemRoot%\System32\wevt_netdata_manifest.xml" "/mf:%SystemRoot%\System32\wevt_netdata.dll" "/rf:%SystemRoot%\System32\wevt_netdata.dll" + +REM unregister the Netdata publisher +wevtutil um "%SystemRoot%\System32\wevt_netdata_manifest.xml" +``` + +The structure of the logs are as follows: + + - Publisher `Netdata` + - Channel `Netdata/Daemon`: general messages about the Netdata service + - Channel `Netdata/Collector`: general messages about Netdata external plugins + - Channel `Netdata/Health`: alert transitions and general messages generated by Netdata's health engine + - Channel `Netdata/Access`: all accesses to Netdata APIs + - Channel `Netdata/Aclk`: for cloud connectivity tracing (disabled by default) + +Retention can be configured per Channel via the Event Viewer. Netdata does not set a default, so the system default is used. + +> **IMPORTANT**
    +> Event Tracing for Windows (ETW) does not allow logging the percentage character `%`. +> The `%` followed by a number, is recursively used for fields expansion and ETW has not +> provided any way to escape the character for preventing further expansion.
    +>
    +> To work around this limitation, Netdata replaces all `%` which are followed by a number, with `â„…` +> (the Unicode character `care of`). Visually, they look similar, but when copying IPv6 addresses +> or URLs from the logs, you have to be careful to manually replace `â„…` with `%` before using them. + +## Using Windows Event Logs (WEL) + +WEL has a different logs structure and unfortunately WEL and ETW need to use different names if they are to be used +concurrently. + +For WEL, Netdata logs as follows: + + - Channel `NetdataWEL` (unfortunately `Netdata` cannot be used, it conflicts with the ETW Publisher name) + - Publisher `NetdataDaemon`: general messages about the Netdata service + - Publisher `NetdataCollector`: general messages about Netdata external plugins + - Publisher `NetdataHealth`: alert transitions and general messages generated by Netdata's health engine + - Publisher `NetdataAccess`: all accesses to Netdata APIs + - Publisher `NetdataAclk`: for cloud connectivity tracing (disabled by default) + +Publishers must have unique names system-wide, so we had to prefix them with `Netdata`. + +Retention can be configured per Publisher via the Event Viewer or the Registry. +Netdata sets by default 20MiB for all of them, except `NetdataAclk` (5MiB) and `NetdataAccess` (35MiB), +for a total of 100MiB. + +For WEL some registry entries are needed. Netdata automatically takes care of them when it starts. + +WEL does not have the problem ETW has with the percent character `%`, so Netdata logs it as-is. + +## Differences between ETW and WEL + +There are key differences between ETW and WEL. + +### Publishers and Providers +**Publishers** are collections of ETW Providers. A Publisher is implied by a manifest file, +each of which is considered a Publisher, and each manifest file can define multiple **Providers** in it. +Other than that there is no entity related to **Publishers** in the system. + +**Publishers** are not defined for WEL. 
+ +**Providers** are the applications or modules logging. Provider names must be unique across the system, +for ETW and WEL together. + +To define a **Provider**: + +- ETW requires a **Publisher** manifest coupled with resources DLLs and must be registered + via `wevtutil` (handled by the Netdata Windows installer automatically). +- WEL requires some registry entries and a message resources DLL (handled by Netdata automatically on startup). + +The Provider appears as `Source` in the Event Viewer, for both WEL and ETW. + +### Channels +- **Channels** for WEL are collections of WEL Providers, (each WEL Provider is a single Stream of logs). +- **Channels** for ETW slice the logs of each Provider into multiple Streams. + +WEL Channels cannot have the same name as ETW Providers. This is why Netdata's ETW provider is +called `Netdata`, and WEL channel is called `NetdataWEL`. + +Despite the fact that ETW **Publishers** and WEL **Channels** are both collections of Providers, +they are not similar. In ETW a Publisher is a collection on the publisher's Providers, but in WEL +a Channel may include independent WEL Providers (e.g. the "Applications" Channel). Additionally, +WEL Channels cannot include ETW Providers. + +### Retention +Retention is always defined per Stream. + +- Retention in ETW is defined per ETW Channel (ETW Provider Stream). +- Retention in WEL is defined per WEL Provider (each WEL Provider is a single Stream). + +### Messages Formatting +- ETW supports recursive fields expansion, and therefore `%N` in fields is expanded recursively + (or replaced with an error message if expansion fails). Netdata replaces `%N` with `â„…N` to stop + recursive expansion (since `%N` cannot be logged otherwise). +- WEL performs a single field expansion, and therefore the `%` character in fields is never expanded. + +### Usability + +- ETW names all the fields and allows multiple datatypes per field, enabling log consumers to know + what each field means and its datatype. 
+- WEL uses a simple string table for fields, and consumers need to map these string fields based on + their index. diff --git a/src/libnetdata/log/log.c b/src/libnetdata/log/log.c deleted file mode 100644 index b7efa26464c4e4..00000000000000 --- a/src/libnetdata/log/log.c +++ /dev/null @@ -1,2555 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -// do not REMOVE this, it is used by systemd-journal includes to prevent saving the file, function, line of the -// source code that makes the calls, allowing our loggers to log the lines of source code that actually log -#define SD_JOURNAL_SUPPRESS_LOCATION - -#include "../libnetdata.h" - -#if defined(OS_WINDOWS) -#include -#endif - -#ifdef __FreeBSD__ -#include -#endif - -#ifdef __APPLE__ -#include -#endif - -#if !defined(ENABLE_SENTRY) && defined(HAVE_BACKTRACE) -#include -#endif - -#ifdef HAVE_SYSTEMD -#include -#endif - -const char *program_name = ""; -uint64_t debug_flags = 0; -int aclklog_enabled = 0; - -// ---------------------------------------------------------------------------- - -struct nd_log_source; -static bool nd_log_limit_reached(struct nd_log_source *source); - -// ---------------------------------------------------------------------------- - -void errno_clear(void) { - errno = 0; - -#if defined(OS_WINDOWS) - SetLastError(ERROR_SUCCESS); -#endif -} - -// ---------------------------------------------------------------------------- -// logging method - -typedef enum __attribute__((__packed__)) { - NDLM_DISABLED = 0, - NDLM_DEVNULL, - NDLM_DEFAULT, - NDLM_JOURNAL, - NDLM_SYSLOG, - NDLM_STDOUT, - NDLM_STDERR, - NDLM_FILE, -} ND_LOG_METHOD; - -static struct { - ND_LOG_METHOD method; - const char *name; -} nd_log_methods[] = { - { .method = NDLM_DISABLED, .name = "none" }, - { .method = NDLM_DEVNULL, .name = "/dev/null" }, - { .method = NDLM_DEFAULT, .name = "default" }, - { .method = NDLM_JOURNAL, .name = "journal" }, - { .method = NDLM_SYSLOG, .name = "syslog" }, - { .method = NDLM_STDOUT, .name = 
"stdout" }, - { .method = NDLM_STDERR, .name = "stderr" }, - { .method = NDLM_FILE, .name = "file" }, -}; - -static ND_LOG_METHOD nd_log_method2id(const char *method) { - if(!method || !*method) - return NDLM_DEFAULT; - - size_t entries = sizeof(nd_log_methods) / sizeof(nd_log_methods[0]); - for(size_t i = 0; i < entries ;i++) { - if(strcmp(nd_log_methods[i].name, method) == 0) - return nd_log_methods[i].method; - } - - return NDLM_FILE; -} - -static const char *nd_log_id2method(ND_LOG_METHOD method) { - size_t entries = sizeof(nd_log_methods) / sizeof(nd_log_methods[0]); - for(size_t i = 0; i < entries ;i++) { - if(method == nd_log_methods[i].method) - return nd_log_methods[i].name; - } - - return "unknown"; -} - -#define IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(ndlo) ((ndlo) == NDLM_JOURNAL || (ndlo) == NDLM_SYSLOG || (ndlo) == NDLM_STDERR) - -const char *nd_log_method_for_external_plugins(const char *s) { - if(s && *s) { - ND_LOG_METHOD method = nd_log_method2id(s); - if(IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(method)) - return nd_log_id2method(method); - } - - return nd_log_id2method(NDLM_STDERR); -} - -// ---------------------------------------------------------------------------- -// workaround strerror_r() - -#if defined(STRERROR_R_CHAR_P) -// GLIBC version of strerror_r -static const char *strerror_result(const char *a, const char *b) { (void)b; return a; } -#elif defined(HAVE_STRERROR_R) -// POSIX version of strerror_r -static const char *strerror_result(int a, const char *b) { (void)a; return b; } -#elif defined(HAVE_C__GENERIC) - -// what a trick! 
-// http://stackoverflow.com/questions/479207/function-overloading-in-c -static const char *strerror_result_int(int a, const char *b) { (void)a; return b; } -static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; } - -#define strerror_result(a, b) _Generic((a), \ - int: strerror_result_int, \ - char *: strerror_result_string \ - )(a, b) - -#else -#error "cannot detect the format of function strerror_r()" -#endif - -static const char *errno2str(int errnum, char *buf, size_t size) { - return strerror_result(strerror_r(errnum, buf, size), buf); -} - -// ---------------------------------------------------------------------------- -// facilities -// -// sys/syslog.h (Linux) -// sys/sys/syslog.h (FreeBSD) -// bsd/sys/syslog.h (darwin-xnu) - -static struct { - int facility; - const char *name; -} nd_log_facilities[] = { - { LOG_AUTH, "auth" }, - { LOG_AUTHPRIV, "authpriv" }, - { LOG_CRON, "cron" }, - { LOG_DAEMON, "daemon" }, - { LOG_FTP, "ftp" }, - { LOG_KERN, "kern" }, - { LOG_LPR, "lpr" }, - { LOG_MAIL, "mail" }, - { LOG_NEWS, "news" }, - { LOG_SYSLOG, "syslog" }, - { LOG_USER, "user" }, - { LOG_UUCP, "uucp" }, - { LOG_LOCAL0, "local0" }, - { LOG_LOCAL1, "local1" }, - { LOG_LOCAL2, "local2" }, - { LOG_LOCAL3, "local3" }, - { LOG_LOCAL4, "local4" }, - { LOG_LOCAL5, "local5" }, - { LOG_LOCAL6, "local6" }, - { LOG_LOCAL7, "local7" }, - -#ifdef __FreeBSD__ - { LOG_CONSOLE, "console" }, - { LOG_NTP, "ntp" }, - - // FreeBSD does not consider 'security' as deprecated. - { LOG_SECURITY, "security" }, -#else - // For all other O/S 'security' is mapped to 'auth'. 
- { LOG_AUTH, "security" }, -#endif - -#ifdef __APPLE__ - { LOG_INSTALL, "install" }, - { LOG_NETINFO, "netinfo" }, - { LOG_RAS, "ras" }, - { LOG_REMOTEAUTH, "remoteauth" }, - { LOG_LAUNCHD, "launchd" }, - -#endif -}; - -static int nd_log_facility2id(const char *facility) { - size_t entries = sizeof(nd_log_facilities) / sizeof(nd_log_facilities[0]); - for(size_t i = 0; i < entries ;i++) { - if(strcmp(nd_log_facilities[i].name, facility) == 0) - return nd_log_facilities[i].facility; - } - - return LOG_DAEMON; -} - -static const char *nd_log_id2facility(int facility) { - size_t entries = sizeof(nd_log_facilities) / sizeof(nd_log_facilities[0]); - for(size_t i = 0; i < entries ;i++) { - if(nd_log_facilities[i].facility == facility) - return nd_log_facilities[i].name; - } - - return "daemon"; -} - -// ---------------------------------------------------------------------------- -// priorities - -static struct { - ND_LOG_FIELD_PRIORITY priority; - const char *name; -} nd_log_priorities[] = { - { .priority = NDLP_EMERG, .name = "emergency" }, - { .priority = NDLP_EMERG, .name = "emerg" }, - { .priority = NDLP_ALERT, .name = "alert" }, - { .priority = NDLP_CRIT, .name = "critical" }, - { .priority = NDLP_CRIT, .name = "crit" }, - { .priority = NDLP_ERR, .name = "error" }, - { .priority = NDLP_ERR, .name = "err" }, - { .priority = NDLP_WARNING, .name = "warning" }, - { .priority = NDLP_WARNING, .name = "warn" }, - { .priority = NDLP_NOTICE, .name = "notice" }, - { .priority = NDLP_INFO, .name = NDLP_INFO_STR }, - { .priority = NDLP_DEBUG, .name = "debug" }, -}; - -int nd_log_priority2id(const char *priority) { - size_t entries = sizeof(nd_log_priorities) / sizeof(nd_log_priorities[0]); - for(size_t i = 0; i < entries ;i++) { - if(strcmp(nd_log_priorities[i].name, priority) == 0) - return nd_log_priorities[i].priority; - } - - return NDLP_INFO; -} - -const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority) { - size_t entries = sizeof(nd_log_priorities) / 
sizeof(nd_log_priorities[0]); - for(size_t i = 0; i < entries ;i++) { - if(priority == nd_log_priorities[i].priority) - return nd_log_priorities[i].name; - } - - return NDLP_INFO_STR; -} - -// ---------------------------------------------------------------------------- -// log sources - -const char *nd_log_sources[] = { - [NDLS_UNSET] = "UNSET", - [NDLS_ACCESS] = "access", - [NDLS_ACLK] = "aclk", - [NDLS_COLLECTORS] = "collector", - [NDLS_DAEMON] = "daemon", - [NDLS_HEALTH] = "health", - [NDLS_DEBUG] = "debug", -}; - -size_t nd_log_source2id(const char *source, ND_LOG_SOURCES def) { - size_t entries = sizeof(nd_log_sources) / sizeof(nd_log_sources[0]); - for(size_t i = 0; i < entries ;i++) { - if(strcmp(nd_log_sources[i], source) == 0) - return i; - } - - return def; -} - - -static const char *nd_log_id2source(ND_LOG_SOURCES source) { - size_t entries = sizeof(nd_log_sources) / sizeof(nd_log_sources[0]); - if(source < entries) - return nd_log_sources[source]; - - return nd_log_sources[NDLS_COLLECTORS]; -} - -// ---------------------------------------------------------------------------- -// log output formats - -typedef enum __attribute__((__packed__)) { - NDLF_JOURNAL, - NDLF_LOGFMT, - NDLF_JSON, -} ND_LOG_FORMAT; - -static struct { - ND_LOG_FORMAT format; - const char *name; -} nd_log_formats[] = { - { .format = NDLF_JOURNAL, .name = "journal" }, - { .format = NDLF_LOGFMT, .name = "logfmt" }, - { .format = NDLF_JSON, .name = "json" }, -}; - -static ND_LOG_FORMAT nd_log_format2id(const char *format) { - if(!format || !*format) - return NDLF_LOGFMT; - - size_t entries = sizeof(nd_log_formats) / sizeof(nd_log_formats[0]); - for(size_t i = 0; i < entries ;i++) { - if(strcmp(nd_log_formats[i].name, format) == 0) - return nd_log_formats[i].format; - } - - return NDLF_LOGFMT; -} - -static const char *nd_log_id2format(ND_LOG_FORMAT format) { - size_t entries = sizeof(nd_log_formats) / sizeof(nd_log_formats[0]); - for(size_t i = 0; i < entries ;i++) { - if(format == 
nd_log_formats[i].format) - return nd_log_formats[i].name; - } - - return "logfmt"; -} - -// ---------------------------------------------------------------------------- -// format dates - -void log_date(char *buffer, size_t len, time_t now) { - if(unlikely(!buffer || !len)) - return; - - time_t t = now; - struct tm *tmp, tmbuf; - - tmp = localtime_r(&t, &tmbuf); - - if (unlikely(!tmp)) { - buffer[0] = '\0'; - return; - } - - if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0)) - buffer[0] = '\0'; - - buffer[len - 1] = '\0'; -} - -// ---------------------------------------------------------------------------- - -struct nd_log_limit { - usec_t started_monotonic_ut; - uint32_t counter; - uint32_t prevented; - - uint32_t throttle_period; - uint32_t logs_per_period; - uint32_t logs_per_period_backup; -}; - -#define ND_LOG_LIMITS_DEFAULT (struct nd_log_limit){ .logs_per_period = ND_LOG_DEFAULT_THROTTLE_LOGS, .logs_per_period_backup = ND_LOG_DEFAULT_THROTTLE_LOGS, .throttle_period = ND_LOG_DEFAULT_THROTTLE_PERIOD, } -#define ND_LOG_LIMITS_UNLIMITED (struct nd_log_limit){ .logs_per_period = 0, .logs_per_period_backup = 0, .throttle_period = 0, } - -struct nd_log_source { - SPINLOCK spinlock; - ND_LOG_METHOD method; - ND_LOG_FORMAT format; - const char *filename; - int fd; - FILE *fp; - - ND_LOG_FIELD_PRIORITY min_priority; - const char *pending_msg; - struct nd_log_limit limits; -}; - -static struct { - nd_uuid_t invocation_id; - - ND_LOG_SOURCES overwrite_process_source; - - struct nd_log_source sources[_NDLS_MAX]; - - struct { - bool initialized; - } journal; - - struct { - bool initialized; - int fd; - char filename[FILENAME_MAX + 1]; - } journal_direct; - - struct { - bool initialized; - int facility; - } syslog; - - struct { - SPINLOCK spinlock; - bool initialized; - } std_output; - - struct { - SPINLOCK spinlock; - bool initialized; - } std_error; - -} nd_log = { - .overwrite_process_source = 0, - .journal = { - .initialized = false, - }, - 
.journal_direct = { - .initialized = false, - .fd = -1, - }, - .syslog = { - .initialized = false, - .facility = LOG_DAEMON, - }, - .std_output = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .initialized = false, - }, - .std_error = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .initialized = false, - }, - .sources = { - [NDLS_UNSET] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DISABLED, - .format = NDLF_JOURNAL, - .filename = NULL, - .fd = -1, - .fp = NULL, - .min_priority = NDLP_EMERG, - .limits = ND_LOG_LIMITS_UNLIMITED, - }, - [NDLS_ACCESS] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DEFAULT, - .format = NDLF_LOGFMT, - .filename = LOG_DIR "/access.log", - .fd = -1, - .fp = NULL, - .min_priority = NDLP_DEBUG, - .limits = ND_LOG_LIMITS_UNLIMITED, - }, - [NDLS_ACLK] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_FILE, - .format = NDLF_LOGFMT, - .filename = LOG_DIR "/aclk.log", - .fd = -1, - .fp = NULL, - .min_priority = NDLP_DEBUG, - .limits = ND_LOG_LIMITS_UNLIMITED, - }, - [NDLS_COLLECTORS] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DEFAULT, - .format = NDLF_LOGFMT, - .filename = LOG_DIR "/collector.log", - .fd = STDERR_FILENO, - .fp = NULL, - .min_priority = NDLP_INFO, - .limits = ND_LOG_LIMITS_DEFAULT, - }, - [NDLS_DEBUG] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DISABLED, - .format = NDLF_LOGFMT, - .filename = LOG_DIR "/debug.log", - .fd = STDOUT_FILENO, - .fp = NULL, - .min_priority = NDLP_DEBUG, - .limits = ND_LOG_LIMITS_UNLIMITED, - }, - [NDLS_DAEMON] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DEFAULT, - .filename = LOG_DIR "/daemon.log", - .format = NDLF_LOGFMT, - .fd = -1, - .fp = NULL, - .min_priority = NDLP_INFO, - .limits = ND_LOG_LIMITS_DEFAULT, - }, - [NDLS_HEALTH] = { - .spinlock = NETDATA_SPINLOCK_INITIALIZER, - .method = NDLM_DEFAULT, - .format = NDLF_LOGFMT, - .filename = LOG_DIR "/health.log", - .fd = -1, - .fp = 
NULL, - .min_priority = NDLP_DEBUG, - .limits = ND_LOG_LIMITS_UNLIMITED, - }, - }, -}; - -__attribute__((constructor)) void initialize_invocation_id(void) { - // check for a NETDATA_INVOCATION_ID - if(uuid_parse_flexi(getenv("NETDATA_INVOCATION_ID"), nd_log.invocation_id) != 0) { - // not found, check for systemd set INVOCATION_ID - if(uuid_parse_flexi(getenv("INVOCATION_ID"), nd_log.invocation_id) != 0) { - // not found, generate a new one - uuid_generate_random(nd_log.invocation_id); - } - } - - char uuid[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(nd_log.invocation_id, uuid); - nd_setenv("NETDATA_INVOCATION_ID", uuid, 1); -} - -int nd_log_health_fd(void) { - if(nd_log.sources[NDLS_HEALTH].method == NDLM_FILE && nd_log.sources[NDLS_HEALTH].fd != -1) - return nd_log.sources[NDLS_HEALTH].fd; - - return STDERR_FILENO; -} - -int nd_log_collectors_fd(void) { - if(nd_log.sources[NDLS_COLLECTORS].method == NDLM_FILE && nd_log.sources[NDLS_COLLECTORS].fd != -1) - return nd_log.sources[NDLS_COLLECTORS].fd; - - return STDERR_FILENO; -} - -void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting) { - char buf[FILENAME_MAX + 100]; - if(setting && *setting) - strncpyz(buf, setting, sizeof(buf) - 1); - else - buf[0] = '\0'; - - struct nd_log_source *ls = &nd_log.sources[source]; - char *output = strrchr(buf, '@'); - - if(!output) - // all of it is the output - output = buf; - else { - // we found an '@', the next char is the output - *output = '\0'; - output++; - - // parse the other params - char *remaining = buf; - while(remaining) { - char *value = strsep_skip_consecutive_separators(&remaining, ","); - if (!value || !*value) continue; - - char *name = strsep_skip_consecutive_separators(&value, "="); - if (!name || !*name) continue; - - if(strcmp(name, "logfmt") == 0) - ls->format = NDLF_LOGFMT; - else if(strcmp(name, "json") == 0) - ls->format = NDLF_JSON; - else if(strcmp(name, "journal") == 0) - ls->format = NDLF_JOURNAL; - else if(strcmp(name, 
"level") == 0 && value && *value) - ls->min_priority = nd_log_priority2id(value); - else if(strcmp(name, "protection") == 0 && value && *value) { - if(strcmp(value, "off") == 0 || strcmp(value, "none") == 0) { - ls->limits = ND_LOG_LIMITS_UNLIMITED; - ls->limits.counter = 0; - ls->limits.prevented = 0; - } - else { - ls->limits = ND_LOG_LIMITS_DEFAULT; - - char *slash = strchr(value, '/'); - if(slash) { - *slash = '\0'; - slash++; - ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value); - - int period; - if(!duration_parse_seconds(slash, &period)) { - nd_log(NDLS_DAEMON, NDLP_ERR, "Error while parsing period '%s'", slash); - period = ND_LOG_DEFAULT_THROTTLE_PERIOD; - } - - ls->limits.throttle_period = period; - } - else { - ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value); - ls->limits.throttle_period = ND_LOG_DEFAULT_THROTTLE_PERIOD; - } - } - } - else - nd_log(NDLS_DAEMON, NDLP_ERR, - "Error while parsing configuration of log source '%s'. 
" - "In config '%s', '%s' is not understood.", - nd_log_id2source(source), setting, name); - } - } - - if(!output || !*output || strcmp(output, "none") == 0 || strcmp(output, "off") == 0) { - ls->method = NDLM_DISABLED; - ls->filename = "/dev/null"; - } - else if(strcmp(output, "journal") == 0) { - ls->method = NDLM_JOURNAL; - ls->filename = NULL; - } - else if(strcmp(output, "syslog") == 0) { - ls->method = NDLM_SYSLOG; - ls->filename = NULL; - } - else if(strcmp(output, "/dev/null") == 0) { - ls->method = NDLM_DEVNULL; - ls->filename = "/dev/null"; - } - else if(strcmp(output, "system") == 0) { - if(ls->fd == STDERR_FILENO) { - ls->method = NDLM_STDERR; - ls->filename = NULL; - ls->fd = STDERR_FILENO; - } - else { - ls->method = NDLM_STDOUT; - ls->filename = NULL; - ls->fd = STDOUT_FILENO; - } - } - else if(strcmp(output, "stderr") == 0) { - ls->method = NDLM_STDERR; - ls->filename = NULL; - ls->fd = STDERR_FILENO; - } - else if(strcmp(output, "stdout") == 0) { - ls->method = NDLM_STDOUT; - ls->filename = NULL; - ls->fd = STDOUT_FILENO; - } - else { - ls->method = NDLM_FILE; - ls->filename = strdupz(output); - } - -#if defined(NETDATA_INTERNAL_CHECKS) || defined(NETDATA_DEV_MODE) - ls->min_priority = NDLP_DEBUG; -#endif - - if(source == NDLS_COLLECTORS) { - // set the method for the collector processes we will spawn - - ND_LOG_METHOD method; - ND_LOG_FORMAT format = ls->format; - ND_LOG_FIELD_PRIORITY priority = ls->min_priority; - - if(ls->method == NDLM_SYSLOG || ls->method == NDLM_JOURNAL) - method = ls->method; - else - method = NDLM_STDERR; - - nd_setenv("NETDATA_LOG_METHOD", nd_log_id2method(method), 1); - nd_setenv("NETDATA_LOG_FORMAT", nd_log_id2format(format), 1); - nd_setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); - } -} - -void nd_log_set_priority_level(const char *setting) { - if(!setting || !*setting) - setting = "info"; - - ND_LOG_FIELD_PRIORITY priority = nd_log_priority2id(setting); - -#if defined(NETDATA_INTERNAL_CHECKS) || 
defined(NETDATA_DEV_MODE) - priority = NDLP_DEBUG; -#endif - - for (size_t i = 0; i < _NDLS_MAX; i++) { - if (i != NDLS_DEBUG) - nd_log.sources[i].min_priority = priority; - } - - // the right one - nd_setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); -} - -void nd_log_set_facility(const char *facility) { - if(!facility || !*facility) - facility = "daemon"; - - nd_log.syslog.facility = nd_log_facility2id(facility); - nd_setenv("NETDATA_SYSLOG_FACILITY", nd_log_id2facility(nd_log.syslog.facility), 1); -} - -void nd_log_set_flood_protection(size_t logs, time_t period) { - nd_log.sources[NDLS_DAEMON].limits.logs_per_period = - nd_log.sources[NDLS_DAEMON].limits.logs_per_period_backup; - nd_log.sources[NDLS_COLLECTORS].limits.logs_per_period = - nd_log.sources[NDLS_COLLECTORS].limits.logs_per_period_backup = logs; - - nd_log.sources[NDLS_DAEMON].limits.throttle_period = - nd_log.sources[NDLS_COLLECTORS].limits.throttle_period = period; - - char buf[100]; - snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )period); - nd_setenv("NETDATA_ERRORS_THROTTLE_PERIOD", buf, 1); - snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )logs); - nd_setenv("NETDATA_ERRORS_PER_PERIOD", buf, 1); -} - -static bool nd_log_journal_systemd_init(void) { -#ifdef HAVE_SYSTEMD - nd_log.journal.initialized = true; -#else - nd_log.journal.initialized = false; -#endif - - return nd_log.journal.initialized; -} - -static void nd_log_journal_direct_set_env(void) { - if(nd_log.sources[NDLS_COLLECTORS].method == NDLM_JOURNAL) - nd_setenv("NETDATA_SYSTEMD_JOURNAL_PATH", nd_log.journal_direct.filename, 1); -} - -static bool nd_log_journal_direct_init(const char *path) { - if(nd_log.journal_direct.initialized) { - nd_log_journal_direct_set_env(); - return true; - } - - int fd; - char filename[FILENAME_MAX + 1]; - if(!is_path_unix_socket(path)) { - - journal_construct_path(filename, sizeof(filename), netdata_configured_host_prefix, "netdata"); - if (!is_path_unix_socket(filename) || (fd = 
journal_direct_fd(filename)) == -1) { - - journal_construct_path(filename, sizeof(filename), netdata_configured_host_prefix, NULL); - if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) { - - journal_construct_path(filename, sizeof(filename), NULL, "netdata"); - if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) { - - journal_construct_path(filename, sizeof(filename), NULL, NULL); - if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) - return false; - } - } - } - } - else { - snprintfz(filename, sizeof(filename), "%s", path); - fd = journal_direct_fd(filename); - } - - if(fd < 0) - return false; - - nd_log.journal_direct.fd = fd; - nd_log.journal_direct.initialized = true; - - strncpyz(nd_log.journal_direct.filename, filename, sizeof(nd_log.journal_direct.filename) - 1); - nd_log_journal_direct_set_env(); - - return true; -} - -static void nd_log_syslog_init() { - if(nd_log.syslog.initialized) - return; - - openlog(program_name, LOG_PID, nd_log.syslog.facility); - nd_log.syslog.initialized = true; -} - -void nd_log_initialize_for_external_plugins(const char *name) { - // if we don't run under Netdata, log to stderr, - // otherwise, use the logging method Netdata wants us to use. 
- nd_setenv("NETDATA_LOG_METHOD", "stderr", 0); - nd_setenv("NETDATA_LOG_FORMAT", "logfmt", 0); - - nd_log.overwrite_process_source = NDLS_COLLECTORS; - program_name = name; - - for(size_t i = 0; i < _NDLS_MAX ;i++) { - nd_log.sources[i].method = STDERR_FILENO; - nd_log.sources[i].fd = -1; - nd_log.sources[i].fp = NULL; - } - - nd_log_set_priority_level(getenv("NETDATA_LOG_LEVEL")); - nd_log_set_facility(getenv("NETDATA_SYSLOG_FACILITY")); - - time_t period = 1200; - size_t logs = 200; - const char *s = getenv("NETDATA_ERRORS_THROTTLE_PERIOD"); - if(s && *s >= '0' && *s <= '9') { - period = str2l(s); - if(period < 0) period = 0; - } - - s = getenv("NETDATA_ERRORS_PER_PERIOD"); - if(s && *s >= '0' && *s <= '9') - logs = str2u(s); - - nd_log_set_flood_protection(logs, period); - - if(!netdata_configured_host_prefix) { - s = getenv("NETDATA_HOST_PREFIX"); - if(s && *s) - netdata_configured_host_prefix = (char *)s; - } - - ND_LOG_METHOD method = nd_log_method2id(getenv("NETDATA_LOG_METHOD")); - ND_LOG_FORMAT format = nd_log_format2id(getenv("NETDATA_LOG_FORMAT")); - - if(!IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(method)) { - if(is_stderr_connected_to_journal()) { - nd_log(NDLS_COLLECTORS, NDLP_WARNING, "NETDATA_LOG_METHOD is not set. Using journal."); - method = NDLM_JOURNAL; - } - else { - nd_log(NDLS_COLLECTORS, NDLP_WARNING, "NETDATA_LOG_METHOD is not set. Using stderr."); - method = NDLM_STDERR; - } - } - - switch(method) { - case NDLM_JOURNAL: - if(!nd_log_journal_direct_init(getenv("NETDATA_SYSTEMD_JOURNAL_PATH")) || - !nd_log_journal_direct_init(NULL) || !nd_log_journal_systemd_init()) { - nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to initialize journal. 
Using stderr."); - method = NDLM_STDERR; - } - break; - - case NDLM_SYSLOG: - nd_log_syslog_init(); - break; - - default: - method = NDLM_STDERR; - break; - } - - for(size_t i = 0; i < _NDLS_MAX ;i++) { - nd_log.sources[i].method = method; - nd_log.sources[i].format = format; - nd_log.sources[i].fd = -1; - nd_log.sources[i].fp = NULL; - } - -// nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "FINAL_LOG_METHOD: %s", nd_log_id2method(method)); -} - -static bool nd_log_replace_existing_fd(struct nd_log_source *e, int new_fd) { - if(new_fd == -1 || e->fd == -1 || - (e->fd == STDOUT_FILENO && nd_log.std_output.initialized) || - (e->fd == STDERR_FILENO && nd_log.std_error.initialized)) - return false; - - if(new_fd != e->fd) { - int t = dup2(new_fd, e->fd); - - bool ret = true; - if (t == -1) { - netdata_log_error("Cannot dup2() new fd %d to old fd %d for '%s'", new_fd, e->fd, e->filename); - ret = false; - } - else - close(new_fd); - - if(e->fd == STDOUT_FILENO) - nd_log.std_output.initialized = true; - else if(e->fd == STDERR_FILENO) - nd_log.std_error.initialized = true; - - return ret; - } - - return false; -} - -static void nd_log_open(struct nd_log_source *e, ND_LOG_SOURCES source) { - if(e->method == NDLM_DEFAULT) - nd_log_set_user_settings(source, e->filename); - - if((e->method == NDLM_FILE && !e->filename) || - (e->method == NDLM_DEVNULL && e->fd == -1)) - e->method = NDLM_DISABLED; - - if(e->fp) - fflush(e->fp); - - switch(e->method) { - case NDLM_SYSLOG: - nd_log_syslog_init(); - break; - - case NDLM_JOURNAL: - nd_log_journal_direct_init(NULL); - nd_log_journal_systemd_init(); - break; - - case NDLM_STDOUT: - e->fp = stdout; - e->fd = STDOUT_FILENO; - break; - - case NDLM_DISABLED: - break; - - case NDLM_DEFAULT: - case NDLM_STDERR: - e->method = NDLM_STDERR; - e->fp = stderr; - e->fd = STDERR_FILENO; - break; - - case NDLM_DEVNULL: - case NDLM_FILE: { - int fd = open(e->filename, O_WRONLY | O_APPEND | O_CREAT, 0664); - if(fd == -1) { - if(e->fd != STDOUT_FILENO && 
e->fd != STDERR_FILENO) { - e->fd = STDERR_FILENO; - e->method = NDLM_STDERR; - netdata_log_error("Cannot open log file '%s'. Falling back to stderr.", e->filename); - } - else - netdata_log_error("Cannot open log file '%s'. Leaving fd %d as-is.", e->filename, e->fd); - } - else { - if (!nd_log_replace_existing_fd(e, fd)) { - if(e->fd == STDOUT_FILENO || e->fd == STDERR_FILENO) { - if(e->fd == STDOUT_FILENO) - e->method = NDLM_STDOUT; - else if(e->fd == STDERR_FILENO) - e->method = NDLM_STDERR; - - // we have dup2() fd, so we can close the one we opened - if(fd != STDOUT_FILENO && fd != STDERR_FILENO) - close(fd); - } - else - e->fd = fd; - } - } - - // at this point we have e->fd set properly - - if(e->fd == STDOUT_FILENO) - e->fp = stdout; - else if(e->fd == STDERR_FILENO) - e->fp = stderr; - - if(!e->fp) { - e->fp = fdopen(e->fd, "a"); - if (!e->fp) { - netdata_log_error("Cannot fdopen() fd %d ('%s')", e->fd, e->filename); - - if(e->fd != STDOUT_FILENO && e->fd != STDERR_FILENO) - close(e->fd); - - e->fp = stderr; - e->fd = STDERR_FILENO; - } - } - else { - if (setvbuf(e->fp, NULL, _IOLBF, 0) != 0) - netdata_log_error("Cannot set line buffering on fd %d ('%s')", e->fd, e->filename); - } - } - break; - } -} - -static void nd_log_stdin_init(int fd, const char *filename) { - int f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664); - if(f == -1) - return; - - if(f != fd) { - dup2(f, fd); - close(f); - } -} - -void nd_log_initialize(void) { - nd_log_stdin_init(STDIN_FILENO, "/dev/null"); - - for(size_t i = 0 ; i < _NDLS_MAX ; i++) - nd_log_open(&nd_log.sources[i], i); -} - -void nd_log_reopen_log_files(bool log) { - if(log) - netdata_log_info("Reopening all log files."); - - nd_log.std_output.initialized = false; - nd_log.std_error.initialized = false; - nd_log_initialize(); - - if(log) - netdata_log_info("Log files re-opened."); -} - -void nd_log_reopen_log_files_for_spawn_server(void) { - if(nd_log.syslog.initialized) { - closelog(); - nd_log.syslog.initialized 
= false; - nd_log_syslog_init(); - } - - if(nd_log.journal_direct.initialized) { - close(nd_log.journal_direct.fd); - nd_log.journal_direct.fd = -1; - nd_log.journal_direct.initialized = false; - nd_log_journal_direct_init(NULL); - } - - nd_log.sources[NDLS_UNSET].method = NDLM_DISABLED; - nd_log.sources[NDLS_ACCESS].method = NDLM_DISABLED; - nd_log.sources[NDLS_ACLK].method = NDLM_DISABLED; - nd_log.sources[NDLS_DEBUG].method = NDLM_DISABLED; - nd_log.sources[NDLS_HEALTH].method = NDLM_DISABLED; - nd_log_reopen_log_files(false); -} - -void chown_open_file(int fd, uid_t uid, gid_t gid) { - if(fd == -1) return; - - struct stat buf; - - if(fstat(fd, &buf) == -1) { - netdata_log_error("Cannot fstat() fd %d", fd); - return; - } - - if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) { - if(fchown(fd, uid, gid) == -1) - netdata_log_error("Cannot fchown() fd %d.", fd); - } -} - -void nd_log_chown_log_files(uid_t uid, gid_t gid) { - for(size_t i = 0 ; i < _NDLS_MAX ; i++) { - if(nd_log.sources[i].fd != -1 && nd_log.sources[i].fd != STDIN_FILENO) - chown_open_file(nd_log.sources[i].fd, uid, gid); - } -} - -// ---------------------------------------------------------------------------- -// annotators -struct log_field; -static void errno_annotator(BUFFER *wb, const char *key, struct log_field *lf); -static void priority_annotator(BUFFER *wb, const char *key, struct log_field *lf); -static void timestamp_usec_annotator(BUFFER *wb, const char *key, struct log_field *lf); - -#if defined(OS_WINDOWS) -static void winerror_annotator(BUFFER *wb, const char *key, struct log_field *lf); -#endif - -// ---------------------------------------------------------------------------- - -typedef void (*annotator_t)(BUFFER *wb, const char *key, struct log_field *lf); - -struct log_field { - const char *journal; - const char *logfmt; - annotator_t logfmt_annotator; - struct log_stack_entry entry; -}; - -#define THREAD_LOG_STACK_MAX 50 - -static __thread struct log_stack_entry 
*thread_log_stack_base[THREAD_LOG_STACK_MAX]; -static __thread size_t thread_log_stack_next = 0; - -static __thread struct log_field thread_log_fields[_NDF_MAX] = { - // THE ORDER DEFINES THE ORDER FIELDS WILL APPEAR IN logfmt - - [NDF_STOP] = { // processing will not stop on this - so it is ok to be first - .journal = NULL, - .logfmt = NULL, - .logfmt_annotator = NULL, - }, - [NDF_TIMESTAMP_REALTIME_USEC] = { - .journal = NULL, - .logfmt = "time", - .logfmt_annotator = timestamp_usec_annotator, - }, - [NDF_SYSLOG_IDENTIFIER] = { - .journal = "SYSLOG_IDENTIFIER", // standard journald field - .logfmt = "comm", - }, - [NDF_LOG_SOURCE] = { - .journal = "ND_LOG_SOURCE", - .logfmt = "source", - }, - [NDF_PRIORITY] = { - .journal = "PRIORITY", // standard journald field - .logfmt = "level", - .logfmt_annotator = priority_annotator, - }, - [NDF_ERRNO] = { - .journal = "ERRNO", // standard journald field - .logfmt = "errno", - .logfmt_annotator = errno_annotator, - }, -#if defined(OS_WINDOWS) - [NDF_WINERROR] = { - .journal = "WINERROR", - .logfmt = "winerror", - .logfmt_annotator = winerror_annotator, - }, -#endif - [NDF_INVOCATION_ID] = { - .journal = "INVOCATION_ID", // standard journald field - .logfmt = NULL, - }, - [NDF_LINE] = { - .journal = "CODE_LINE", // standard journald field - .logfmt = NULL, - }, - [NDF_FILE] = { - .journal = "CODE_FILE", // standard journald field - .logfmt = NULL, - }, - [NDF_FUNC] = { - .journal = "CODE_FUNC", // standard journald field - .logfmt = NULL, - }, - [NDF_TID] = { - .journal = "TID", // standard journald field - .logfmt = "tid", - }, - [NDF_THREAD_TAG] = { - .journal = "THREAD_TAG", - .logfmt = "thread", - }, - [NDF_MESSAGE_ID] = { - .journal = "MESSAGE_ID", - .logfmt = "msg_id", - }, - [NDF_MODULE] = { - .journal = "ND_MODULE", - .logfmt = "module", - }, - [NDF_NIDL_NODE] = { - .journal = "ND_NIDL_NODE", - .logfmt = "node", - }, - [NDF_NIDL_INSTANCE] = { - .journal = "ND_NIDL_INSTANCE", - .logfmt = "instance", - }, - 
[NDF_NIDL_CONTEXT] = { - .journal = "ND_NIDL_CONTEXT", - .logfmt = "context", - }, - [NDF_NIDL_DIMENSION] = { - .journal = "ND_NIDL_DIMENSION", - .logfmt = "dimension", - }, - [NDF_SRC_TRANSPORT] = { - .journal = "ND_SRC_TRANSPORT", - .logfmt = "src_transport", - }, - [NDF_ACCOUNT_ID] = { - .journal = "ND_ACCOUNT_ID", - .logfmt = "account", - }, - [NDF_USER_NAME] = { - .journal = "ND_USER_NAME", - .logfmt = "user", - }, - [NDF_USER_ROLE] = { - .journal = "ND_USER_ROLE", - .logfmt = "role", - }, - [NDF_USER_ACCESS] = { - .journal = "ND_USER_PERMISSIONS", - .logfmt = "permissions", - }, - [NDF_SRC_IP] = { - .journal = "ND_SRC_IP", - .logfmt = "src_ip", - }, - [NDF_SRC_FORWARDED_HOST] = { - .journal = "ND_SRC_FORWARDED_HOST", - .logfmt = "src_forwarded_host", - }, - [NDF_SRC_FORWARDED_FOR] = { - .journal = "ND_SRC_FORWARDED_FOR", - .logfmt = "src_forwarded_for", - }, - [NDF_SRC_PORT] = { - .journal = "ND_SRC_PORT", - .logfmt = "src_port", - }, - [NDF_SRC_CAPABILITIES] = { - .journal = "ND_SRC_CAPABILITIES", - .logfmt = "src_capabilities", - }, - [NDF_DST_TRANSPORT] = { - .journal = "ND_DST_TRANSPORT", - .logfmt = "dst_transport", - }, - [NDF_DST_IP] = { - .journal = "ND_DST_IP", - .logfmt = "dst_ip", - }, - [NDF_DST_PORT] = { - .journal = "ND_DST_PORT", - .logfmt = "dst_port", - }, - [NDF_DST_CAPABILITIES] = { - .journal = "ND_DST_CAPABILITIES", - .logfmt = "dst_capabilities", - }, - [NDF_REQUEST_METHOD] = { - .journal = "ND_REQUEST_METHOD", - .logfmt = "req_method", - }, - [NDF_RESPONSE_CODE] = { - .journal = "ND_RESPONSE_CODE", - .logfmt = "code", - }, - [NDF_CONNECTION_ID] = { - .journal = "ND_CONNECTION_ID", - .logfmt = "conn", - }, - [NDF_TRANSACTION_ID] = { - .journal = "ND_TRANSACTION_ID", - .logfmt = "transaction", - }, - [NDF_RESPONSE_SENT_BYTES] = { - .journal = "ND_RESPONSE_SENT_BYTES", - .logfmt = "sent_bytes", - }, - [NDF_RESPONSE_SIZE_BYTES] = { - .journal = "ND_RESPONSE_SIZE_BYTES", - .logfmt = "size_bytes", - }, - [NDF_RESPONSE_PREPARATION_TIME_USEC] = 
{ - .journal = "ND_RESPONSE_PREP_TIME_USEC", - .logfmt = "prep_ut", - }, - [NDF_RESPONSE_SENT_TIME_USEC] = { - .journal = "ND_RESPONSE_SENT_TIME_USEC", - .logfmt = "sent_ut", - }, - [NDF_RESPONSE_TOTAL_TIME_USEC] = { - .journal = "ND_RESPONSE_TOTAL_TIME_USEC", - .logfmt = "total_ut", - }, - [NDF_ALERT_ID] = { - .journal = "ND_ALERT_ID", - .logfmt = "alert_id", - }, - [NDF_ALERT_UNIQUE_ID] = { - .journal = "ND_ALERT_UNIQUE_ID", - .logfmt = "alert_unique_id", - }, - [NDF_ALERT_TRANSITION_ID] = { - .journal = "ND_ALERT_TRANSITION_ID", - .logfmt = "alert_transition_id", - }, - [NDF_ALERT_EVENT_ID] = { - .journal = "ND_ALERT_EVENT_ID", - .logfmt = "alert_event_id", - }, - [NDF_ALERT_CONFIG_HASH] = { - .journal = "ND_ALERT_CONFIG", - .logfmt = "alert_config", - }, - [NDF_ALERT_NAME] = { - .journal = "ND_ALERT_NAME", - .logfmt = "alert", - }, - [NDF_ALERT_CLASS] = { - .journal = "ND_ALERT_CLASS", - .logfmt = "alert_class", - }, - [NDF_ALERT_COMPONENT] = { - .journal = "ND_ALERT_COMPONENT", - .logfmt = "alert_component", - }, - [NDF_ALERT_TYPE] = { - .journal = "ND_ALERT_TYPE", - .logfmt = "alert_type", - }, - [NDF_ALERT_EXEC] = { - .journal = "ND_ALERT_EXEC", - .logfmt = "alert_exec", - }, - [NDF_ALERT_RECIPIENT] = { - .journal = "ND_ALERT_RECIPIENT", - .logfmt = "alert_recipient", - }, - [NDF_ALERT_VALUE] = { - .journal = "ND_ALERT_VALUE", - .logfmt = "alert_value", - }, - [NDF_ALERT_VALUE_OLD] = { - .journal = "ND_ALERT_VALUE_OLD", - .logfmt = "alert_value_old", - }, - [NDF_ALERT_STATUS] = { - .journal = "ND_ALERT_STATUS", - .logfmt = "alert_status", - }, - [NDF_ALERT_STATUS_OLD] = { - .journal = "ND_ALERT_STATUS_OLD", - .logfmt = "alert_value_old", - }, - [NDF_ALERT_UNITS] = { - .journal = "ND_ALERT_UNITS", - .logfmt = "alert_units", - }, - [NDF_ALERT_SUMMARY] = { - .journal = "ND_ALERT_SUMMARY", - .logfmt = "alert_summary", - }, - [NDF_ALERT_INFO] = { - .journal = "ND_ALERT_INFO", - .logfmt = "alert_info", - }, - [NDF_ALERT_DURATION] = { - .journal = 
"ND_ALERT_DURATION", - .logfmt = "alert_duration", - }, - [NDF_ALERT_NOTIFICATION_REALTIME_USEC] = { - .journal = "ND_ALERT_NOTIFICATION_TIMESTAMP_USEC", - .logfmt = "alert_notification_timestamp", - .logfmt_annotator = timestamp_usec_annotator, - }, - - // put new items here - // leave the request URL and the message last - - [NDF_REQUEST] = { - .journal = "ND_REQUEST", - .logfmt = "request", - }, - [NDF_MESSAGE] = { - .journal = "MESSAGE", - .logfmt = "msg", - }, -}; - -#define THREAD_FIELDS_MAX (sizeof(thread_log_fields) / sizeof(thread_log_fields[0])) - -ND_LOG_FIELD_ID nd_log_field_id_by_name(const char *field, size_t len) { - for(size_t i = 0; i < THREAD_FIELDS_MAX ;i++) { - if(thread_log_fields[i].journal && strlen(thread_log_fields[i].journal) == len && strncmp(field, thread_log_fields[i].journal, len) == 0) - return i; - } - - return NDF_STOP; -} - -void log_stack_pop(void *ptr) { - if(!ptr) return; - - struct log_stack_entry *lgs = *(struct log_stack_entry (*)[])ptr; - - if(unlikely(!thread_log_stack_next || lgs != thread_log_stack_base[thread_log_stack_next - 1])) { - fatal("You cannot pop in the middle of the stack, or an item not in the stack"); - return; - } - - thread_log_stack_next--; -} - -void log_stack_push(struct log_stack_entry *lgs) { - if(!lgs || thread_log_stack_next >= THREAD_LOG_STACK_MAX) return; - thread_log_stack_base[thread_log_stack_next++] = lgs; -} - -// ---------------------------------------------------------------------------- -// json formatter - -static void nd_logger_json(BUFFER *wb, struct log_field *fields, size_t fields_max) { - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. 
convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); - CLEAN_BUFFER *tmp = NULL; - - for (size_t i = 0; i < fields_max; i++) { - if (!fields[i].entry.set || !fields[i].logfmt) - continue; - - const char *key = fields[i].logfmt; - - const char *s = NULL; - switch(fields[i].entry.type) { - case NDFT_TXT: - s = fields[i].entry.txt; - break; - case NDFT_STR: - s = string2str(fields[i].entry.str); - break; - case NDFT_BFR: - s = buffer_tostring(fields[i].entry.bfr); - break; - case NDFT_U64: - buffer_json_member_add_uint64(wb, key, fields[i].entry.u64); - break; - case NDFT_I64: - buffer_json_member_add_int64(wb, key, fields[i].entry.i64); - break; - case NDFT_DBL: - buffer_json_member_add_double(wb, key, fields[i].entry.dbl); - break; - case NDFT_UUID: - if(!uuid_is_null(*fields[i].entry.uuid)) { - char u[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(*fields[i].entry.uuid, u); - buffer_json_member_add_string(wb, key, u); - } - break; - case NDFT_CALLBACK: { - if(!tmp) - tmp = buffer_create(1024, NULL); - else - buffer_flush(tmp); - if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) - s = buffer_tostring(tmp); - else - s = NULL; - } - break; - default: - s = "UNHANDLED"; - break; - } - - if(s && *s) - buffer_json_member_add_string(wb, key, s); - } - - buffer_json_finalize(wb); -} - -// ---------------------------------------------------------------------------- -// logfmt formatter - - -static int64_t log_field_to_int64(struct log_field *lf) { - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. 
convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - CLEAN_BUFFER *tmp = NULL; - const char *s = NULL; - - switch(lf->entry.type) { - case NDFT_UUID: - case NDFT_UNSET: - return 0; - - case NDFT_TXT: - s = lf->entry.txt; - break; - - case NDFT_STR: - s = string2str(lf->entry.str); - break; - - case NDFT_BFR: - s = buffer_tostring(lf->entry.bfr); - break; - - case NDFT_CALLBACK: - tmp = buffer_create(0, NULL); - - if(lf->entry.cb.formatter(tmp, lf->entry.cb.formatter_data)) - s = buffer_tostring(tmp); - else - s = NULL; - break; - - case NDFT_U64: - return (int64_t)lf->entry.u64; - - case NDFT_I64: - return (int64_t)lf->entry.i64; - - case NDFT_DBL: - return (int64_t)lf->entry.dbl; - } - - if(s && *s) - return str2ll(s, NULL); - - return 0; -} - -static uint64_t log_field_to_uint64(struct log_field *lf) { - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. 
convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - CLEAN_BUFFER *tmp = NULL; - const char *s = NULL; - - switch(lf->entry.type) { - case NDFT_UUID: - case NDFT_UNSET: - return 0; - - case NDFT_TXT: - s = lf->entry.txt; - break; - - case NDFT_STR: - s = string2str(lf->entry.str); - break; - - case NDFT_BFR: - s = buffer_tostring(lf->entry.bfr); - break; - - case NDFT_CALLBACK: - tmp = buffer_create(0, NULL); - - if(lf->entry.cb.formatter(tmp, lf->entry.cb.formatter_data)) - s = buffer_tostring(tmp); - else - s = NULL; - break; - - case NDFT_U64: - return lf->entry.u64; - - case NDFT_I64: - return lf->entry.i64; - - case NDFT_DBL: - return (uint64_t) lf->entry.dbl; - } - - if(s && *s) - return str2uint64_t(s, NULL); - - return 0; -} - -static void timestamp_usec_annotator(BUFFER *wb, const char *key, struct log_field *lf) { - usec_t ut = log_field_to_uint64(lf); - - if(!ut) - return; - - char datetime[RFC3339_MAX_LENGTH]; - rfc3339_datetime_ut(datetime, sizeof(datetime), ut, 3, false); - - if(buffer_strlen(wb)) - buffer_fast_strcat(wb, " ", 1); - - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_json_strcat(wb, datetime); -} - -static void errno_annotator(BUFFER *wb, const char *key, struct log_field *lf) { - int64_t errnum = log_field_to_int64(lf); - - if(errnum == 0) - return; - - char buf[1024]; - const char *s = errno2str((int)errnum, buf, sizeof(buf)); - - if(buffer_strlen(wb)) - buffer_fast_strcat(wb, " ", 1); - - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=\"", 2); - buffer_print_int64(wb, errnum); - buffer_fast_strcat(wb, ", ", 2); - buffer_json_strcat(wb, s); - buffer_fast_strcat(wb, "\"", 1); -} - -#if defined(OS_WINDOWS) -static void winerror_annotator(BUFFER *wb, const char *key, struct log_field *lf) { - DWORD errnum = log_field_to_uint64(lf); - - if (errnum == 0) - return; - - char buf[1024]; - wchar_t wbuf[1024]; - DWORD size = FormatMessageW( - FORMAT_MESSAGE_FROM_SYSTEM | 
FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, - errnum, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - wbuf, - (DWORD)(sizeof(wbuf) / sizeof(wchar_t) - 1), - NULL - ); - - if (size > 0) { - // Remove \r\n at the end - while (size > 0 && (wbuf[size - 1] == L'\r' || wbuf[size - 1] == L'\n')) - wbuf[--size] = L'\0'; - - // Convert wide string to UTF-8 - int utf8_size = WideCharToMultiByte(CP_UTF8, 0, wbuf, -1, buf, sizeof(buf), NULL, NULL); - if (utf8_size == 0) - snprintf(buf, sizeof(buf) - 1, "unknown error code"); - buf[sizeof(buf) - 1] = '\0'; - } - else - snprintf(buf, sizeof(buf) - 1, "unknown error code"); - - if (buffer_strlen(wb)) - buffer_fast_strcat(wb, " ", 1); - - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=\"", 2); - buffer_print_int64(wb, errnum); - buffer_fast_strcat(wb, ", ", 2); - buffer_json_strcat(wb, buf); - buffer_fast_strcat(wb, "\"", 1); -} -#endif - -static void priority_annotator(BUFFER *wb, const char *key, struct log_field *lf) { - uint64_t pri = log_field_to_uint64(lf); - - if(buffer_strlen(wb)) - buffer_fast_strcat(wb, " ", 1); - - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_strcat(wb, nd_log_id2priority(pri)); -} - -static bool needs_quotes_for_logfmt(const char *s) -{ - static bool safe_for_logfmt[256] = { - [' '] = true, ['!'] = true, ['"'] = false, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true, - ['\''] = true, ['('] = true, [')'] = true, ['*'] = true, ['+'] = true, [','] = true, ['-'] = true, - ['.'] = true, ['/'] = true, ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true, - ['5'] = true, ['6'] = true, ['7'] = true, ['8'] = true, ['9'] = true, [':'] = true, [';'] = true, - ['<'] = true, ['='] = true, ['>'] = true, ['?'] = true, ['@'] = true, ['A'] = true, ['B'] = true, - ['C'] = true, ['D'] = true, ['E'] = true, ['F'] = true, ['G'] = true, ['H'] = true, ['I'] = true, - ['J'] = true, ['K'] = true, ['L'] = true, ['M'] = true, ['N'] = true, ['O'] = true, ['P'] = true, - ['Q'] = true, 
['R'] = true, ['S'] = true, ['T'] = true, ['U'] = true, ['V'] = true, ['W'] = true, - ['X'] = true, ['Y'] = true, ['Z'] = true, ['['] = true, ['\\'] = false, [']'] = true, ['^'] = true, - ['_'] = true, ['`'] = true, ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true, - ['f'] = true, ['g'] = true, ['h'] = true, ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true, - ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true, ['q'] = true, ['r'] = true, ['s'] = true, - ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true, ['y'] = true, ['z'] = true, - ['{'] = true, ['|'] = true, ['}'] = true, ['~'] = true, [0x7f] = true, - }; - - if(!*s) - return true; - - while(*s) { - if(*s == '=' || isspace((uint8_t)*s) || !safe_for_logfmt[(uint8_t)*s]) - return true; - - s++; - } - - return false; -} - -static void string_to_logfmt(BUFFER *wb, const char *s) -{ - bool spaces = needs_quotes_for_logfmt(s); - - if(spaces) - buffer_fast_strcat(wb, "\"", 1); - - buffer_json_strcat(wb, s); - - if(spaces) - buffer_fast_strcat(wb, "\"", 1); -} - -static void nd_logger_logfmt(BUFFER *wb, struct log_field *fields, size_t fields_max) -{ - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. 
convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - CLEAN_BUFFER *tmp = NULL; - - for (size_t i = 0; i < fields_max; i++) { - if (!fields[i].entry.set || !fields[i].logfmt) - continue; - - const char *key = fields[i].logfmt; - - if(fields[i].logfmt_annotator) - fields[i].logfmt_annotator(wb, key, &fields[i]); - else { - if(buffer_strlen(wb)) - buffer_fast_strcat(wb, " ", 1); - - switch(fields[i].entry.type) { - case NDFT_TXT: - if(*fields[i].entry.txt) { - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - string_to_logfmt(wb, fields[i].entry.txt); - } - break; - case NDFT_STR: - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - string_to_logfmt(wb, string2str(fields[i].entry.str)); - break; - case NDFT_BFR: - if(buffer_strlen(fields[i].entry.bfr)) { - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - string_to_logfmt(wb, buffer_tostring(fields[i].entry.bfr)); - } - break; - case NDFT_U64: - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_print_uint64(wb, fields[i].entry.u64); - break; - case NDFT_I64: - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_print_int64(wb, fields[i].entry.i64); - break; - case NDFT_DBL: - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_print_netdata_double(wb, fields[i].entry.dbl); - break; - case NDFT_UUID: - if(!uuid_is_null(*fields[i].entry.uuid)) { - char u[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(*fields[i].entry.uuid, u); - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - buffer_fast_strcat(wb, u, sizeof(u) - 1); - } - break; - case NDFT_CALLBACK: { - if(!tmp) - tmp = buffer_create(1024, NULL); - else - buffer_flush(tmp); - if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) { - buffer_strcat(wb, key); - buffer_fast_strcat(wb, "=", 1); - string_to_logfmt(wb, buffer_tostring(tmp)); - } - } - break; - default: - buffer_strcat(wb, "UNHANDLED"); - break; - } - } - } -} - -// 
---------------------------------------------------------------------------- -// journal logger - -bool nd_log_journal_socket_available(void) { - if(netdata_configured_host_prefix && *netdata_configured_host_prefix) { - char filename[FILENAME_MAX + 1]; - - snprintfz(filename, sizeof(filename), "%s%s", - netdata_configured_host_prefix, "/run/systemd/journal/socket"); - - if(is_path_unix_socket(filename)) - return true; - } - - return is_path_unix_socket("/run/systemd/journal/socket"); -} - -static bool nd_logger_journal_libsystemd(struct log_field *fields __maybe_unused, size_t fields_max __maybe_unused) { -#ifdef HAVE_SYSTEMD - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - struct iovec iov[fields_max]; - int iov_count = 0; - - memset(iov, 0, sizeof(iov)); - - CLEAN_BUFFER *tmp = NULL; - - for (size_t i = 0; i < fields_max; i++) { - if (!fields[i].entry.set || !fields[i].journal) - continue; - - const char *key = fields[i].journal; - char *value = NULL; - int rc = 0; - switch (fields[i].entry.type) { - case NDFT_TXT: - if(*fields[i].entry.txt) - rc = asprintf(&value, "%s=%s", key, fields[i].entry.txt); - break; - case NDFT_STR: - rc = asprintf(&value, "%s=%s", key, string2str(fields[i].entry.str)); - break; - case NDFT_BFR: - if(buffer_strlen(fields[i].entry.bfr)) - rc = asprintf(&value, "%s=%s", key, buffer_tostring(fields[i].entry.bfr)); - break; - case NDFT_U64: - rc = asprintf(&value, "%s=%" PRIu64, key, fields[i].entry.u64); - break; - case NDFT_I64: - rc = asprintf(&value, "%s=%" PRId64, key, fields[i].entry.i64); - break; - case NDFT_DBL: - rc = asprintf(&value, "%s=%f", key, fields[i].entry.dbl); - break; - case NDFT_UUID: - if(!uuid_is_null(*fields[i].entry.uuid)) { - char u[UUID_COMPACT_STR_LEN]; 
- uuid_unparse_lower_compact(*fields[i].entry.uuid, u); - rc = asprintf(&value, "%s=%s", key, u); - } - break; - case NDFT_CALLBACK: { - if(!tmp) - tmp = buffer_create(1024, NULL); - else - buffer_flush(tmp); - if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) - rc = asprintf(&value, "%s=%s", key, buffer_tostring(tmp)); - } - break; - default: - rc = asprintf(&value, "%s=%s", key, "UNHANDLED"); - break; - } - - if (rc != -1 && value) { - iov[iov_count].iov_base = value; - iov[iov_count].iov_len = strlen(value); - iov_count++; - } - } - - int r = sd_journal_sendv(iov, iov_count); - - // Clean up allocated memory - for (int i = 0; i < iov_count; i++) { - if (iov[i].iov_base != NULL) { - free(iov[i].iov_base); - } - } - - return r == 0; -#else - return false; -#endif -} - -static bool nd_logger_journal_direct(struct log_field *fields, size_t fields_max) { - if(!nd_log.journal_direct.initialized) - return false; - - // --- FIELD_PARSER_VERSIONS --- - // - // IMPORTANT: - // THERE ARE 6 VERSIONS OF THIS CODE - // - // 1. journal (direct socket API), - // 2. journal (libsystemd API), - // 3. logfmt, - // 4. json, - // 5. convert to uint64 - // 6. 
convert to int64 - // - // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES - - CLEAN_BUFFER *wb = buffer_create(4096, NULL); - CLEAN_BUFFER *tmp = NULL; - - for (size_t i = 0; i < fields_max; i++) { - if (!fields[i].entry.set || !fields[i].journal) - continue; - - const char *key = fields[i].journal; - - const char *s = NULL; - switch(fields[i].entry.type) { - case NDFT_TXT: - s = fields[i].entry.txt; - break; - case NDFT_STR: - s = string2str(fields[i].entry.str); - break; - case NDFT_BFR: - s = buffer_tostring(fields[i].entry.bfr); - break; - case NDFT_U64: - buffer_strcat(wb, key); - buffer_putc(wb, '='); - buffer_print_uint64(wb, fields[i].entry.u64); - buffer_putc(wb, '\n'); - break; - case NDFT_I64: - buffer_strcat(wb, key); - buffer_putc(wb, '='); - buffer_print_int64(wb, fields[i].entry.i64); - buffer_putc(wb, '\n'); - break; - case NDFT_DBL: - buffer_strcat(wb, key); - buffer_putc(wb, '='); - buffer_print_netdata_double(wb, fields[i].entry.dbl); - buffer_putc(wb, '\n'); - break; - case NDFT_UUID: - if(!uuid_is_null(*fields[i].entry.uuid)) { - char u[UUID_COMPACT_STR_LEN]; - uuid_unparse_lower_compact(*fields[i].entry.uuid, u); - buffer_strcat(wb, key); - buffer_putc(wb, '='); - buffer_fast_strcat(wb, u, sizeof(u) - 1); - buffer_putc(wb, '\n'); - } - break; - case NDFT_CALLBACK: { - if(!tmp) - tmp = buffer_create(1024, NULL); - else - buffer_flush(tmp); - if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) - s = buffer_tostring(tmp); - else - s = NULL; - } - break; - default: - s = "UNHANDLED"; - break; - } - - if(s && *s) { - buffer_strcat(wb, key); - if(!strchr(s, '\n')) { - buffer_putc(wb, '='); - buffer_strcat(wb, s); - buffer_putc(wb, '\n'); - } - else { - buffer_putc(wb, '\n'); - size_t size = strlen(s); - uint64_t le_size = htole64(size); - buffer_memcat(wb, &le_size, sizeof(le_size)); - buffer_memcat(wb, s, size); - buffer_putc(wb, '\n'); - } - } - } - - return journal_direct_send(nd_log.journal_direct.fd, buffer_tostring(wb), 
buffer_strlen(wb)); -} - -// ---------------------------------------------------------------------------- -// syslog logger - uses logfmt - -static bool nd_logger_syslog(int priority, ND_LOG_FORMAT format __maybe_unused, struct log_field *fields, size_t fields_max) { - CLEAN_BUFFER *wb = buffer_create(1024, NULL); - - nd_logger_logfmt(wb, fields, fields_max); - syslog(priority, "%s", buffer_tostring(wb)); - - return true; -} - -// ---------------------------------------------------------------------------- -// file logger - uses logfmt - -static bool nd_logger_file(FILE *fp, ND_LOG_FORMAT format, struct log_field *fields, size_t fields_max) { - BUFFER *wb = buffer_create(1024, NULL); - - if(format == NDLF_JSON) - nd_logger_json(wb, fields, fields_max); - else - nd_logger_logfmt(wb, fields, fields_max); - - int r = fprintf(fp, "%s\n", buffer_tostring(wb)); - fflush(fp); - - buffer_free(wb); - return r > 0; -} - -// ---------------------------------------------------------------------------- -// logger router - -static ND_LOG_METHOD nd_logger_select_output(ND_LOG_SOURCES source, FILE **fpp, SPINLOCK **spinlock) { - *spinlock = NULL; - ND_LOG_METHOD output = nd_log.sources[source].method; - - switch(output) { - case NDLM_JOURNAL: - if(unlikely(!nd_log.journal_direct.initialized && !nd_log.journal.initialized)) { - output = NDLM_FILE; - *fpp = stderr; - *spinlock = &nd_log.std_error.spinlock; - } - else { - *fpp = NULL; - *spinlock = NULL; - } - break; - - case NDLM_SYSLOG: - if(unlikely(!nd_log.syslog.initialized)) { - output = NDLM_FILE; - *spinlock = &nd_log.std_error.spinlock; - *fpp = stderr; - } - else { - *spinlock = NULL; - *fpp = NULL; - } - break; - - case NDLM_FILE: - if(!nd_log.sources[source].fp) { - *fpp = stderr; - *spinlock = &nd_log.std_error.spinlock; - } - else { - *fpp = nd_log.sources[source].fp; - *spinlock = &nd_log.sources[source].spinlock; - } - break; - - case NDLM_STDOUT: - output = NDLM_FILE; - *fpp = stdout; - *spinlock = 
&nd_log.std_output.spinlock; - break; - - default: - case NDLM_DEFAULT: - case NDLM_STDERR: - output = NDLM_FILE; - *fpp = stderr; - *spinlock = &nd_log.std_error.spinlock; - break; - - case NDLM_DISABLED: - case NDLM_DEVNULL: - output = NDLM_DISABLED; - *fpp = NULL; - *spinlock = NULL; - break; - } - - return output; -} - -// ---------------------------------------------------------------------------- -// high level logger - -static void nd_logger_log_fields(SPINLOCK *spinlock, FILE *fp, bool limit, ND_LOG_FIELD_PRIORITY priority, - ND_LOG_METHOD output, struct nd_log_source *source, - struct log_field *fields, size_t fields_max) { - if(spinlock) - spinlock_lock(spinlock); - - // check the limits - if(limit && nd_log_limit_reached(source)) - goto cleanup; - - if(output == NDLM_JOURNAL) { - if(!nd_logger_journal_direct(fields, fields_max) && !nd_logger_journal_libsystemd(fields, fields_max)) { - // we can't log to journal, let's log to stderr - if(spinlock) - spinlock_unlock(spinlock); - - output = NDLM_FILE; - spinlock = &nd_log.std_error.spinlock; - fp = stderr; - - if(spinlock) - spinlock_lock(spinlock); - } - } - - if(output == NDLM_SYSLOG) - nd_logger_syslog(priority, source->format, fields, fields_max); - - if(output == NDLM_FILE) - nd_logger_file(fp, source->format, fields, fields_max); - - -cleanup: - if(spinlock) - spinlock_unlock(spinlock); -} - -static void nd_logger_unset_all_thread_fields(void) { - size_t fields_max = THREAD_FIELDS_MAX; - for(size_t i = 0; i < fields_max ; i++) - thread_log_fields[i].entry.set = false; -} - -static void nd_logger_merge_log_stack_to_thread_fields(void) { - for(size_t c = 0; c < thread_log_stack_next ;c++) { - struct log_stack_entry *lgs = thread_log_stack_base[c]; - - for(size_t i = 0; lgs[i].id != NDF_STOP ; i++) { - if(lgs[i].id >= _NDF_MAX || !lgs[i].set) - continue; - - struct log_stack_entry *e = &lgs[i]; - ND_LOG_STACK_FIELD_TYPE type = lgs[i].type; - - // do not add empty / unset fields - if((type == NDFT_TXT && 
(!e->txt || !*e->txt)) || - (type == NDFT_BFR && (!e->bfr || !buffer_strlen(e->bfr))) || - (type == NDFT_STR && !e->str) || - (type == NDFT_UUID && (!e->uuid || uuid_is_null(*e->uuid))) || - (type == NDFT_CALLBACK && !e->cb.formatter) || - type == NDFT_UNSET) - continue; - - thread_log_fields[lgs[i].id].entry = *e; - } - } -} - -static void nd_logger(const char *file, const char *function, const unsigned long line, - ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, bool limit, - int saved_errno, size_t saved_winerror __maybe_unused, const char *fmt, va_list ap) { - - SPINLOCK *spinlock; - FILE *fp; - ND_LOG_METHOD output = nd_logger_select_output(source, &fp, &spinlock); - if(output != NDLM_FILE && output != NDLM_JOURNAL && output != NDLM_SYSLOG) - return; - - // mark all fields as unset - nd_logger_unset_all_thread_fields(); - - // flatten the log stack into the fields - nd_logger_merge_log_stack_to_thread_fields(); - - // set the common fields that are automatically set by the logging subsystem - - if(likely(!thread_log_fields[NDF_INVOCATION_ID].entry.set)) - thread_log_fields[NDF_INVOCATION_ID].entry = ND_LOG_FIELD_UUID(NDF_INVOCATION_ID, &nd_log.invocation_id); - - if(likely(!thread_log_fields[NDF_LOG_SOURCE].entry.set)) - thread_log_fields[NDF_LOG_SOURCE].entry = ND_LOG_FIELD_TXT(NDF_LOG_SOURCE, nd_log_id2source(source)); - else { - ND_LOG_SOURCES src = source; - - if(thread_log_fields[NDF_LOG_SOURCE].entry.type == NDFT_TXT) - src = nd_log_source2id(thread_log_fields[NDF_LOG_SOURCE].entry.txt, source); - else if(thread_log_fields[NDF_LOG_SOURCE].entry.type == NDFT_U64) - src = thread_log_fields[NDF_LOG_SOURCE].entry.u64; - - if(src != source && src < _NDLS_MAX) { - source = src; - output = nd_logger_select_output(source, &fp, &spinlock); - if(output != NDLM_FILE && output != NDLM_JOURNAL && output != NDLM_SYSLOG) - return; - } - } - - if(likely(!thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry.set)) - thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry = 
ND_LOG_FIELD_TXT(NDF_SYSLOG_IDENTIFIER, program_name); - - if(likely(!thread_log_fields[NDF_LINE].entry.set)) { - thread_log_fields[NDF_LINE].entry = ND_LOG_FIELD_U64(NDF_LINE, line); - thread_log_fields[NDF_FILE].entry = ND_LOG_FIELD_TXT(NDF_FILE, file); - thread_log_fields[NDF_FUNC].entry = ND_LOG_FIELD_TXT(NDF_FUNC, function); - } - - if(likely(!thread_log_fields[NDF_PRIORITY].entry.set)) { - thread_log_fields[NDF_PRIORITY].entry = ND_LOG_FIELD_U64(NDF_PRIORITY, priority); - } - - if(likely(!thread_log_fields[NDF_TID].entry.set)) - thread_log_fields[NDF_TID].entry = ND_LOG_FIELD_U64(NDF_TID, gettid_cached()); - - if(likely(!thread_log_fields[NDF_THREAD_TAG].entry.set)) { - const char *thread_tag = nd_thread_tag(); - thread_log_fields[NDF_THREAD_TAG].entry = ND_LOG_FIELD_TXT(NDF_THREAD_TAG, thread_tag); - - // TODO: fix the ND_MODULE in logging by setting proper module name in threads -// if(!thread_log_fields[NDF_MODULE].entry.set) -// thread_log_fields[NDF_MODULE].entry = ND_LOG_FIELD_CB(NDF_MODULE, thread_tag_to_module, (void *)thread_tag); - } - - if(likely(!thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry.set)) - thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry = ND_LOG_FIELD_U64(NDF_TIMESTAMP_REALTIME_USEC, now_realtime_usec()); - - if(saved_errno != 0 && !thread_log_fields[NDF_ERRNO].entry.set) - thread_log_fields[NDF_ERRNO].entry = ND_LOG_FIELD_I64(NDF_ERRNO, saved_errno); - -#if defined(OS_WINDOWS) - if(saved_winerror != 0 && !thread_log_fields[NDF_WINERROR].entry.set) - thread_log_fields[NDF_WINERROR].entry = ND_LOG_FIELD_U64(NDF_WINERROR, saved_winerror); -#endif - - CLEAN_BUFFER *wb = NULL; - if(fmt && !thread_log_fields[NDF_MESSAGE].entry.set) { - wb = buffer_create(1024, NULL); - buffer_vsprintf(wb, fmt, ap); - thread_log_fields[NDF_MESSAGE].entry = ND_LOG_FIELD_TXT(NDF_MESSAGE, buffer_tostring(wb)); - } - - nd_logger_log_fields(spinlock, fp, limit, priority, output, &nd_log.sources[source], - thread_log_fields, THREAD_FIELDS_MAX); - - 
if(nd_log.sources[source].pending_msg) { - // log a pending message - - nd_logger_unset_all_thread_fields(); - - thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry = (struct log_stack_entry){ - .set = true, - .type = NDFT_U64, - .u64 = now_realtime_usec(), - }; - - thread_log_fields[NDF_LOG_SOURCE].entry = (struct log_stack_entry){ - .set = true, - .type = NDFT_TXT, - .txt = nd_log_id2source(source), - }; - - thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry = (struct log_stack_entry){ - .set = true, - .type = NDFT_TXT, - .txt = program_name, - }; - - thread_log_fields[NDF_MESSAGE].entry = (struct log_stack_entry){ - .set = true, - .type = NDFT_TXT, - .txt = nd_log.sources[source].pending_msg, - }; - - nd_logger_log_fields(spinlock, fp, false, priority, output, - &nd_log.sources[source], - thread_log_fields, THREAD_FIELDS_MAX); - - freez((void *)nd_log.sources[source].pending_msg); - nd_log.sources[source].pending_msg = NULL; - } - - errno_clear(); -} - -static ND_LOG_SOURCES nd_log_validate_source(ND_LOG_SOURCES source) { - if(source >= _NDLS_MAX) - source = NDLS_DAEMON; - - if(nd_log.overwrite_process_source) - source = nd_log.overwrite_process_source; - - return source; -} - -// ---------------------------------------------------------------------------- -// public API for loggers - -void netdata_logger(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... 
) -{ - int saved_errno = errno; - - size_t saved_winerror = 0; -#if defined(OS_WINDOWS) - saved_winerror = GetLastError(); -#endif - - source = nd_log_validate_source(source); - - if (source != NDLS_DEBUG && priority > nd_log.sources[source].min_priority) - return; - - va_list args; - va_start(args, fmt); - nd_logger(file, function, line, source, priority, - source == NDLS_DAEMON || source == NDLS_COLLECTORS, - saved_errno, saved_winerror, fmt, args); - va_end(args); -} - -void netdata_logger_with_limit(ERROR_LIMIT *erl, ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) { - int saved_errno = errno; - - size_t saved_winerror = 0; -#if defined(OS_WINDOWS) - saved_winerror = GetLastError(); -#endif - - source = nd_log_validate_source(source); - - if (source != NDLS_DEBUG && priority > nd_log.sources[source].min_priority) - return; - - if(erl->sleep_ut) - sleep_usec(erl->sleep_ut); - - spinlock_lock(&erl->spinlock); - - erl->count++; - time_t now = now_boottime_sec(); - if(now - erl->last_logged < erl->log_every) { - spinlock_unlock(&erl->spinlock); - return; - } - - spinlock_unlock(&erl->spinlock); - - va_list args; - va_start(args, fmt); - nd_logger(file, function, line, source, priority, - source == NDLS_DAEMON || source == NDLS_COLLECTORS, - saved_errno, saved_winerror, fmt, args); - va_end(args); - erl->last_logged = now; - erl->count = 0; -} - -void netdata_logger_fatal( const char *file, const char *function, const unsigned long line, const char *fmt, ... 
) { - int saved_errno = errno; - - size_t saved_winerror = 0; -#if defined(OS_WINDOWS) - saved_winerror = GetLastError(); -#endif - - ND_LOG_SOURCES source = NDLS_DAEMON; - source = nd_log_validate_source(source); - - va_list args; - va_start(args, fmt); - nd_logger(file, function, line, source, NDLP_ALERT, true, saved_errno, saved_winerror, fmt, args); - va_end(args); - - char date[LOG_DATE_LENGTH]; - log_date(date, LOG_DATE_LENGTH, now_realtime_sec()); - - char action_data[70+1]; - snprintfz(action_data, 70, "%04lu@%-10.10s:%-15.15s/%d", line, file, function, saved_errno); - - const char *thread_tag = nd_thread_tag(); - const char *tag_to_send = thread_tag; - - // anonymize thread names - if(strncmp(thread_tag, THREAD_TAG_STREAM_RECEIVER, strlen(THREAD_TAG_STREAM_RECEIVER)) == 0) - tag_to_send = THREAD_TAG_STREAM_RECEIVER; - if(strncmp(thread_tag, THREAD_TAG_STREAM_SENDER, strlen(THREAD_TAG_STREAM_SENDER)) == 0) - tag_to_send = THREAD_TAG_STREAM_SENDER; - - char action_result[60+1]; - snprintfz(action_result, 60, "%s:%s", program_name, tag_to_send); - -#if !defined(ENABLE_SENTRY) && defined(HAVE_BACKTRACE) - int fd = nd_log.sources[NDLS_DAEMON].fd; - if(fd == -1) - fd = STDERR_FILENO; - - int nptrs; - void *buffer[10000]; - - nptrs = backtrace(buffer, sizeof(buffer)); - if(nptrs) - backtrace_symbols_fd(buffer, nptrs, fd); -#endif - -#ifdef NETDATA_INTERNAL_CHECKS - abort(); -#endif - - netdata_cleanup_and_exit(1, "FATAL", action_result, action_data); -} - -// ---------------------------------------------------------------------------- -// log limits - -void nd_log_limits_reset(void) { - usec_t now_ut = now_monotonic_usec(); - - spinlock_lock(&nd_log.std_output.spinlock); - spinlock_lock(&nd_log.std_error.spinlock); - - for(size_t i = 0; i < _NDLS_MAX ;i++) { - spinlock_lock(&nd_log.sources[i].spinlock); - nd_log.sources[i].limits.prevented = 0; - nd_log.sources[i].limits.counter = 0; - nd_log.sources[i].limits.started_monotonic_ut = now_ut; - 
nd_log.sources[i].limits.logs_per_period = nd_log.sources[i].limits.logs_per_period_backup; - spinlock_unlock(&nd_log.sources[i].spinlock); - } - - spinlock_unlock(&nd_log.std_output.spinlock); - spinlock_unlock(&nd_log.std_error.spinlock); -} - -void nd_log_limits_unlimited(void) { - nd_log_limits_reset(); - for(size_t i = 0; i < _NDLS_MAX ;i++) { - nd_log.sources[i].limits.logs_per_period = 0; - } -} - -static bool nd_log_limit_reached(struct nd_log_source *source) { - if(source->limits.throttle_period == 0 || source->limits.logs_per_period == 0) - return false; - - usec_t now_ut = now_monotonic_usec(); - if(!source->limits.started_monotonic_ut) - source->limits.started_monotonic_ut = now_ut; - - source->limits.counter++; - - if(now_ut - source->limits.started_monotonic_ut > (usec_t)source->limits.throttle_period) { - if(source->limits.prevented) { - BUFFER *wb = buffer_create(1024, NULL); - buffer_sprintf(wb, - "LOG FLOOD PROTECTION: resuming logging " - "(prevented %"PRIu32" logs in the last %"PRIu32" seconds).", - source->limits.prevented, - source->limits.throttle_period); - - if(source->pending_msg) - freez((void *)source->pending_msg); - - source->pending_msg = strdupz(buffer_tostring(wb)); - - buffer_free(wb); - } - - // restart the period accounting - source->limits.started_monotonic_ut = now_ut; - source->limits.counter = 1; - source->limits.prevented = 0; - - // log this error - return false; - } - - if(source->limits.counter > source->limits.logs_per_period) { - if(!source->limits.prevented) { - BUFFER *wb = buffer_create(1024, NULL); - buffer_sprintf(wb, - "LOG FLOOD PROTECTION: too many logs (%"PRIu32" logs in %"PRId64" seconds, threshold is set to %"PRIu32" logs " - "in %"PRIu32" seconds). 
Preventing more logs from process '%s' for %"PRId64" seconds.", - source->limits.counter, - (int64_t)((now_ut - source->limits.started_monotonic_ut) / USEC_PER_SEC), - source->limits.logs_per_period, - source->limits.throttle_period, - program_name, - (int64_t)(((source->limits.started_monotonic_ut + (source->limits.throttle_period * USEC_PER_SEC) - now_ut)) / USEC_PER_SEC) - ); - - if(source->pending_msg) - freez((void *)source->pending_msg); - - source->pending_msg = strdupz(buffer_tostring(wb)); - - buffer_free(wb); - } - - source->limits.prevented++; - - // prevent logging this error -#ifdef NETDATA_INTERNAL_CHECKS - return false; -#else - return true; -#endif - } - - return false; -} diff --git a/src/libnetdata/log/nd_log-annotators.c b/src/libnetdata/log/nd_log-annotators.c new file mode 100644 index 00000000000000..92e9bf3100771d --- /dev/null +++ b/src/libnetdata/log/nd_log-annotators.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +const char *timestamp_usec_annotator(struct log_field *lf) { + usec_t ut = log_field_to_uint64(lf); + + if(!ut) + return NULL; + + static __thread char datetime[RFC3339_MAX_LENGTH]; + rfc3339_datetime_ut(datetime, sizeof(datetime), ut, 3, false); + return datetime; +} + +const char *errno_annotator(struct log_field *lf) { + int64_t errnum = log_field_to_int64(lf); + + if(errnum == 0) + return NULL; + + static __thread char buf[256]; + size_t len = print_uint64(buf, errnum); + buf[len++] = ','; + buf[len++] = ' '; + + char *msg_to = &buf[len]; + size_t msg_size = sizeof(buf) - len; + + const char *s = errno2str((int)errnum, msg_to, msg_size); + if(s != msg_to) + strncpyz(msg_to, s, msg_size - 1); + + return buf; +} + +#if defined(OS_WINDOWS) +const char *winerror_annotator(struct log_field *lf) { + DWORD errnum = log_field_to_uint64(lf); + + if (errnum == 0) + return NULL; + + static __thread char buf[256]; + size_t len = print_uint64(buf, errnum); + buf[len++] = ','; + 
buf[len++] = ' '; + + char *msg_to = &buf[len]; + size_t msg_size = sizeof(buf) - len; + + wchar_t wbuf[1024]; + DWORD size = FormatMessageW( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + errnum, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + wbuf, + (DWORD)(sizeof(wbuf) / sizeof(wchar_t) - 1), + NULL + ); + + if (size > 0) { + // Remove \r\n at the end + while (size > 0 && (wbuf[size - 1] == L'\r' || wbuf[size - 1] == L'\n')) + wbuf[--size] = L'\0'; + + // Convert wide string to UTF-8 + int utf8_size = WideCharToMultiByte(CP_UTF8, 0, wbuf, -1, msg_to, (int)msg_size, NULL, NULL); + if (utf8_size == 0) + snprintf(msg_to, msg_size - 1, "unknown error code"); + msg_to[msg_size - 1] = '\0'; + } + else + snprintf(msg_to, msg_size - 1, "unknown error code"); + + return buf; +} +#endif + +const char *priority_annotator(struct log_field *lf) { + uint64_t pri = log_field_to_uint64(lf); + return nd_log_id2priority(pri); +} diff --git a/src/libnetdata/log/nd_log-common.h b/src/libnetdata/log/nd_log-common.h new file mode 100644 index 00000000000000..d06bbbd16e147c --- /dev/null +++ b/src/libnetdata/log/nd_log-common.h @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_ND_LOG_COMMON_H +#define NETDATA_ND_LOG_COMMON_H + +#include + +typedef enum __attribute__((__packed__)) { + NDLS_UNSET = 0, // internal use only + NDLS_ACCESS, // access.log + NDLS_ACLK, // aclk.log + NDLS_COLLECTORS, // collector.log + NDLS_DAEMON, // error.log + NDLS_HEALTH, // health.log + NDLS_DEBUG, // debug.log + + // terminator + _NDLS_MAX, +} ND_LOG_SOURCES; + +typedef enum __attribute__((__packed__)) { + NDLP_EMERG = LOG_EMERG, // from syslog.h + NDLP_ALERT = LOG_ALERT, // from syslog.h + NDLP_CRIT = LOG_CRIT, // from syslog.h + NDLP_ERR = LOG_ERR, // from syslog.h + NDLP_WARNING = LOG_WARNING, // from syslog.h + NDLP_NOTICE = LOG_NOTICE, // from syslog.h + NDLP_INFO = LOG_INFO, // from syslog.h + NDLP_DEBUG = LOG_DEBUG, // from syslog.h + + // 
terminator + _NDLP_MAX, +} ND_LOG_FIELD_PRIORITY; + +typedef enum __attribute__((__packed__)) { + // KEEP THESE IN THE SAME ORDER AS in thread_log_fields (log.c) + // so that it easy to audit for missing fields + + // NEVER RENUMBER THIS LIST + // The Windows Events Log has them at fixed positions + + NDF_STOP = 0, + NDF_TIMESTAMP_REALTIME_USEC = 1, // the timestamp of the log message - added automatically + NDF_SYSLOG_IDENTIFIER = 2, // the syslog identifier of the application - added automatically + NDF_LOG_SOURCE = 3, // DAEMON, COLLECTORS, HEALTH, MSGID_ACCESS, ACLK - set at the log call + NDF_PRIORITY = 4, // the syslog priority (severity) - set at the log call + NDF_ERRNO = 5, // the ERRNO at the time of the log call - added automatically + NDF_WINERROR = 6, // Windows GetLastError() + NDF_INVOCATION_ID = 7, // the INVOCATION_ID of Netdata - added automatically + NDF_LINE = 8, // the source code file line number - added automatically + NDF_FILE = 9, // the source code filename - added automatically + NDF_FUNC = 10, // the source code function - added automatically + NDF_TID = 11, // the thread ID of the thread logging - added automatically + NDF_THREAD_TAG = 12, // the thread tag of the thread logging - added automatically + NDF_MESSAGE_ID = 13, // for specific events + NDF_MODULE = 14, // for internal plugin module, all other get the NDF_THREAD_TAG + + NDF_NIDL_NODE = 15, // the node / rrdhost currently being worked + NDF_NIDL_INSTANCE = 16, // the instance / rrdset currently being worked + NDF_NIDL_CONTEXT = 17, // the context of the instance currently being worked + NDF_NIDL_DIMENSION = 18, // the dimension / rrddim currently being worked + + // web server, aclk and stream receiver + NDF_SRC_TRANSPORT = 19, // the transport we received the request, one of: http, https, pluginsd + + // Netdata Cloud Related + NDF_ACCOUNT_ID = 20, + NDF_USER_NAME = 21, + NDF_USER_ROLE = 22, + NDF_USER_ACCESS = 23, + + // web server and stream receiver + NDF_SRC_IP = 24, // 
the streaming / web server source IP + NDF_SRC_PORT = 25, // the streaming / web server source Port + NDF_SRC_FORWARDED_HOST = 26, + NDF_SRC_FORWARDED_FOR = 27, + NDF_SRC_CAPABILITIES = 28, // the stream receiver capabilities + + // stream sender (established links) + NDF_DST_TRANSPORT = 29, // the transport we send the request, one of: http, https + NDF_DST_IP = 30, // the destination streaming IP + NDF_DST_PORT = 31, // the destination streaming Port + NDF_DST_CAPABILITIES = 32, // the destination streaming capabilities + + // web server, aclk and stream receiver + NDF_REQUEST_METHOD = 33, // for http like requests, the http request method + NDF_RESPONSE_CODE = 34, // for http like requests, the http response code, otherwise a status string + + // web server (all), aclk (queries) + NDF_CONNECTION_ID = 35, // the web server connection ID + NDF_TRANSACTION_ID = 36, // the web server and API transaction ID + NDF_RESPONSE_SENT_BYTES = 37, // for http like requests, the response bytes + NDF_RESPONSE_SIZE_BYTES = 38, // for http like requests, the uncompressed response size + NDF_RESPONSE_PREPARATION_TIME_USEC = 39, // for http like requests, the preparation time + NDF_RESPONSE_SENT_TIME_USEC = 40, // for http like requests, the time to send the response back + NDF_RESPONSE_TOTAL_TIME_USEC = 41, // for http like requests, the total time to complete the response + + // health alerts + NDF_ALERT_ID = 42, + NDF_ALERT_UNIQUE_ID = 43, + NDF_ALERT_EVENT_ID = 44, + NDF_ALERT_TRANSITION_ID = 45, + NDF_ALERT_CONFIG_HASH = 46, + NDF_ALERT_NAME = 47, + NDF_ALERT_CLASS = 48, + NDF_ALERT_COMPONENT = 49, + NDF_ALERT_TYPE = 50, + NDF_ALERT_EXEC = 51, + NDF_ALERT_RECIPIENT = 52, + NDF_ALERT_DURATION = 53, + NDF_ALERT_VALUE = 54, + NDF_ALERT_VALUE_OLD = 55, + NDF_ALERT_STATUS = 56, + NDF_ALERT_STATUS_OLD = 57, + NDF_ALERT_SOURCE = 58, + NDF_ALERT_UNITS = 59, + NDF_ALERT_SUMMARY = 60, + NDF_ALERT_INFO = 61, + NDF_ALERT_NOTIFICATION_REALTIME_USEC = 62, + // NDF_ALERT_FLAGS, + + // put 
new items here + // leave the request URL and the message last + + NDF_REQUEST = 63, // the request we are currently working on + NDF_MESSAGE = 64, // the log message, if any + + // terminator + _NDF_MAX, +} ND_LOG_FIELD_ID; + +typedef enum __attribute__((__packed__)) { + NDFT_UNSET = 0, + NDFT_TXT, + NDFT_STR, + NDFT_BFR, + NDFT_U64, + NDFT_I64, + NDFT_DBL, + NDFT_UUID, + NDFT_CALLBACK, + + // terminator + _NDFT_MAX, +} ND_LOG_STACK_FIELD_TYPE; + +#endif //NETDATA_ND_LOG_COMMON_H diff --git a/src/libnetdata/log/nd_log-config.c b/src/libnetdata/log/nd_log-config.c new file mode 100644 index 00000000000000..c8e17402e74dbc --- /dev/null +++ b/src/libnetdata/log/nd_log-config.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting) { + char buf[FILENAME_MAX + 100]; + if(setting && *setting) + strncpyz(buf, setting, sizeof(buf) - 1); + else + buf[0] = '\0'; + + struct nd_log_source *ls = &nd_log.sources[source]; + char *output = strrchr(buf, '@'); + + if(!output) + // all of it is the output + output = buf; + else { + // we found an '@', the next char is the output + *output = '\0'; + output++; + + // parse the other params + char *remaining = buf; + while(remaining) { + char *value = strsep_skip_consecutive_separators(&remaining, ","); + if (!value || !*value) continue; + + char *name = strsep_skip_consecutive_separators(&value, "="); + if (!name || !*name) continue; + + if(strcmp(name, "logfmt") == 0) + ls->format = NDLF_LOGFMT; + else if(strcmp(name, "json") == 0) + ls->format = NDLF_JSON; + else if(strcmp(name, "journal") == 0) + ls->format = NDLF_JOURNAL; +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + else if(strcmp(name, ETW_NAME) == 0) + ls->format = NDLF_ETW; +#endif +#if defined(HAVE_WEL) + else if(strcmp(name, WEL_NAME) == 0) + ls->format = NDLF_WEL; +#endif +#endif + else if(strcmp(name, "level") == 0 && value && *value) + 
ls->min_priority = nd_log_priority2id(value); + else if(strcmp(name, "protection") == 0 && value && *value) { + if(strcmp(value, "off") == 0 || strcmp(value, "none") == 0) { + ls->limits = ND_LOG_LIMITS_UNLIMITED; + ls->limits.counter = 0; + ls->limits.prevented = 0; + } + else { + ls->limits = ND_LOG_LIMITS_DEFAULT; + + char *slash = strchr(value, '/'); + if(slash) { + *slash = '\0'; + slash++; + ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value); + + int period; + if(!duration_parse_seconds(slash, &period)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Error while parsing period '%s'", slash); + period = ND_LOG_DEFAULT_THROTTLE_PERIOD; + } + + ls->limits.throttle_period = period; + } + else { + ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value); + ls->limits.throttle_period = ND_LOG_DEFAULT_THROTTLE_PERIOD; + } + } + } + else + nd_log(NDLS_DAEMON, NDLP_ERR, + "Error while parsing configuration of log source '%s'. " + "In config '%s', '%s' is not understood.", + nd_log_id2source(source), setting, name); + } + } + + if(!output || !*output || strcmp(output, "none") == 0 || strcmp(output, "off") == 0) { + ls->method = NDLM_DISABLED; + ls->filename = "/dev/null"; + } + else if(strcmp(output, "journal") == 0) { + ls->method = NDLM_JOURNAL; + ls->filename = NULL; + } +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + else if(strcmp(output, ETW_NAME) == 0) { + ls->method = NDLM_ETW; + ls->filename = NULL; + } +#endif +#if defined(HAVE_WEL) + else if(strcmp(output, WEL_NAME) == 0) { + ls->method = NDLM_WEL; + ls->filename = NULL; + } +#endif +#endif + else if(strcmp(output, "syslog") == 0) { + ls->method = NDLM_SYSLOG; + ls->filename = NULL; + } + else if(strcmp(output, "/dev/null") == 0) { + ls->method = NDLM_DEVNULL; + ls->filename = "/dev/null"; + } + else if(strcmp(output, "system") == 0) { + if(ls->fd == STDERR_FILENO) { + ls->method = NDLM_STDERR; + ls->filename = NULL; + ls->fd = STDERR_FILENO; + } + else { + ls->method = 
NDLM_STDOUT; + ls->filename = NULL; + ls->fd = STDOUT_FILENO; + } + } + else if(strcmp(output, "stderr") == 0) { + ls->method = NDLM_STDERR; + ls->filename = NULL; + ls->fd = STDERR_FILENO; + } + else if(strcmp(output, "stdout") == 0) { + ls->method = NDLM_STDOUT; + ls->filename = NULL; + ls->fd = STDOUT_FILENO; + } + else { + ls->method = NDLM_FILE; + ls->filename = strdupz(output); + } + +#if defined(NETDATA_INTERNAL_CHECKS) || defined(NETDATA_DEV_MODE) + ls->min_priority = NDLP_DEBUG; +#endif + + if(source == NDLS_COLLECTORS) { + // set the method for the collector processes we will spawn + + ND_LOG_METHOD method = NDLM_STDERR; + ND_LOG_FORMAT format = NDLF_LOGFMT; + ND_LOG_FIELD_PRIORITY priority = ls->min_priority; + + if(IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(ls->method)) { + method = ls->method; + format = ls->format; + } + + nd_setenv("NETDATA_LOG_METHOD", nd_log_id2method(method), 1); + nd_setenv("NETDATA_LOG_FORMAT", nd_log_id2format(format), 1); + nd_setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); + } +} + +void nd_log_set_priority_level(const char *setting) { + if(!setting || !*setting) + setting = "info"; + + ND_LOG_FIELD_PRIORITY priority = nd_log_priority2id(setting); + +#if defined(NETDATA_INTERNAL_CHECKS) || defined(NETDATA_DEV_MODE) + priority = NDLP_DEBUG; +#endif + + for (size_t i = 0; i < _NDLS_MAX; i++) { + if (i != NDLS_DEBUG) + nd_log.sources[i].min_priority = priority; + } + + // the right one + nd_setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1); +} + +void nd_log_set_facility(const char *facility) { + if(!facility || !*facility) + facility = "daemon"; + + nd_log.syslog.facility = nd_log_facility2id(facility); + nd_setenv("NETDATA_SYSLOG_FACILITY", nd_log_id2facility(nd_log.syslog.facility), 1); +} + +void nd_log_set_flood_protection(size_t logs, time_t period) { + nd_log.sources[NDLS_DAEMON].limits.logs_per_period = + nd_log.sources[NDLS_DAEMON].limits.logs_per_period_backup; + 
nd_log.sources[NDLS_COLLECTORS].limits.logs_per_period = + nd_log.sources[NDLS_COLLECTORS].limits.logs_per_period_backup = logs; + + nd_log.sources[NDLS_DAEMON].limits.throttle_period = + nd_log.sources[NDLS_COLLECTORS].limits.throttle_period = period; + + char buf[100]; + snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )period); + nd_setenv("NETDATA_ERRORS_THROTTLE_PERIOD", buf, 1); + snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )logs); + nd_setenv("NETDATA_ERRORS_PER_PERIOD", buf, 1); +} diff --git a/src/libnetdata/log/nd_log-field-formatters.c b/src/libnetdata/log/nd_log-field-formatters.c new file mode 100644 index 00000000000000..e1b3c0d08d8a27 --- /dev/null +++ b/src/libnetdata/log/nd_log-field-formatters.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +int64_t log_field_to_int64(struct log_field *lf) { + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. 
convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + CLEAN_BUFFER *tmp = NULL; + const char *s = NULL; + + switch(lf->entry.type) { + default: + case NDFT_UUID: + case NDFT_UNSET: + return 0; + + case NDFT_TXT: + s = lf->entry.txt; + break; + + case NDFT_STR: + s = string2str(lf->entry.str); + break; + + case NDFT_BFR: + s = buffer_tostring(lf->entry.bfr); + break; + + case NDFT_CALLBACK: + tmp = buffer_create(0, NULL); + + if(lf->entry.cb.formatter(tmp, lf->entry.cb.formatter_data)) + s = buffer_tostring(tmp); + else + s = NULL; + break; + + case NDFT_U64: + return (int64_t)lf->entry.u64; + + case NDFT_I64: + return (int64_t)lf->entry.i64; + + case NDFT_DBL: + return (int64_t)lf->entry.dbl; + } + + if(s && *s) + return str2ll(s, NULL); + + return 0; +} + +uint64_t log_field_to_uint64(struct log_field *lf) { + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. 
convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + CLEAN_BUFFER *tmp = NULL; + const char *s = NULL; + + switch(lf->entry.type) { + default: + case NDFT_UUID: + case NDFT_UNSET: + return 0; + + case NDFT_TXT: + s = lf->entry.txt; + break; + + case NDFT_STR: + s = string2str(lf->entry.str); + break; + + case NDFT_BFR: + s = buffer_tostring(lf->entry.bfr); + break; + + case NDFT_CALLBACK: + tmp = buffer_create(0, NULL); + + if(lf->entry.cb.formatter(tmp, lf->entry.cb.formatter_data)) + s = buffer_tostring(tmp); + else + s = NULL; + break; + + case NDFT_U64: + return lf->entry.u64; + + case NDFT_I64: + return lf->entry.i64; + + case NDFT_DBL: + return (uint64_t) lf->entry.dbl; + } + + if(s && *s) + return str2uint64_t(s, NULL); + + return 0; +} diff --git a/src/libnetdata/log/nd_log-format-json.c b/src/libnetdata/log/nd_log-format-json.c new file mode 100644 index 00000000000000..c25bf19c5dd335 --- /dev/null +++ b/src/libnetdata/log/nd_log-format-json.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +void nd_logger_json(BUFFER *wb, struct log_field *fields, size_t fields_max) { + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. 
convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + CLEAN_BUFFER *tmp = NULL; + + for (size_t i = 0; i < fields_max; i++) { + if (!fields[i].entry.set || !fields[i].logfmt) + continue; + + const char *key = fields[i].logfmt; + + const char *s = NULL; + switch(fields[i].entry.type) { + case NDFT_TXT: + s = fields[i].entry.txt; + break; + case NDFT_STR: + s = string2str(fields[i].entry.str); + break; + case NDFT_BFR: + s = buffer_tostring(fields[i].entry.bfr); + break; + case NDFT_U64: + buffer_json_member_add_uint64(wb, key, fields[i].entry.u64); + break; + case NDFT_I64: + buffer_json_member_add_int64(wb, key, fields[i].entry.i64); + break; + case NDFT_DBL: + buffer_json_member_add_double(wb, key, fields[i].entry.dbl); + break; + case NDFT_UUID: + if(!uuid_is_null(*fields[i].entry.uuid)) { + char u[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(*fields[i].entry.uuid, u); + buffer_json_member_add_string(wb, key, u); + } + break; + case NDFT_CALLBACK: { + if(!tmp) + tmp = buffer_create(1024, NULL); + else + buffer_flush(tmp); + if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) + s = buffer_tostring(tmp); + else + s = NULL; + } + break; + default: + s = "UNHANDLED"; + break; + } + + if(s && *s) + buffer_json_member_add_string(wb, key, s); + } + + buffer_json_finalize(wb); +} diff --git a/src/libnetdata/log/nd_log-format-logfmt.c b/src/libnetdata/log/nd_log-format-logfmt.c new file mode 100644 index 00000000000000..d65211dfcbc00b --- /dev/null +++ b/src/libnetdata/log/nd_log-format-logfmt.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +static bool needs_quotes_for_logfmt(const char *s) +{ + static bool safe_for_logfmt[256] = { + [' '] = true, ['!'] = true, ['"'] = false, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true, + ['\''] = true, ['('] = true, [')'] = true, ['*'] = true, 
['+'] = true, [','] = true, ['-'] = true, + ['.'] = true, ['/'] = true, ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true, + ['5'] = true, ['6'] = true, ['7'] = true, ['8'] = true, ['9'] = true, [':'] = true, [';'] = true, + ['<'] = true, ['='] = true, ['>'] = true, ['?'] = true, ['@'] = true, ['A'] = true, ['B'] = true, + ['C'] = true, ['D'] = true, ['E'] = true, ['F'] = true, ['G'] = true, ['H'] = true, ['I'] = true, + ['J'] = true, ['K'] = true, ['L'] = true, ['M'] = true, ['N'] = true, ['O'] = true, ['P'] = true, + ['Q'] = true, ['R'] = true, ['S'] = true, ['T'] = true, ['U'] = true, ['V'] = true, ['W'] = true, + ['X'] = true, ['Y'] = true, ['Z'] = true, ['['] = true, ['\\'] = false, [']'] = true, ['^'] = true, + ['_'] = true, ['`'] = true, ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true, + ['f'] = true, ['g'] = true, ['h'] = true, ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true, + ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true, ['q'] = true, ['r'] = true, ['s'] = true, + ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true, ['y'] = true, ['z'] = true, + ['{'] = true, ['|'] = true, ['}'] = true, ['~'] = true, [0x7f] = true, + }; + + if(!*s) + return true; + + while(*s) { + if(*s == '=' || isspace((uint8_t)*s) || !safe_for_logfmt[(uint8_t)*s]) + return true; + + s++; + } + + return false; +} + +static void string_to_logfmt(BUFFER *wb, const char *s) +{ + bool spaces = needs_quotes_for_logfmt(s); + + if(spaces) + buffer_fast_strcat(wb, "\"", 1); + + buffer_json_strcat(wb, s); + + if(spaces) + buffer_fast_strcat(wb, "\"", 1); +} + +void nd_logger_logfmt(BUFFER *wb, struct log_field *fields, size_t fields_max) { + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. 
convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + CLEAN_BUFFER *tmp = NULL; + + for (size_t i = 0; i < fields_max; i++) { + if (!fields[i].entry.set || !fields[i].logfmt) + continue; + + const char *key = fields[i].logfmt; + + if(fields[i].annotator) { + const char *s = fields[i].annotator(&fields[i]); + if(!s) continue; + + if(buffer_strlen(wb)) + buffer_fast_strcat(wb, " ", 1); + + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + string_to_logfmt(wb, s); + } + else { + if(buffer_strlen(wb)) + buffer_fast_strcat(wb, " ", 1); + + switch(fields[i].entry.type) { + case NDFT_TXT: + if(*fields[i].entry.txt) { + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + string_to_logfmt(wb, fields[i].entry.txt); + } + break; + case NDFT_STR: + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + string_to_logfmt(wb, string2str(fields[i].entry.str)); + break; + case NDFT_BFR: + if(buffer_strlen(fields[i].entry.bfr)) { + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + string_to_logfmt(wb, buffer_tostring(fields[i].entry.bfr)); + } + break; + case NDFT_U64: + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + buffer_print_uint64(wb, fields[i].entry.u64); + break; + case NDFT_I64: + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + buffer_print_int64(wb, fields[i].entry.i64); + break; + case NDFT_DBL: + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + buffer_print_netdata_double(wb, fields[i].entry.dbl); + break; + case NDFT_UUID: + if(!uuid_is_null(*fields[i].entry.uuid)) { + char u[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(*fields[i].entry.uuid, u); + buffer_strcat(wb, key); + buffer_fast_strcat(wb, "=", 1); + buffer_fast_strcat(wb, u, sizeof(u) - 1); + } + break; + case NDFT_CALLBACK: { + if(!tmp) + tmp = buffer_create(1024, NULL); + else + buffer_flush(tmp); + if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) { + buffer_strcat(wb, key); + 
buffer_fast_strcat(wb, "=", 1); + string_to_logfmt(wb, buffer_tostring(tmp)); + } + } + break; + default: + buffer_strcat(wb, "UNHANDLED"); + break; + } + } + } +} diff --git a/src/libnetdata/log/nd_log-init.c b/src/libnetdata/log/nd_log-init.c new file mode 100644 index 00000000000000..f1527b7444b313 --- /dev/null +++ b/src/libnetdata/log/nd_log-init.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +// -------------------------------------------------------------------------------------------------------------------- + +__attribute__((constructor)) void initialize_invocation_id(void) { + // check for a NETDATA_INVOCATION_ID + if(uuid_parse_flexi(getenv("NETDATA_INVOCATION_ID"), nd_log.invocation_id) != 0) { + // not found, check for systemd set INVOCATION_ID + if(uuid_parse_flexi(getenv("INVOCATION_ID"), nd_log.invocation_id) != 0) { + // not found, generate a new one + uuid_generate_random(nd_log.invocation_id); + } + } + + char uuid[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(nd_log.invocation_id, uuid); + nd_setenv("NETDATA_INVOCATION_ID", uuid, 1); +} + +// -------------------------------------------------------------------------------------------------------------------- + +void nd_log_initialize_for_external_plugins(const char *name) { + // if we don't run under Netdata, log to stderr, + // otherwise, use the logging method Netdata wants us to use. 
+#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + nd_setenv("NETDATA_LOG_METHOD", ETW_NAME, 0); + nd_setenv("NETDATA_LOG_FORMAT", ETW_NAME, 0); +#elif defined(HAVE_WEL) + nd_setenv("NETDATA_LOG_METHOD", WEL_NAME, 0); + nd_setenv("NETDATA_LOG_FORMAT", WEL_NAME, 0); +#else + nd_setenv("NETDATA_LOG_METHOD", "stderr", 0); + nd_setenv("NETDATA_LOG_FORMAT", "logfmt", 0); +#endif +#else + nd_setenv("NETDATA_LOG_METHOD", "stderr", 0); + nd_setenv("NETDATA_LOG_FORMAT", "logfmt", 0); +#endif + + nd_log.overwrite_process_source = NDLS_COLLECTORS; + program_name = name; + + for(size_t i = 0; i < _NDLS_MAX ;i++) { + nd_log.sources[i].method = STDERR_FILENO; + nd_log.sources[i].fd = -1; + nd_log.sources[i].fp = NULL; + } + + nd_log_set_priority_level(getenv("NETDATA_LOG_LEVEL")); + nd_log_set_facility(getenv("NETDATA_SYSLOG_FACILITY")); + + time_t period = 1200; + size_t logs = 200; + const char *s = getenv("NETDATA_ERRORS_THROTTLE_PERIOD"); + if(s && *s >= '0' && *s <= '9') { + period = str2l(s); + if(period < 0) period = 0; + } + + s = getenv("NETDATA_ERRORS_PER_PERIOD"); + if(s && *s >= '0' && *s <= '9') + logs = str2u(s); + + nd_log_set_flood_protection(logs, period); + + if(!netdata_configured_host_prefix) { + s = getenv("NETDATA_HOST_PREFIX"); + if(s && *s) + netdata_configured_host_prefix = (char *)s; + } + + ND_LOG_METHOD method = nd_log_method2id(getenv("NETDATA_LOG_METHOD")); + ND_LOG_FORMAT format = nd_log_format2id(getenv("NETDATA_LOG_FORMAT")); + + if(!IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(method)) { + if(is_stderr_connected_to_journal()) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "NETDATA_LOG_METHOD is not set. Using journal."); + method = NDLM_JOURNAL; + } + else { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "NETDATA_LOG_METHOD is not set. 
Using stderr."); + method = NDLM_STDERR; + } + } + + switch(method) { + case NDLM_JOURNAL: + if(!nd_log_journal_direct_init(getenv("NETDATA_SYSTEMD_JOURNAL_PATH")) || + !nd_log_journal_direct_init(NULL) || !nd_log_journal_systemd_init()) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to initialize journal. Using stderr."); + method = NDLM_STDERR; + } + break; + +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + case NDLM_ETW: + if(!nd_log_init_etw()) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to initialize Events Tracing for Windows (ETW). Using stderr."); + method = NDLM_STDERR; + } + break; +#endif +#if defined(HAVE_WEL) + case NDLM_WEL: + if(!nd_log_init_wel()) { + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to initialize Windows Event Log (WEL). Using stderr."); + method = NDLM_STDERR; + } + break; +#endif +#endif + + case NDLM_SYSLOG: + nd_log_init_syslog(); + break; + + default: + method = NDLM_STDERR; + break; + } + + for(size_t i = 0; i < _NDLS_MAX ;i++) { + nd_log.sources[i].method = method; + nd_log.sources[i].format = format; + nd_log.sources[i].fd = -1; + nd_log.sources[i].fp = NULL; + } + + // nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "FINAL_LOG_METHOD: %s", nd_log_id2method(method)); +} + +// -------------------------------------------------------------------------------------------------------------------- + +void nd_log_open(struct nd_log_source *e, ND_LOG_SOURCES source) { + if(e->method == NDLM_DEFAULT) + nd_log_set_user_settings(source, e->filename); + + if((e->method == NDLM_FILE && !e->filename) || + (e->method == NDLM_DEVNULL && e->fd == -1)) + e->method = NDLM_DISABLED; + + if(e->fp) + fflush(e->fp); + + switch(e->method) { + case NDLM_SYSLOG: + nd_log_init_syslog(); + break; + + case NDLM_JOURNAL: + nd_log_journal_direct_init(NULL); + nd_log_journal_systemd_init(); + break; + +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + case NDLM_ETW: + nd_log_init_etw(); + break; +#endif +#if defined(HAVE_WEL) + case NDLM_WEL: + 
nd_log_init_wel(); + break; +#endif +#endif + + case NDLM_STDOUT: + e->fp = stdout; + e->fd = STDOUT_FILENO; + break; + + case NDLM_DISABLED: + break; + + case NDLM_DEFAULT: + case NDLM_STDERR: + e->method = NDLM_STDERR; + e->fp = stderr; + e->fd = STDERR_FILENO; + break; + + case NDLM_DEVNULL: + case NDLM_FILE: { + int fd = open(e->filename, O_WRONLY | O_APPEND | O_CREAT, 0664); + if(fd == -1) { + if(e->fd != STDOUT_FILENO && e->fd != STDERR_FILENO) { + e->fd = STDERR_FILENO; + e->method = NDLM_STDERR; + netdata_log_error("Cannot open log file '%s'. Falling back to stderr.", e->filename); + } + else + netdata_log_error("Cannot open log file '%s'. Leaving fd %d as-is.", e->filename, e->fd); + } + else { + if (!nd_log_replace_existing_fd(e, fd)) { + if(e->fd == STDOUT_FILENO || e->fd == STDERR_FILENO) { + if(e->fd == STDOUT_FILENO) + e->method = NDLM_STDOUT; + else if(e->fd == STDERR_FILENO) + e->method = NDLM_STDERR; + + // we have dup2() fd, so we can close the one we opened + if(fd != STDOUT_FILENO && fd != STDERR_FILENO) + close(fd); + } + else + e->fd = fd; + } + } + + // at this point we have e->fd set properly + + if(e->fd == STDOUT_FILENO) + e->fp = stdout; + else if(e->fd == STDERR_FILENO) + e->fp = stderr; + + if(!e->fp) { + e->fp = fdopen(e->fd, "a"); + if (!e->fp) { + netdata_log_error("Cannot fdopen() fd %d ('%s')", e->fd, e->filename); + + if(e->fd != STDOUT_FILENO && e->fd != STDERR_FILENO) + close(e->fd); + + e->fp = stderr; + e->fd = STDERR_FILENO; + } + } + else { + if (setvbuf(e->fp, NULL, _IOLBF, 0) != 0) + netdata_log_error("Cannot set line buffering on fd %d ('%s')", e->fd, e->filename); + } + } + break; + } +} + +// -------------------------------------------------------------------------------------------------------------------- + +void nd_log_stdin_init(int fd, const char *filename) { + int f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664); + if(f == -1) + return; + + if(f != fd) { + dup2(f, fd); + close(f); + } +} + +void 
nd_log_initialize(void) { + nd_log_stdin_init(STDIN_FILENO, "/dev/null"); + + for(size_t i = 0 ; i < _NDLS_MAX ; i++) + nd_log_open(&nd_log.sources[i], i); +} + +void nd_log_reopen_log_files(bool log) { + if(log) + netdata_log_info("Reopening all log files."); + + nd_log.std_output.initialized = false; + nd_log.std_error.initialized = false; + nd_log.journal_direct.initialized = false; + nd_log.journal.initialized = false; + nd_log_initialize(); + + if(log) + netdata_log_info("Log files re-opened."); +} + +void nd_log_reopen_log_files_for_spawn_server(void) { + gettid_uncached(); + + if(nd_log.syslog.initialized) { + closelog(); + nd_log.syslog.initialized = false; + nd_log_init_syslog(); + } + + if(nd_log.journal_direct.initialized) { + close(nd_log.journal_direct.fd); + nd_log.journal_direct.fd = -1; + nd_log.journal_direct.initialized = false; + nd_log_journal_direct_init(NULL); + } + + nd_log.sources[NDLS_UNSET].method = NDLM_DISABLED; + nd_log.sources[NDLS_ACCESS].method = NDLM_DISABLED; + nd_log.sources[NDLS_ACLK].method = NDLM_DISABLED; + nd_log.sources[NDLS_DEBUG].method = NDLM_DISABLED; + nd_log.sources[NDLS_HEALTH].method = NDLM_DISABLED; + nd_log_reopen_log_files(false); +} + diff --git a/src/libnetdata/log/nd_log-internals.c b/src/libnetdata/log/nd_log-internals.c new file mode 100644 index 00000000000000..cb26b816e9f149 --- /dev/null +++ b/src/libnetdata/log/nd_log-internals.c @@ -0,0 +1,821 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +// -------------------------------------------------------------------------------------------------------------------- +// workaround strerror_r() + +#if defined(STRERROR_R_CHAR_P) +// GLIBC version of strerror_r +static const char *strerror_result(const char *a, const char *b) { (void)b; return a; } +#elif defined(HAVE_STRERROR_R) +// POSIX version of strerror_r +static const char *strerror_result(int a, const char *b) { (void)a; return b; } +#elif defined(HAVE_C__GENERIC) + +// 
what a trick! +// http://stackoverflow.com/questions/479207/function-overloading-in-c +static const char *strerror_result_int(int a, const char *b) { (void)a; return b; } +static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; } + +#define strerror_result(a, b) _Generic((a), \ + int: strerror_result_int, \ + char *: strerror_result_string \ + )(a, b) + +#else +#error "cannot detect the format of function strerror_r()" +#endif + +const char *errno2str(int errnum, char *buf, size_t size) { + return strerror_result(strerror_r(errnum, buf, size), buf); +} + +// -------------------------------------------------------------------------------------------------------------------- +// logging method + +static struct { + ND_LOG_METHOD method; + const char *name; +} nd_log_methods[] = { + { .method = NDLM_DISABLED, .name = "none" }, + { .method = NDLM_DEVNULL, .name = "/dev/null" }, + { .method = NDLM_DEFAULT, .name = "default" }, + { .method = NDLM_JOURNAL, .name = "journal" }, + { .method = NDLM_SYSLOG, .name = "syslog" }, + { .method = NDLM_STDOUT, .name = "stdout" }, + { .method = NDLM_STDERR, .name = "stderr" }, + { .method = NDLM_FILE, .name = "file" }, +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + { .method = NDLM_ETW, .name = ETW_NAME }, +#endif +#if defined(HAVE_WEL) + { .method = NDLM_WEL, .name = WEL_NAME }, +#endif +#endif +}; + +ND_LOG_METHOD nd_log_method2id(const char *method) { + if(!method || !*method) + return NDLM_DEFAULT; + + size_t entries = sizeof(nd_log_methods) / sizeof(nd_log_methods[0]); + for(size_t i = 0; i < entries ;i++) { + if(strcmp(nd_log_methods[i].name, method) == 0) + return nd_log_methods[i].method; + } + + return NDLM_FILE; +} + +const char *nd_log_id2method(ND_LOG_METHOD method) { + size_t entries = sizeof(nd_log_methods) / sizeof(nd_log_methods[0]); + for(size_t i = 0; i < entries ;i++) { + if(method == nd_log_methods[i].method) + return nd_log_methods[i].name; + } + + return "unknown"; +} + 
+const char *nd_log_method_for_external_plugins(const char *s) { + if(s && *s) { + ND_LOG_METHOD method = nd_log_method2id(s); + if(IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(method)) + return nd_log_id2method(method); + } + + return nd_log_id2method(NDLM_STDERR); +} + +// -------------------------------------------------------------------------------------------------------------------- +// facilities +// +// sys/syslog.h (Linux) +// sys/sys/syslog.h (FreeBSD) +// bsd/sys/syslog.h (darwin-xnu) + +static struct { + int facility; + const char *name; +} nd_log_facilities[] = { + { LOG_AUTH, "auth" }, + { LOG_AUTHPRIV, "authpriv" }, + { LOG_CRON, "cron" }, + { LOG_DAEMON, "daemon" }, + { LOG_FTP, "ftp" }, + { LOG_KERN, "kern" }, + { LOG_LPR, "lpr" }, + { LOG_MAIL, "mail" }, + { LOG_NEWS, "news" }, + { LOG_SYSLOG, "syslog" }, + { LOG_USER, "user" }, + { LOG_UUCP, "uucp" }, + { LOG_LOCAL0, "local0" }, + { LOG_LOCAL1, "local1" }, + { LOG_LOCAL2, "local2" }, + { LOG_LOCAL3, "local3" }, + { LOG_LOCAL4, "local4" }, + { LOG_LOCAL5, "local5" }, + { LOG_LOCAL6, "local6" }, + { LOG_LOCAL7, "local7" }, + +#ifdef __FreeBSD__ + { LOG_CONSOLE, "console" }, + { LOG_NTP, "ntp" }, + + // FreeBSD does not consider 'security' as deprecated. + { LOG_SECURITY, "security" }, +#else + // For all other O/S 'security' is mapped to 'auth'. 
+ { LOG_AUTH, "security" }, +#endif + +#ifdef __APPLE__ + { LOG_INSTALL, "install" }, + { LOG_NETINFO, "netinfo" }, + { LOG_RAS, "ras" }, + { LOG_REMOTEAUTH, "remoteauth" }, + { LOG_LAUNCHD, "launchd" }, + +#endif +}; + +int nd_log_facility2id(const char *facility) { + size_t entries = sizeof(nd_log_facilities) / sizeof(nd_log_facilities[0]); + for(size_t i = 0; i < entries ;i++) { + if(strcmp(nd_log_facilities[i].name, facility) == 0) + return nd_log_facilities[i].facility; + } + + return LOG_DAEMON; +} + +const char *nd_log_id2facility(int facility) { + size_t entries = sizeof(nd_log_facilities) / sizeof(nd_log_facilities[0]); + for(size_t i = 0; i < entries ;i++) { + if(nd_log_facilities[i].facility == facility) + return nd_log_facilities[i].name; + } + + return "daemon"; +} + +// -------------------------------------------------------------------------------------------------------------------- +// priorities + +static struct { + ND_LOG_FIELD_PRIORITY priority; + const char *name; +} nd_log_priorities[] = { + { .priority = NDLP_EMERG, .name = "emergency" }, + { .priority = NDLP_EMERG, .name = "emerg" }, + { .priority = NDLP_ALERT, .name = "alert" }, + { .priority = NDLP_CRIT, .name = "critical" }, + { .priority = NDLP_CRIT, .name = "crit" }, + { .priority = NDLP_ERR, .name = "error" }, + { .priority = NDLP_ERR, .name = "err" }, + { .priority = NDLP_WARNING, .name = "warning" }, + { .priority = NDLP_WARNING, .name = "warn" }, + { .priority = NDLP_NOTICE, .name = "notice" }, + { .priority = NDLP_INFO, .name = NDLP_INFO_STR }, + { .priority = NDLP_DEBUG, .name = "debug" }, +}; + +int nd_log_priority2id(const char *priority) { + size_t entries = sizeof(nd_log_priorities) / sizeof(nd_log_priorities[0]); + for(size_t i = 0; i < entries ;i++) { + if(strcmp(nd_log_priorities[i].name, priority) == 0) + return nd_log_priorities[i].priority; + } + + return NDLP_INFO; +} + +const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority) { + size_t entries = 
sizeof(nd_log_priorities) / sizeof(nd_log_priorities[0]); + for(size_t i = 0; i < entries ;i++) { + if(priority == nd_log_priorities[i].priority) + return nd_log_priorities[i].name; + } + + return NDLP_INFO_STR; +} + +// -------------------------------------------------------------------------------------------------------------------- +// log sources + +const char *nd_log_sources[] = { + [NDLS_UNSET] = "UNSET", + [NDLS_ACCESS] = "access", + [NDLS_ACLK] = "aclk", + [NDLS_COLLECTORS] = "collector", + [NDLS_DAEMON] = "daemon", + [NDLS_HEALTH] = "health", + [NDLS_DEBUG] = "debug", +}; + +size_t nd_log_source2id(const char *source, ND_LOG_SOURCES def) { + size_t entries = sizeof(nd_log_sources) / sizeof(nd_log_sources[0]); + for(size_t i = 0; i < entries ;i++) { + if(strcmp(nd_log_sources[i], source) == 0) + return i; + } + + return def; +} + + +const char *nd_log_id2source(ND_LOG_SOURCES source) { + size_t entries = sizeof(nd_log_sources) / sizeof(nd_log_sources[0]); + if(source < entries) + return nd_log_sources[source]; + + return nd_log_sources[NDLS_COLLECTORS]; +} + +// -------------------------------------------------------------------------------------------------------------------- +// log output formats + +static struct { + ND_LOG_FORMAT format; + const char *name; +} nd_log_formats[] = { + { .format = NDLF_JOURNAL, .name = "journal" }, + { .format = NDLF_LOGFMT, .name = "logfmt" }, + { .format = NDLF_JSON, .name = "json" }, +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + { .format = NDLF_ETW, .name = ETW_NAME }, +#endif +#if defined(HAVE_WEL) + { .format = NDLF_WEL, .name = WEL_NAME }, +#endif +#endif +}; + +ND_LOG_FORMAT nd_log_format2id(const char *format) { + if(!format || !*format) + return NDLF_LOGFMT; + + size_t entries = sizeof(nd_log_formats) / sizeof(nd_log_formats[0]); + for(size_t i = 0; i < entries ;i++) { + if(strcmp(nd_log_formats[i].name, format) == 0) + return nd_log_formats[i].format; + } + + return NDLF_LOGFMT; +} + +const char 
*nd_log_id2format(ND_LOG_FORMAT format) { + size_t entries = sizeof(nd_log_formats) / sizeof(nd_log_formats[0]); + for(size_t i = 0; i < entries ;i++) { + if(format == nd_log_formats[i].format) + return nd_log_formats[i].name; + } + + return "logfmt"; +} + +// -------------------------------------------------------------------------------------------------------------------- + +struct nd_log nd_log = { + .overwrite_process_source = 0, + .journal = { + .initialized = false, + }, + .journal_direct = { + .initialized = false, + .fd = -1, + }, + .syslog = { + .initialized = false, + .facility = LOG_DAEMON, + }, +#if defined(OS_WINDOWS) + .eventlog = { + .initialized = false, + }, +#endif + .std_output = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .initialized = false, + }, + .std_error = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .initialized = false, + }, + .sources = { + [NDLS_UNSET] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DISABLED, + .format = NDLF_JOURNAL, + .filename = NULL, + .fd = -1, + .fp = NULL, + .min_priority = NDLP_EMERG, + .limits = ND_LOG_LIMITS_UNLIMITED, + }, + [NDLS_ACCESS] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DEFAULT, + .format = NDLF_LOGFMT, + .filename = LOG_DIR "/access.log", + .fd = -1, + .fp = NULL, + .min_priority = NDLP_DEBUG, + .limits = ND_LOG_LIMITS_UNLIMITED, + }, + [NDLS_ACLK] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_FILE, + .format = NDLF_LOGFMT, + .filename = LOG_DIR "/aclk.log", + .fd = -1, + .fp = NULL, + .min_priority = NDLP_DEBUG, + .limits = ND_LOG_LIMITS_UNLIMITED, + }, + [NDLS_COLLECTORS] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DEFAULT, + .format = NDLF_LOGFMT, + .filename = LOG_DIR "/collector.log", + .fd = STDERR_FILENO, + .fp = NULL, + .min_priority = NDLP_INFO, + .limits = ND_LOG_LIMITS_DEFAULT, + }, + [NDLS_DEBUG] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DISABLED, + .format = NDLF_LOGFMT, + 
.filename = LOG_DIR "/debug.log", + .fd = STDOUT_FILENO, + .fp = NULL, + .min_priority = NDLP_DEBUG, + .limits = ND_LOG_LIMITS_UNLIMITED, + }, + [NDLS_DAEMON] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DEFAULT, + .filename = LOG_DIR "/daemon.log", + .format = NDLF_LOGFMT, + .fd = -1, + .fp = NULL, + .min_priority = NDLP_INFO, + .limits = ND_LOG_LIMITS_DEFAULT, + }, + [NDLS_HEALTH] = { + .spinlock = NETDATA_SPINLOCK_INITIALIZER, + .method = NDLM_DEFAULT, + .format = NDLF_LOGFMT, + .filename = LOG_DIR "/health.log", + .fd = -1, + .fp = NULL, + .min_priority = NDLP_DEBUG, + .limits = ND_LOG_LIMITS_UNLIMITED, + }, + }, +}; + +// -------------------------------------------------------------------------------------------------------------------- + +__thread struct log_stack_entry *thread_log_stack_base[THREAD_LOG_STACK_MAX]; +__thread size_t thread_log_stack_next = 0; +__thread struct log_field thread_log_fields[_NDF_MAX] = { + // THE ORDER HERE IS IRRELEVANT (but keep them sorted by their number) + + [NDF_STOP] = { // processing will not stop on this - so it is ok to be first + .journal = NULL, + .logfmt = NULL, + .eventlog = NULL, + .annotator = NULL, + }, + [NDF_TIMESTAMP_REALTIME_USEC] = { + .journal = NULL, + .eventlog = "Timestamp", + .logfmt = "time", + .annotator = timestamp_usec_annotator, + }, + [NDF_SYSLOG_IDENTIFIER] = { + .journal = "SYSLOG_IDENTIFIER", // standard journald field + .eventlog = "Program", + .logfmt = "comm", + }, + [NDF_LOG_SOURCE] = { + .journal = "ND_LOG_SOURCE", + .eventlog = "NetdataLogSource", + .logfmt = "source", + }, + [NDF_PRIORITY] = { + .journal = "PRIORITY", // standard journald field + .eventlog = "Level", + .logfmt = "level", + .annotator = priority_annotator, + }, + [NDF_ERRNO] = { + .journal = "ERRNO", // standard journald field + .eventlog = "UnixErrno", + .logfmt = "errno", + .annotator = errno_annotator, + }, + [NDF_WINERROR] = { +#if defined(OS_WINDOWS) + .journal = "WINERROR", + .eventlog = 
"WindowsLastError", + .logfmt = "winerror", + .annotator = winerror_annotator, +#endif + }, + [NDF_INVOCATION_ID] = { + .journal = "INVOCATION_ID", // standard journald field + .eventlog = "InvocationID", + .logfmt = NULL, + }, + [NDF_LINE] = { + .journal = "CODE_LINE", // standard journald field + .eventlog = "CodeLine", + .logfmt = NULL, + }, + [NDF_FILE] = { + .journal = "CODE_FILE", // standard journald field + .eventlog = "CodeFile", + .logfmt = NULL, + }, + [NDF_FUNC] = { + .journal = "CODE_FUNC", // standard journald field + .eventlog = "CodeFunction", + .logfmt = NULL, + }, + [NDF_TID] = { + .journal = "TID", // standard journald field + .eventlog = "ThreadID", + .logfmt = "tid", + }, + [NDF_THREAD_TAG] = { + .journal = "THREAD_TAG", + .eventlog = "ThreadName", + .logfmt = "thread", + }, + [NDF_MESSAGE_ID] = { + .journal = "MESSAGE_ID", + .eventlog = "MessageID", + .logfmt = "msg_id", + }, + [NDF_MODULE] = { + .journal = "ND_MODULE", + .eventlog = "Module", + .logfmt = "module", + }, + [NDF_NIDL_NODE] = { + .journal = "ND_NIDL_NODE", + .eventlog = "Node", + .logfmt = "node", + }, + [NDF_NIDL_INSTANCE] = { + .journal = "ND_NIDL_INSTANCE", + .eventlog = "Instance", + .logfmt = "instance", + }, + [NDF_NIDL_CONTEXT] = { + .journal = "ND_NIDL_CONTEXT", + .eventlog = "Context", + .logfmt = "context", + }, + [NDF_NIDL_DIMENSION] = { + .journal = "ND_NIDL_DIMENSION", + .eventlog = "Dimension", + .logfmt = "dimension", + }, + [NDF_SRC_TRANSPORT] = { + .journal = "ND_SRC_TRANSPORT", + .eventlog = "SourceTransport", + .logfmt = "src_transport", + }, + [NDF_ACCOUNT_ID] = { + .journal = "ND_ACCOUNT_ID", + .eventlog = "AccountID", + .logfmt = "account", + }, + [NDF_USER_NAME] = { + .journal = "ND_USER_NAME", + .eventlog = "UserName", + .logfmt = "user", + }, + [NDF_USER_ROLE] = { + .journal = "ND_USER_ROLE", + .eventlog = "UserRole", + .logfmt = "role", + }, + [NDF_USER_ACCESS] = { + .journal = "ND_USER_PERMISSIONS", + .eventlog = "UserPermissions", + .logfmt = 
"permissions", + }, + [NDF_SRC_IP] = { + .journal = "ND_SRC_IP", + .eventlog = "SourceIP", + .logfmt = "src_ip", + }, + [NDF_SRC_FORWARDED_HOST] = { + .journal = "ND_SRC_FORWARDED_HOST", + .eventlog = "SourceForwardedHost", + .logfmt = "src_forwarded_host", + }, + [NDF_SRC_FORWARDED_FOR] = { + .journal = "ND_SRC_FORWARDED_FOR", + .eventlog = "SourceForwardedFor", + .logfmt = "src_forwarded_for", + }, + [NDF_SRC_PORT] = { + .journal = "ND_SRC_PORT", + .eventlog = "SourcePort", + .logfmt = "src_port", + }, + [NDF_SRC_CAPABILITIES] = { + .journal = "ND_SRC_CAPABILITIES", + .eventlog = "SourceCapabilities", + .logfmt = "src_capabilities", + }, + [NDF_DST_TRANSPORT] = { + .journal = "ND_DST_TRANSPORT", + .eventlog = "DestinationTransport", + .logfmt = "dst_transport", + }, + [NDF_DST_IP] = { + .journal = "ND_DST_IP", + .eventlog = "DestinationIP", + .logfmt = "dst_ip", + }, + [NDF_DST_PORT] = { + .journal = "ND_DST_PORT", + .eventlog = "DestinationPort", + .logfmt = "dst_port", + }, + [NDF_DST_CAPABILITIES] = { + .journal = "ND_DST_CAPABILITIES", + .eventlog = "DestinationCapabilities", + .logfmt = "dst_capabilities", + }, + [NDF_REQUEST_METHOD] = { + .journal = "ND_REQUEST_METHOD", + .eventlog = "RequestMethod", + .logfmt = "req_method", + }, + [NDF_RESPONSE_CODE] = { + .journal = "ND_RESPONSE_CODE", + .eventlog = "ResponseCode", + .logfmt = "code", + }, + [NDF_CONNECTION_ID] = { + .journal = "ND_CONNECTION_ID", + .eventlog = "ConnectionID", + .logfmt = "conn", + }, + [NDF_TRANSACTION_ID] = { + .journal = "ND_TRANSACTION_ID", + .eventlog = "TransactionID", + .logfmt = "transaction", + }, + [NDF_RESPONSE_SENT_BYTES] = { + .journal = "ND_RESPONSE_SENT_BYTES", + .eventlog = "ResponseSentBytes", + .logfmt = "sent_bytes", + }, + [NDF_RESPONSE_SIZE_BYTES] = { + .journal = "ND_RESPONSE_SIZE_BYTES", + .eventlog = "ResponseSizeBytes", + .logfmt = "size_bytes", + }, + [NDF_RESPONSE_PREPARATION_TIME_USEC] = { + .journal = "ND_RESPONSE_PREP_TIME_USEC", + .eventlog = 
"ResponsePreparationTimeUsec", + .logfmt = "prep_ut", + }, + [NDF_RESPONSE_SENT_TIME_USEC] = { + .journal = "ND_RESPONSE_SENT_TIME_USEC", + .eventlog = "ResponseSentTimeUsec", + .logfmt = "sent_ut", + }, + [NDF_RESPONSE_TOTAL_TIME_USEC] = { + .journal = "ND_RESPONSE_TOTAL_TIME_USEC", + .eventlog = "ResponseTotalTimeUsec", + .logfmt = "total_ut", + }, + [NDF_ALERT_ID] = { + .journal = "ND_ALERT_ID", + .eventlog = "AlertID", + .logfmt = "alert_id", + }, + [NDF_ALERT_UNIQUE_ID] = { + .journal = "ND_ALERT_UNIQUE_ID", + .eventlog = "AlertUniqueID", + .logfmt = "alert_unique_id", + }, + [NDF_ALERT_TRANSITION_ID] = { + .journal = "ND_ALERT_TRANSITION_ID", + .eventlog = "AlertTransitionID", + .logfmt = "alert_transition_id", + }, + [NDF_ALERT_EVENT_ID] = { + .journal = "ND_ALERT_EVENT_ID", + .eventlog = "AlertEventID", + .logfmt = "alert_event_id", + }, + [NDF_ALERT_CONFIG_HASH] = { + .journal = "ND_ALERT_CONFIG", + .eventlog = "AlertConfig", + .logfmt = "alert_config", + }, + [NDF_ALERT_NAME] = { + .journal = "ND_ALERT_NAME", + .eventlog = "AlertName", + .logfmt = "alert", + }, + [NDF_ALERT_CLASS] = { + .journal = "ND_ALERT_CLASS", + .eventlog = "AlertClass", + .logfmt = "alert_class", + }, + [NDF_ALERT_COMPONENT] = { + .journal = "ND_ALERT_COMPONENT", + .eventlog = "AlertComponent", + .logfmt = "alert_component", + }, + [NDF_ALERT_TYPE] = { + .journal = "ND_ALERT_TYPE", + .eventlog = "AlertType", + .logfmt = "alert_type", + }, + [NDF_ALERT_EXEC] = { + .journal = "ND_ALERT_EXEC", + .eventlog = "AlertExec", + .logfmt = "alert_exec", + }, + [NDF_ALERT_RECIPIENT] = { + .journal = "ND_ALERT_RECIPIENT", + .eventlog = "AlertRecipient", + .logfmt = "alert_recipient", + }, + [NDF_ALERT_VALUE] = { + .journal = "ND_ALERT_VALUE", + .eventlog = "AlertValue", + .logfmt = "alert_value", + }, + [NDF_ALERT_VALUE_OLD] = { + .journal = "ND_ALERT_VALUE_OLD", + .eventlog = "AlertOldValue", + .logfmt = "alert_value_old", + }, + [NDF_ALERT_STATUS] = { + .journal = "ND_ALERT_STATUS", + 
.eventlog = "AlertStatus", + .logfmt = "alert_status", + }, + [NDF_ALERT_STATUS_OLD] = { + .journal = "ND_ALERT_STATUS_OLD", + .eventlog = "AlertOldStatus", + .logfmt = "alert_value_old", + }, + [NDF_ALERT_UNITS] = { + .journal = "ND_ALERT_UNITS", + .eventlog = "AlertUnits", + .logfmt = "alert_units", + }, + [NDF_ALERT_SUMMARY] = { + .journal = "ND_ALERT_SUMMARY", + .eventlog = "AlertSummary", + .logfmt = "alert_summary", + }, + [NDF_ALERT_INFO] = { + .journal = "ND_ALERT_INFO", + .eventlog = "AlertInfo", + .logfmt = "alert_info", + }, + [NDF_ALERT_DURATION] = { + .journal = "ND_ALERT_DURATION", + .eventlog = "AlertDuration", + .logfmt = "alert_duration", + }, + [NDF_ALERT_NOTIFICATION_REALTIME_USEC] = { + .journal = "ND_ALERT_NOTIFICATION_TIMESTAMP_USEC", + .eventlog = "AlertNotificationTime", + .logfmt = "alert_notification_timestamp", + .annotator = timestamp_usec_annotator, + }, + + // put new items here + // leave the request URL and the message last + + [NDF_REQUEST] = { + .journal = "ND_REQUEST", + .eventlog = "Request", + .logfmt = "request", + }, + [NDF_MESSAGE] = { + .journal = "MESSAGE", + .eventlog = "Message", + .logfmt = "msg", + }, +}; + +// -------------------------------------------------------------------------------------------------------------------- + +void log_stack_pop(void *ptr) { + if(!ptr) return; + + struct log_stack_entry *lgs = *(struct log_stack_entry (*)[])ptr; + + if(unlikely(!thread_log_stack_next || lgs != thread_log_stack_base[thread_log_stack_next - 1])) { + fatal("You cannot pop in the middle of the stack, or an item not in the stack"); + return; + } + + thread_log_stack_next--; +} + +void log_stack_push(struct log_stack_entry *lgs) { + if(!lgs || thread_log_stack_next >= THREAD_LOG_STACK_MAX) return; + thread_log_stack_base[thread_log_stack_next++] = lgs; +} + +// -------------------------------------------------------------------------------------------------------------------- + +ND_LOG_FIELD_ID 
nd_log_field_id_by_journal_name(const char *field, size_t len) { + for(size_t i = 0; i < THREAD_FIELDS_MAX ;i++) { + if(thread_log_fields[i].journal && strlen(thread_log_fields[i].journal) == len && strncmp(field, thread_log_fields[i].journal, len) == 0) + return i; + } + + return NDF_STOP; +} + +// -------------------------------------------------------------------------------------------------------------------- + +int nd_log_health_fd(void) { + if(nd_log.sources[NDLS_HEALTH].method == NDLM_FILE && nd_log.sources[NDLS_HEALTH].fd != -1) + return nd_log.sources[NDLS_HEALTH].fd; + + return STDERR_FILENO; +} + +int nd_log_collectors_fd(void) { + if(nd_log.sources[NDLS_COLLECTORS].method == NDLM_FILE && nd_log.sources[NDLS_COLLECTORS].fd != -1) + return nd_log.sources[NDLS_COLLECTORS].fd; + + return STDERR_FILENO; +} + +// -------------------------------------------------------------------------------------------------------------------- + +void log_date(char *buffer, size_t len, time_t now) { + if(unlikely(!buffer || !len)) + return; + + time_t t = now; + struct tm *tmp, tmbuf; + + tmp = localtime_r(&t, &tmbuf); + + if (unlikely(!tmp)) { + buffer[0] = '\0'; + return; + } + + if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0)) + buffer[0] = '\0'; + + buffer[len - 1] = '\0'; +} + +// -------------------------------------------------------------------------------------------------------------------- + +bool nd_log_replace_existing_fd(struct nd_log_source *e, int new_fd) { + if(new_fd == -1 || e->fd == -1 || + (e->fd == STDOUT_FILENO && nd_log.std_output.initialized) || + (e->fd == STDERR_FILENO && nd_log.std_error.initialized)) + return false; + + if(new_fd != e->fd) { + int t = dup2(new_fd, e->fd); + + bool ret = true; + if (t == -1) { + netdata_log_error("Cannot dup2() new fd %d to old fd %d for '%s'", new_fd, e->fd, e->filename); + ret = false; + } + else + close(new_fd); + + if(e->fd == STDOUT_FILENO) + nd_log.std_output.initialized = true; + else 
if(e->fd == STDERR_FILENO) + nd_log.std_error.initialized = true; + + return ret; + } + + return false; +} diff --git a/src/libnetdata/log/nd_log-internals.h b/src/libnetdata/log/nd_log-internals.h new file mode 100644 index 00000000000000..03ac40405f69a3 --- /dev/null +++ b/src/libnetdata/log/nd_log-internals.h @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_ND_LOG_INTERNALS_H +#define NETDATA_ND_LOG_INTERNALS_H + +#include "../libnetdata.h" + +#if defined(OS_WINDOWS) +#include +#endif + +#ifdef __FreeBSD__ +#include +#endif + +#ifdef __APPLE__ +#include +#endif + +#if !defined(ENABLE_SENTRY) && defined(HAVE_BACKTRACE) +#include +#endif + +#ifdef HAVE_SYSTEMD +#include +#endif + +const char *errno2str(int errnum, char *buf, size_t size); + +// -------------------------------------------------------------------------------------------------------------------- +// ND_LOG_METHOD + +typedef enum __attribute__((__packed__)) { + NDLM_DISABLED = 0, + NDLM_DEVNULL, + NDLM_DEFAULT, + NDLM_JOURNAL, + NDLM_SYSLOG, + NDLM_STDOUT, + NDLM_STDERR, + NDLM_FILE, +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + NDLM_ETW, +#endif +#if defined(HAVE_WEL) + NDLM_WEL, +#endif +#endif +} ND_LOG_METHOD; + +// all the log methods are finally mapped to these +#if defined(HAVE_ETW) +#define ETW_CONDITION(ndlo) ((ndlo) == NDLM_ETW) +#else +#define ETW_CONDITION(ndlo) (false) +#endif + +#if defined(HAVE_WEL) +#define WEL_CONDITION(ndlo) ((ndlo) == NDLM_WEL) +#else +#define WEL_CONDITION(ndlo) (false) +#endif + +#define IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(ndlo) ((ndlo) == NDLM_JOURNAL || (ndlo) == NDLM_SYSLOG || (ndlo) == NDLM_STDERR || ETW_CONDITION(ndlo) || WEL_CONDITION(ndlo)) +#define IS_FINAL_LOG_METHOD(ndlo) ((ndlo) == NDLM_FILE || (ndlo) == NDLM_JOURNAL || (ndlo) == NDLM_SYSLOG || ETW_CONDITION(ndlo) || WEL_CONDITION(ndlo)) + +ND_LOG_METHOD nd_log_method2id(const char *method); +const char *nd_log_id2method(ND_LOG_METHOD method); + +// 
-------------------------------------------------------------------------------------------------------------------- +// ND_LOG_FORMAT + +typedef enum __attribute__((__packed__)) { + NDLF_JOURNAL, + NDLF_LOGFMT, + NDLF_JSON, +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) + NDLF_ETW, // Event Tracing for Windows +#endif +#if defined(HAVE_WEL) + NDLF_WEL, // Windows Event Log +#endif +#endif +} ND_LOG_FORMAT; + +#define ETW_NAME "etw" +#define WEL_NAME "wel" + +const char *nd_log_id2format(ND_LOG_FORMAT format); +ND_LOG_FORMAT nd_log_format2id(const char *format); + +size_t nd_log_source2id(const char *source, ND_LOG_SOURCES def); +const char *nd_log_id2source(ND_LOG_SOURCES source); + +const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority); +int nd_log_priority2id(const char *priority); + +const char *nd_log_id2facility(int facility); +int nd_log_facility2id(const char *facility); + +#include "nd_log_limit.h" + +struct nd_log_source { + SPINLOCK spinlock; + ND_LOG_METHOD method; + ND_LOG_FORMAT format; + const char *filename; + int fd; + FILE *fp; + + ND_LOG_FIELD_PRIORITY min_priority; + const char *pending_msg; + struct nd_log_limit limits; + +#if defined(OS_WINDOWS) + ND_LOG_SOURCES source; + HANDLE hEventLog; + USHORT channelID; + UCHAR Opcode; + USHORT Task; + ULONGLONG Keyword; +#endif +}; + +struct nd_log { + nd_uuid_t invocation_id; + + ND_LOG_SOURCES overwrite_process_source; + + struct nd_log_source sources[_NDLS_MAX]; + + struct { + bool initialized; + } journal; + + struct { + bool initialized; + int fd; + char filename[FILENAME_MAX + 1]; + } journal_direct; + + struct { + bool initialized; + int facility; + } syslog; + + struct { + bool etw; // when set use etw, otherwise wel + bool initialized; + } eventlog; + + struct { + SPINLOCK spinlock; + bool initialized; + } std_output; + + struct { + SPINLOCK spinlock; + bool initialized; + } std_error; + +}; + +// 
-------------------------------------------------------------------------------------------------------------------- + +struct log_field; +typedef const char *(*annotator_t)(struct log_field *lf); + +struct log_field { + const char *journal; + const char *logfmt; + const char *eventlog; + annotator_t annotator; + struct log_stack_entry entry; +}; + +#define THREAD_LOG_STACK_MAX 50 +#define THREAD_FIELDS_MAX (sizeof(thread_log_fields) / sizeof(thread_log_fields[0])) + +extern __thread struct log_stack_entry *thread_log_stack_base[THREAD_LOG_STACK_MAX]; +extern __thread size_t thread_log_stack_next; +extern __thread struct log_field thread_log_fields[_NDF_MAX]; + +// -------------------------------------------------------------------------------------------------------------------- + +extern struct nd_log nd_log; +bool nd_log_replace_existing_fd(struct nd_log_source *e, int new_fd); +void nd_log_open(struct nd_log_source *e, ND_LOG_SOURCES source); +void nd_log_stdin_init(int fd, const char *filename); + +// -------------------------------------------------------------------------------------------------------------------- +// annotators + +struct log_field; +const char *errno_annotator(struct log_field *lf); +const char *priority_annotator(struct log_field *lf); +const char *timestamp_usec_annotator(struct log_field *lf); + +#if defined(OS_WINDOWS) +const char *winerror_annotator(struct log_field *lf); +#endif + +// -------------------------------------------------------------------------------------------------------------------- +// field formatters + +uint64_t log_field_to_uint64(struct log_field *lf); +int64_t log_field_to_int64(struct log_field *lf); + +// -------------------------------------------------------------------------------------------------------------------- +// common text formatters + +void nd_logger_logfmt(BUFFER *wb, struct log_field *fields, size_t fields_max); +void nd_logger_json(BUFFER *wb, struct log_field *fields, size_t fields_max); + 
+// -------------------------------------------------------------------------------------------------------------------- +// output to syslog + +void nd_log_init_syslog(void); +void nd_log_reset_syslog(void); +bool nd_logger_syslog(int priority, ND_LOG_FORMAT format, struct log_field *fields, size_t fields_max); + +// -------------------------------------------------------------------------------------------------------------------- +// output to systemd-journal + +bool nd_log_journal_systemd_init(void); +bool nd_log_journal_direct_init(const char *path); +bool nd_logger_journal_direct(struct log_field *fields, size_t fields_max); +bool nd_logger_journal_libsystemd(struct log_field *fields, size_t fields_max); + +// -------------------------------------------------------------------------------------------------------------------- +// output to file + +bool nd_logger_file(FILE *fp, ND_LOG_FORMAT format, struct log_field *fields, size_t fields_max); + +// -------------------------------------------------------------------------------------------------------------------- +// output to windows events log + +#if defined(OS_WINDOWS) +#if defined(HAVE_ETW) +bool nd_log_init_etw(void); +bool nd_logger_etw(struct nd_log_source *source, struct log_field *fields, size_t fields_max); +#endif +#if defined(HAVE_WEL) +bool nd_log_init_wel(void); +bool nd_logger_wel(struct nd_log_source *source, struct log_field *fields, size_t fields_max); +#endif +#endif + +#endif //NETDATA_ND_LOG_INTERNALS_H diff --git a/src/libnetdata/log/nd_log-to-file.c b/src/libnetdata/log/nd_log-to-file.c new file mode 100644 index 00000000000000..2de76536b1e3f3 --- /dev/null +++ b/src/libnetdata/log/nd_log-to-file.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +void chown_open_file(int fd, uid_t uid, gid_t gid) { + if(fd == -1) return; + + struct stat buf; + + if(fstat(fd, &buf) == -1) { + netdata_log_error("Cannot fstat() fd %d", fd); + return; + } + 
+ if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) { + if(fchown(fd, uid, gid) == -1) + netdata_log_error("Cannot fchown() fd %d.", fd); + } +} + +void nd_log_chown_log_files(uid_t uid, gid_t gid) { + for(size_t i = 0 ; i < _NDLS_MAX ; i++) { + if(nd_log.sources[i].fd != -1 && nd_log.sources[i].fd != STDIN_FILENO) + chown_open_file(nd_log.sources[i].fd, uid, gid); + } +} + +bool nd_logger_file(FILE *fp, ND_LOG_FORMAT format, struct log_field *fields, size_t fields_max) { + BUFFER *wb = buffer_create(1024, NULL); + + if(format == NDLF_JSON) + nd_logger_json(wb, fields, fields_max); + else + nd_logger_logfmt(wb, fields, fields_max); + + int r = fprintf(fp, "%s\n", buffer_tostring(wb)); + fflush(fp); + + buffer_free(wb); + return r > 0; +} diff --git a/src/libnetdata/log/nd_log-to-syslog.c b/src/libnetdata/log/nd_log-to-syslog.c new file mode 100644 index 00000000000000..2903bf591711e1 --- /dev/null +++ b/src/libnetdata/log/nd_log-to-syslog.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +void nd_log_init_syslog(void) { + if(nd_log.syslog.initialized) + return; + + openlog(program_name, LOG_PID, nd_log.syslog.facility); + nd_log.syslog.initialized = true; +} + +bool nd_logger_syslog(int priority, ND_LOG_FORMAT format __maybe_unused, struct log_field *fields, size_t fields_max) { + CLEAN_BUFFER *wb = buffer_create(1024, NULL); + + nd_logger_logfmt(wb, fields, fields_max); + syslog(priority, "%s", buffer_tostring(wb)); + + return true; +} diff --git a/src/libnetdata/log/nd_log-to-systemd-journal.c b/src/libnetdata/log/nd_log-to-systemd-journal.c new file mode 100644 index 00000000000000..b574e693cfdd64 --- /dev/null +++ b/src/libnetdata/log/nd_log-to-systemd-journal.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +bool nd_log_journal_systemd_init(void) { +#ifdef HAVE_SYSTEMD + nd_log.journal.initialized = true; +#else + nd_log.journal.initialized 
= false; +#endif + + return nd_log.journal.initialized; +} + +bool nd_log_journal_socket_available(void) { + if(netdata_configured_host_prefix && *netdata_configured_host_prefix) { + char filename[FILENAME_MAX + 1]; + + snprintfz(filename, sizeof(filename), "%s%s", + netdata_configured_host_prefix, "/run/systemd/journal/socket"); + + if(is_path_unix_socket(filename)) + return true; + } + + return is_path_unix_socket("/run/systemd/journal/socket"); +} + +static void nd_log_journal_direct_set_env(void) { + if(nd_log.sources[NDLS_COLLECTORS].method == NDLM_JOURNAL) + nd_setenv("NETDATA_SYSTEMD_JOURNAL_PATH", nd_log.journal_direct.filename, 1); +} + +bool nd_log_journal_direct_init(const char *path) { + if(nd_log.journal_direct.initialized) { + nd_log_journal_direct_set_env(); + return true; + } + + int fd; + char filename[FILENAME_MAX + 1]; + if(!is_path_unix_socket(path)) { + + journal_construct_path(filename, sizeof(filename), netdata_configured_host_prefix, "netdata"); + if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) { + + journal_construct_path(filename, sizeof(filename), netdata_configured_host_prefix, NULL); + if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) { + + journal_construct_path(filename, sizeof(filename), NULL, "netdata"); + if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) { + + journal_construct_path(filename, sizeof(filename), NULL, NULL); + if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) + return false; + } + } + } + } + else { + snprintfz(filename, sizeof(filename), "%s", path); + fd = journal_direct_fd(filename); + } + + if(fd < 0) + return false; + + nd_log.journal_direct.fd = fd; + nd_log.journal_direct.initialized = true; + + strncpyz(nd_log.journal_direct.filename, filename, sizeof(nd_log.journal_direct.filename) - 1); + nd_log_journal_direct_set_env(); + + return true; +} + +bool nd_logger_journal_libsystemd(struct 
log_field *fields __maybe_unused, size_t fields_max __maybe_unused) { +#ifdef HAVE_SYSTEMD + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + struct iovec iov[fields_max]; + int iov_count = 0; + + memset(iov, 0, sizeof(iov)); + + CLEAN_BUFFER *tmp = NULL; + + for (size_t i = 0; i < fields_max; i++) { + if (!fields[i].entry.set || !fields[i].journal) + continue; + + const char *key = fields[i].journal; + char *value = NULL; + int rc = 0; + switch (fields[i].entry.type) { + case NDFT_TXT: + if(*fields[i].entry.txt) + rc = asprintf(&value, "%s=%s", key, fields[i].entry.txt); + break; + case NDFT_STR: + rc = asprintf(&value, "%s=%s", key, string2str(fields[i].entry.str)); + break; + case NDFT_BFR: + if(buffer_strlen(fields[i].entry.bfr)) + rc = asprintf(&value, "%s=%s", key, buffer_tostring(fields[i].entry.bfr)); + break; + case NDFT_U64: + rc = asprintf(&value, "%s=%" PRIu64, key, fields[i].entry.u64); + break; + case NDFT_I64: + rc = asprintf(&value, "%s=%" PRId64, key, fields[i].entry.i64); + break; + case NDFT_DBL: + rc = asprintf(&value, "%s=%f", key, fields[i].entry.dbl); + break; + case NDFT_UUID: + if(!uuid_is_null(*fields[i].entry.uuid)) { + char u[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(*fields[i].entry.uuid, u); + rc = asprintf(&value, "%s=%s", key, u); + } + break; + case NDFT_CALLBACK: { + if(!tmp) + tmp = buffer_create(1024, NULL); + else + buffer_flush(tmp); + if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) + rc = asprintf(&value, "%s=%s", key, buffer_tostring(tmp)); + } + break; + default: + rc = asprintf(&value, "%s=%s", key, "UNHANDLED"); + break; + } + + if (rc != -1 && value) { + iov[iov_count].iov_base = value; + iov[iov_count].iov_len = strlen(value); + 
iov_count++; + } + } + + int r = sd_journal_sendv(iov, iov_count); + + // Clean up allocated memory + for (int i = 0; i < iov_count; i++) { + if (iov[i].iov_base != NULL) { + free(iov[i].iov_base); + } + } + + return r == 0; +#else + return false; +#endif +} + +bool nd_logger_journal_direct(struct log_field *fields, size_t fields_max) { + if(!nd_log.journal_direct.initialized) + return false; + + // --- FIELD_PARSER_VERSIONS --- + // + // IMPORTANT: + // THERE ARE 6 VERSIONS OF THIS CODE + // + // 1. journal (direct socket API), + // 2. journal (libsystemd API), + // 3. logfmt, + // 4. json, + // 5. convert to uint64 + // 6. convert to int64 + // + // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES + + CLEAN_BUFFER *wb = buffer_create(4096, NULL); + CLEAN_BUFFER *tmp = NULL; + + for (size_t i = 0; i < fields_max; i++) { + if (!fields[i].entry.set || !fields[i].journal) + continue; + + const char *key = fields[i].journal; + + const char *s = NULL; + switch(fields[i].entry.type) { + case NDFT_TXT: + s = fields[i].entry.txt; + break; + case NDFT_STR: + s = string2str(fields[i].entry.str); + break; + case NDFT_BFR: + s = buffer_tostring(fields[i].entry.bfr); + break; + case NDFT_U64: + buffer_strcat(wb, key); + buffer_putc(wb, '='); + buffer_print_uint64(wb, fields[i].entry.u64); + buffer_putc(wb, '\n'); + break; + case NDFT_I64: + buffer_strcat(wb, key); + buffer_putc(wb, '='); + buffer_print_int64(wb, fields[i].entry.i64); + buffer_putc(wb, '\n'); + break; + case NDFT_DBL: + buffer_strcat(wb, key); + buffer_putc(wb, '='); + buffer_print_netdata_double(wb, fields[i].entry.dbl); + buffer_putc(wb, '\n'); + break; + case NDFT_UUID: + if(!uuid_is_null(*fields[i].entry.uuid)) { + char u[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(*fields[i].entry.uuid, u); + buffer_strcat(wb, key); + buffer_putc(wb, '='); + buffer_fast_strcat(wb, u, sizeof(u) - 1); + buffer_putc(wb, '\n'); + } + break; + case NDFT_CALLBACK: { + if(!tmp) + tmp = buffer_create(1024, NULL); + else + 
// Helper macro to create wide string literals
#define WIDEN2(x) L ## x
#define WIDEN(x) WIDEN2(x)

#define NETDATA_ETW_PROVIDER_GUID_STR "{96c5ca72-9bd8-4634-81e5-000014e7da7a}"
// BUG FIX: widen the *_STR string macro. The previous operand,
// NETDATA_ETW_PROVIDER_GUID, is not a string literal, and because ##
// operands are pasted before macro expansion, L ## NETDATA_ETW_PROVIDER_GUID
// produced the single undefined identifier LNETDATA_ETW_PROVIDER_GUID,
// which fails to compile at any use site of the wide GUID string.
#define NETDATA_ETW_PROVIDER_GUID_STR_W WIDEN(NETDATA_ETW_PROVIDER_GUID_STR)
WIDEN(NETDATA_WEL_PROVIDER_ACCESS) + +#define NETDATA_WEL_PROVIDER_ACLK NETDATA_WEL_PROVIDER_PREFIX "Aclk" +#define NETDATA_WEL_PROVIDER_ACLK_W WIDEN(NETDATA_WEL_PROVIDER_ACLK) + +#define NETDATA_WEL_PROVIDER_COLLECTORS NETDATA_WEL_PROVIDER_PREFIX "Collectors" +#define NETDATA_WEL_PROVIDER_COLLECTORS_W WIDEN(NETDATA_WEL_PROVIDER_COLLECTORS) + +#define NETDATA_WEL_PROVIDER_DAEMON NETDATA_WEL_PROVIDER_PREFIX "Daemon" +#define NETDATA_WEL_PROVIDER_DAEMON_W WIDEN(NETDATA_WEL_PROVIDER_DAEMON) + +#define NETDATA_WEL_PROVIDER_HEALTH NETDATA_WEL_PROVIDER_PREFIX "Health" +#define NETDATA_WEL_PROVIDER_HEALTH_W WIDEN(NETDATA_WEL_PROVIDER_HEALTH) + + +#define NETDATA_ETW_SUBCHANNEL_ACCESS "Access" +#define NETDATA_ETW_SUBCHANNEL_ACCESS_W WIDEN(NETDATA_ETW_SUBCHANNEL_ACCESS) + +#define NETDATA_ETW_SUBCHANNEL_ACLK "Aclk" +#define NETDATA_ETW_SUBCHANNEL_ACLK_W WIDEN(NETDATA_ETW_SUBCHANNEL_ACLK) + +#define NETDATA_ETW_SUBCHANNEL_COLLECTORS "Collectors" +#define NETDATA_ETW_SUBCHANNEL_COLLECTORS_W WIDEN(NETDATA_ETW_SUBCHANNEL_COLLECTORS) + +#define NETDATA_ETW_SUBCHANNEL_DAEMON "Daemon" +#define NETDATA_ETW_SUBCHANNEL_DAEMON_W WIDEN(NETDATA_ETW_SUBCHANNEL_DAEMON) + +#define NETDATA_ETW_SUBCHANNEL_HEALTH "Health" +#define NETDATA_ETW_SUBCHANNEL_HEALTH_W WIDEN(NETDATA_ETW_SUBCHANNEL_HEALTH) + +// Define shift values +#define EVENT_ID_SEV_SHIFT 30 +#define EVENT_ID_C_SHIFT 29 +#define EVENT_ID_R_SHIFT 28 +#define EVENT_ID_FACILITY_SHIFT 16 +#define EVENT_ID_CODE_SHIFT 0 + +#define EVENT_ID_PRIORITY_SHIFT 0 // Shift 0 bits +#define EVENT_ID_SOURCE_SHIFT 4 // Shift 4 bits + +// Define masks +#define EVENT_ID_SEV_MASK 0xC0000000 // Bits 31-30 +#define EVENT_ID_C_MASK 0x20000000 // Bit 29 +#define EVENT_ID_R_MASK 0x10000000 // Bit 28 +#define EVENT_ID_FACILITY_MASK 0x0FFF0000 // Bits 27-16 +#define EVENT_ID_CODE_MASK 0x0000FFFF // Bits 15-0 + +#define EVENT_ID_PRIORITY_MASK 0x000F // Bits 0-3 +#define EVENT_ID_SOURCE_MASK 0x00F0 // Bits 4-7 + +typedef enum __attribute__((packed)) { + 
MSGID_MESSAGE_ONLY = 1, + MSGID_MESSAGE_ERRNO, + MSGID_REQUEST_ONLY, + MSGID_ALERT_TRANSITION, + MSGID_ACCESS, + MSGID_ACCESS_FORWARDER, + MSGID_ACCESS_USER, + MSGID_ACCESS_FORWARDER_USER, + MSGID_ACCESS_MESSAGE, + MSGID_ACCESS_MESSAGE_REQUEST, + MSGID_ACCESS_MESSAGE_USER, + + // terminator + _MSGID_MAX, +} MESSAGE_ID; + +static inline uint32_t get_event_type_from_priority(ND_LOG_FIELD_PRIORITY priority) { + switch (priority) { + case NDLP_EMERG: + case NDLP_ALERT: + case NDLP_CRIT: + case NDLP_ERR: + return EVENTLOG_ERROR_TYPE; + + case NDLP_WARNING: + return EVENTLOG_WARNING_TYPE; + + case NDLP_NOTICE: + case NDLP_INFO: + case NDLP_DEBUG: + default: + return EVENTLOG_INFORMATION_TYPE; + } +} + +static inline uint8_t get_severity_from_priority(ND_LOG_FIELD_PRIORITY priority) { + switch (priority) { + case NDLP_EMERG: + case NDLP_ALERT: + case NDLP_CRIT: + case NDLP_ERR: + return STATUS_SEVERITY_ERROR; + + case NDLP_WARNING: + return STATUS_SEVERITY_WARNING; + + case NDLP_NOTICE: + case NDLP_INFO: + case NDLP_DEBUG: + default: + return STATUS_SEVERITY_INFORMATIONAL; + } +} + +static inline uint8_t get_level_from_priority(ND_LOG_FIELD_PRIORITY priority) { + switch (priority) { + // return 0 = log an event regardless of any filtering applied + + case NDLP_EMERG: + case NDLP_ALERT: + case NDLP_CRIT: + return 1; + + case NDLP_ERR: + return 2; + + case NDLP_WARNING: + return 3; + + case NDLP_NOTICE: + case NDLP_INFO: + return 4; + + case NDLP_DEBUG: + default: + return 5; + } +} + +static inline const char *get_level_from_priority_str(ND_LOG_FIELD_PRIORITY priority) { + switch (priority) { + // return "win:LogAlways" to log an event regardless of any filtering applied + + case NDLP_EMERG: + case NDLP_ALERT: + case NDLP_CRIT: + return "win:Critical"; + + case NDLP_ERR: + return "win:Error"; + + case NDLP_WARNING: + return "win:Warning"; + + case NDLP_NOTICE: + case NDLP_INFO: + return "win:Informational"; + + case NDLP_DEBUG: + default: + return "win:Verbose"; + } +} + 
+static inline uint16_t construct_event_code(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, MESSAGE_ID messageID) { + return (source << 12 | priority << 8 | messageID << 0); +} + +#endif //NETDATA_ND_LOG_TO_WINDOWS_COMMON_H diff --git a/src/libnetdata/log/nd_log-to-windows-events.c b/src/libnetdata/log/nd_log-to-windows-events.c new file mode 100644 index 00000000000000..61d87ad1b7ac9f --- /dev/null +++ b/src/libnetdata/log/nd_log-to-windows-events.c @@ -0,0 +1,560 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log-internals.h" + +#if defined(OS_WINDOWS) && (defined(HAVE_ETW) || defined(HAVE_WEL)) +#include +#include +#include +#include +#include +#include + +// -------------------------------------------------------------------------------------------------------------------- +// construct an event id + +// load message resources generated header +#include "wevt_netdata.h" + +// include the common definitions with the message resources and manifest generator +#include "nd_log-to-windows-common.h" + +#if defined(HAVE_ETW) +// we need the manifest, only in ETW mode + +// eliminate compiler warnings and load manifest generated header +#undef EXTERN_C +#define EXTERN_C +#undef __declspec +#define __declspec(x) +#include "wevt_netdata_manifest.h" + +static REGHANDLE regHandle; +#endif + +// Function to construct EventID +static DWORD complete_event_id(DWORD facility, DWORD severity, DWORD event_code) { + DWORD event_id = 0; + + // Set Severity + event_id |= ((DWORD)(severity) << EVENT_ID_SEV_SHIFT) & EVENT_ID_SEV_MASK; + + // Set Customer Code Flag (C) + event_id |= (0x0 << EVENT_ID_C_SHIFT) & EVENT_ID_C_MASK; + + // Set Reserved Bit (R) - typically 0 + event_id |= (0x0 << EVENT_ID_R_SHIFT) & EVENT_ID_R_MASK; + + // Set Facility + event_id |= ((DWORD)(facility) << EVENT_ID_FACILITY_SHIFT) & EVENT_ID_FACILITY_MASK; + + // Set Code + event_id |= ((DWORD)(event_code) << EVENT_ID_CODE_SHIFT) & EVENT_ID_CODE_MASK; + + return event_id; +} + +DWORD 
construct_event_id(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, MESSAGE_ID messageID) { + DWORD event_code = construct_event_code(source, priority, messageID); + return complete_event_id(FACILITY_NETDATA, get_severity_from_priority(priority), event_code); +} + +static bool check_event_id(ND_LOG_SOURCES source __maybe_unused, ND_LOG_FIELD_PRIORITY priority __maybe_unused, MESSAGE_ID messageID __maybe_unused, DWORD event_code __maybe_unused) { +#ifdef NETDATA_INTERNAL_CHECKS + DWORD generated = construct_event_id(source, priority, messageID); + if(generated != event_code) { + + // this is just used for a break point, to see the values in hex + char current[UINT64_HEX_MAX_LENGTH]; + print_uint64_hex(current, generated); + + char wanted[UINT64_HEX_MAX_LENGTH]; + print_uint64_hex(wanted, event_code); + + const char *got = current; + const char *good = wanted; + internal_fatal(true, "EventIDs mismatch, expected %s, got %s", good, got); + } +#endif + + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- +// initialization + +// Define provider names per source (only when not using ETW) +static const wchar_t *wel_provider_per_source[_NDLS_MAX] = { + [NDLS_UNSET] = NULL, // not used, linked to NDLS_DAEMON + [NDLS_ACCESS] = NETDATA_WEL_PROVIDER_ACCESS_W, // + [NDLS_ACLK] = NETDATA_WEL_PROVIDER_ACLK_W, // + [NDLS_COLLECTORS] = NETDATA_WEL_PROVIDER_COLLECTORS_W,// + [NDLS_DAEMON] = NETDATA_WEL_PROVIDER_DAEMON_W, // + [NDLS_HEALTH] = NETDATA_WEL_PROVIDER_HEALTH_W, // + [NDLS_DEBUG] = NULL, // used, linked to NDLS_DAEMON +}; + +bool wel_replace_program_with_wevt_netdata_dll(wchar_t *str, size_t size) { + const wchar_t *replacement = L"\\wevt_netdata.dll"; + + // Find the last occurrence of '\\' to isolate the filename + wchar_t *lastBackslash = wcsrchr(str, L'\\'); + + if (lastBackslash != NULL) { + // Calculate new length after replacement + size_t newLen = (lastBackslash - str) + 
wcslen(replacement); + + // Ensure new length does not exceed buffer size + if (newLen >= size) + return false; // Not enough space in the buffer + + // Terminate the string at the last backslash + *lastBackslash = L'\0'; + + // Append the replacement filename + wcsncat(str, replacement, size - wcslen(str) - 1); + + // Check if the new file exists + if (GetFileAttributesW(str) != INVALID_FILE_ATTRIBUTES) + return true; // The file exists + else + return false; // The file does not exist + } + + return false; // No backslash found (likely invalid input) +} + +static bool wel_add_to_registry(const wchar_t *channel, const wchar_t *provider, DWORD defaultMaxSize) { + // Build the registry path: SYSTEM\CurrentControlSet\Services\EventLog\\ + wchar_t key[MAX_PATH]; + if(!provider) + swprintf(key, MAX_PATH, L"SYSTEM\\CurrentControlSet\\Services\\EventLog\\%ls", channel); + else + swprintf(key, MAX_PATH, L"SYSTEM\\CurrentControlSet\\Services\\EventLog\\%ls\\%ls", channel, provider); + + HKEY hRegKey; + DWORD disposition; + LONG result = RegCreateKeyExW(HKEY_LOCAL_MACHINE, key, + 0, NULL, REG_OPTION_NON_VOLATILE, KEY_SET_VALUE, NULL, &hRegKey, &disposition); + + if (result != ERROR_SUCCESS) + return false; // Could not create the registry key + + // Check if MaxSize is already set + DWORD maxSize = 0; + DWORD size = sizeof(maxSize); + if (RegQueryValueExW(hRegKey, L"MaxSize", NULL, NULL, (LPBYTE)&maxSize, &size) != ERROR_SUCCESS) { + // MaxSize is not set, set it to the default value + RegSetValueExW(hRegKey, L"MaxSize", 0, REG_DWORD, (const BYTE*)&defaultMaxSize, sizeof(defaultMaxSize)); + } + + wchar_t modulePath[MAX_PATH]; + if (GetModuleFileNameW(NULL, modulePath, MAX_PATH) == 0) { + RegCloseKey(hRegKey); + return false; + } + + if(wel_replace_program_with_wevt_netdata_dll(modulePath, _countof(modulePath))) { + RegSetValueExW(hRegKey, L"EventMessageFile", 0, REG_EXPAND_SZ, + (LPBYTE)modulePath, (wcslen(modulePath) + 1) * sizeof(wchar_t)); + + DWORD types_supported = 
EVENTLOG_SUCCESS | EVENTLOG_ERROR_TYPE | EVENTLOG_WARNING_TYPE | EVENTLOG_INFORMATION_TYPE; + RegSetValueExW(hRegKey, L"TypesSupported", 0, REG_DWORD, (LPBYTE)&types_supported, sizeof(DWORD)); + } + + RegCloseKey(hRegKey); + return true; +} + +#if defined(HAVE_ETW) +static void etw_set_source_meta(struct nd_log_source *source, USHORT channelID, const EVENT_DESCRIPTOR *ed) { + // It turns out that the keyword varies per only per channel! + // so, to log with the right keyword, Task, Opcode we copy the ids from the header + // the messages compiler (mc.exe) generated from the manifest. + + source->channelID = channelID; + source->Opcode = ed->Opcode; + source->Task = ed->Task; + source->Keyword = ed->Keyword; +} + +static bool etw_register_provider(void) { + // Register the ETW provider + if (EventRegister(&NETDATA_ETW_PROVIDER_GUID, NULL, NULL, ®Handle) != ERROR_SUCCESS) + return false; + + etw_set_source_meta(&nd_log.sources[NDLS_DAEMON], CHANNEL_DAEMON, &ED_DAEMON_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_COLLECTORS], CHANNEL_COLLECTORS, &ED_COLLECTORS_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_ACCESS], CHANNEL_ACCESS, &ED_ACCESS_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_HEALTH], CHANNEL_HEALTH, &ED_HEALTH_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_ACLK], CHANNEL_ACLK, &ED_ACLK_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_UNSET], CHANNEL_DAEMON, &ED_DAEMON_INFO_MESSAGE_ONLY); + etw_set_source_meta(&nd_log.sources[NDLS_DEBUG], CHANNEL_DAEMON, &ED_DAEMON_INFO_MESSAGE_ONLY); + + return true; +} +#endif + +bool nd_log_init_windows(void) { + if(nd_log.eventlog.initialized) + return true; + + // validate we have the right keys + if( + !check_event_id(NDLS_COLLECTORS, NDLP_INFO, MSGID_MESSAGE_ONLY, MC_COLLECTORS_INFO_MESSAGE_ONLY) || + !check_event_id(NDLS_DAEMON, NDLP_ERR, MSGID_MESSAGE_ONLY, MC_DAEMON_ERR_MESSAGE_ONLY) || + !check_event_id(NDLS_ACCESS, NDLP_WARNING, 
MSGID_ACCESS_USER, MC_ACCESS_WARN_ACCESS_USER) || + !check_event_id(NDLS_HEALTH, NDLP_CRIT, MSGID_ALERT_TRANSITION, MC_HEALTH_CRIT_ALERT_TRANSITION) || + !check_event_id(NDLS_DEBUG, NDLP_ALERT, MSGID_ACCESS_FORWARDER_USER, MC_DEBUG_ALERT_ACCESS_FORWARDER_USER)) + return false; + +#if defined(HAVE_ETW) + if(nd_log.eventlog.etw && !etw_register_provider()) + return false; +#endif + +// if(!nd_log.eventlog.etw && !wel_add_to_registry(NETDATA_WEL_CHANNEL_NAME_W, NULL, 50 * 1024 * 1024)) +// return false; + + // Loop through each source and add it to the registry + for(size_t i = 0; i < _NDLS_MAX; i++) { + nd_log.sources[i].source = i; + + const wchar_t *sub_channel = wel_provider_per_source[i]; + + if(!sub_channel) + // we will map these to NDLS_DAEMON + continue; + + DWORD defaultMaxSize = 0; + switch (i) { + case NDLS_ACLK: + defaultMaxSize = 5 * 1024 * 1024; + break; + + case NDLS_HEALTH: + defaultMaxSize = 35 * 1024 * 1024; + break; + + default: + case NDLS_ACCESS: + case NDLS_COLLECTORS: + case NDLS_DAEMON: + defaultMaxSize = 20 * 1024 * 1024; + break; + } + + if(!nd_log.eventlog.etw) { + if(!wel_add_to_registry(NETDATA_WEL_CHANNEL_NAME_W, sub_channel, defaultMaxSize)) + return false; + + // when not using a manifest, each source is a provider + nd_log.sources[i].hEventLog = RegisterEventSourceW(NULL, sub_channel); + if (!nd_log.sources[i].hEventLog) + return false; + } + } + + if(!nd_log.eventlog.etw) { + // Map the unset ones to NDLS_DAEMON + for (size_t i = 0; i < _NDLS_MAX; i++) { + if (!nd_log.sources[i].hEventLog) + nd_log.sources[i].hEventLog = nd_log.sources[NDLS_DAEMON].hEventLog; + } + } + + nd_log.eventlog.initialized = true; + return true; +} + +bool nd_log_init_etw(void) { + nd_log.eventlog.etw = true; + return nd_log_init_windows(); +} + +bool nd_log_init_wel(void) { + nd_log.eventlog.etw = false; + return nd_log_init_windows(); +} + +// 
-------------------------------------------------------------------------------------------------------------------- +// we pass all our fields to the windows events logs +// numbered the same way we have them in memory. +// +// to avoid runtime memory allocations, we use a static allocations with ready to use buffers +// which are immediately available for logging. + +#define SMALL_WIDE_BUFFERS_SIZE 256 +#define MEDIUM_WIDE_BUFFERS_SIZE 2048 +#define BIG_WIDE_BUFFERS_SIZE 16384 +static wchar_t small_wide_buffers[_NDF_MAX][SMALL_WIDE_BUFFERS_SIZE]; +static wchar_t medium_wide_buffers[2][MEDIUM_WIDE_BUFFERS_SIZE]; +static wchar_t big_wide_buffers[2][BIG_WIDE_BUFFERS_SIZE]; + +static struct { + size_t size; + wchar_t *buf; +} fields_buffers[_NDF_MAX] = { 0 }; + +#if defined(HAVE_ETW) +static EVENT_DATA_DESCRIPTOR etw_eventData[_NDF_MAX - 1]; +#endif + +static LPCWSTR wel_messages[_NDF_MAX - 1]; + +__attribute__((constructor)) void wevents_initialize_buffers(void) { + for(size_t i = 0; i < _NDF_MAX ;i++) { + fields_buffers[i].buf = small_wide_buffers[i]; + fields_buffers[i].size = SMALL_WIDE_BUFFERS_SIZE; + } + + fields_buffers[NDF_NIDL_INSTANCE].buf = medium_wide_buffers[0]; + fields_buffers[NDF_NIDL_INSTANCE].size = MEDIUM_WIDE_BUFFERS_SIZE; + + fields_buffers[NDF_REQUEST].buf = big_wide_buffers[0]; + fields_buffers[NDF_REQUEST].size = BIG_WIDE_BUFFERS_SIZE; + fields_buffers[NDF_MESSAGE].buf = big_wide_buffers[1]; + fields_buffers[NDF_MESSAGE].size = BIG_WIDE_BUFFERS_SIZE; + + for(size_t i = 1; i < _NDF_MAX ;i++) + wel_messages[i - 1] = fields_buffers[i].buf; +} + +// -------------------------------------------------------------------------------------------------------------------- + +#define is_field_set(fields, fields_max, field) ((field) < (fields_max) && (fields)[field].entry.set) + +static const char *get_field_value_unsafe(struct log_field *fields, ND_LOG_FIELD_ID i, size_t fields_max, BUFFER **tmp) { + if(!is_field_set(fields, fields_max, i) || 
// ETW / Event Viewer recursively expands %N insertion references inside
// already-substituted strings, so a literal "%1" in logged data would be
// re-expanded. Neutralize any '%' that precedes a digit by swapping it for
// U+2105 (CARE OF, "c/u"), which renders similarly but is never expanded.
// The last slot of the buffer is always NUL'ed, preserving the original
// termination guarantee.
static void etw_replace_percent_with_unicode(wchar_t *s, size_t size) {
    size_t len = wcslen(s);
    size_t limit = (len < size - 1) ? len : size - 1;

    for (size_t i = 0; i < limit; i++) {
        if (s[i] != L'%')
            continue;

        // s[i + 1] is safe to read: at worst it is the terminating NUL
        if (iswdigit(s[i + 1]))
            s[i] = 0x2105; // ℅
    }

    s[size - 1] = L'\0';
}
get_field_value_unsafe(fields, i, fields_max, tmp); + if (s && *s) { + utf8_to_utf16(fields_buffers[i].buf, (int) fields_buffers[i].size, s, -1); + + if(nd_log.eventlog.etw) + // UNBELIEVABLE! they do recursive parameter expansion in ETW... + etw_replace_percent_with_unicode(fields_buffers[i].buf, fields_buffers[i].size); + } + } +} + +static bool has_user_role_permissions(struct log_field *fields, size_t fields_max, BUFFER **tmp) { + const char *t; + + t = get_field_value_unsafe(fields, NDF_USER_NAME, fields_max, tmp); + if (*t) return true; + + t = get_field_value_unsafe(fields, NDF_USER_ROLE, fields_max, tmp); + if (*t && strcmp(t, "none") != 0) return true; + + t = get_field_value_unsafe(fields, NDF_USER_ACCESS, fields_max, tmp); + if (*t && strcmp(t, "0x0") != 0) return true; + + return false; +} + +static bool nd_logger_windows(struct nd_log_source *source, struct log_field *fields, size_t fields_max) { + if (!nd_log.eventlog.initialized) + return false; + + ND_LOG_FIELD_PRIORITY priority = NDLP_INFO; + if (fields[NDF_PRIORITY].entry.set) + priority = (ND_LOG_FIELD_PRIORITY) fields[NDF_PRIORITY].entry.u64; + + DWORD wType = get_event_type_from_priority(priority); + (void) wType; + + CLEAN_BUFFER *tmp = NULL; + + static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; + spinlock_lock(&spinlock); + wevt_generate_all_fields_unsafe(fields, fields_max, &tmp); + + MESSAGE_ID messageID; + switch (source->source) { + default: + case NDLS_DEBUG: + case NDLS_DAEMON: + case NDLS_COLLECTORS: + messageID = MSGID_MESSAGE_ONLY; + break; + + case NDLS_HEALTH: + messageID = MSGID_ALERT_TRANSITION; + break; + + case NDLS_ACCESS: + if (is_field_set(fields, fields_max, NDF_MESSAGE)) { + messageID = MSGID_ACCESS_MESSAGE; + + if (has_user_role_permissions(fields, fields_max, &tmp)) + messageID = MSGID_ACCESS_MESSAGE_USER; + else if (*get_field_value_unsafe(fields, NDF_REQUEST, fields_max, &tmp)) + messageID = MSGID_ACCESS_MESSAGE_REQUEST; + } else if (is_field_set(fields, 
fields_max, NDF_RESPONSE_CODE)) { + messageID = MSGID_ACCESS; + + if (*get_field_value_unsafe(fields, NDF_SRC_FORWARDED_FOR, fields_max, &tmp)) + messageID = MSGID_ACCESS_FORWARDER; + + if (has_user_role_permissions(fields, fields_max, &tmp)) { + if (messageID == MSGID_ACCESS) + messageID = MSGID_ACCESS_USER; + else + messageID = MSGID_ACCESS_FORWARDER_USER; + } + } else + messageID = MSGID_REQUEST_ONLY; + break; + + case NDLS_ACLK: + messageID = MSGID_MESSAGE_ONLY; + break; + } + + if (messageID == MSGID_MESSAGE_ONLY && ( + *get_field_value_unsafe(fields, NDF_ERRNO, fields_max, &tmp) || + *get_field_value_unsafe(fields, NDF_WINERROR, fields_max, &tmp))) { + messageID = MSGID_MESSAGE_ERRNO; + } + + DWORD eventID = construct_event_id(source->source, priority, messageID); + + // wType + // + // without a manifest => this determines the Level of the event + // with a manifest => Level from the manifest is used (wType ignored) + // [however it is good to have, in case the manifest is not accessible somehow] + // + + // wCategory + // + // without a manifest => numeric Task values appear + // with a manifest => Task from the manifest is used (wCategory ignored) + + BOOL rc; +#if defined(HAVE_ETW) + if (nd_log.eventlog.etw) { + // metadata based logging - ETW + + for (size_t i = 1; i < _NDF_MAX; i++) + EventDataDescCreate(&etw_eventData[i - 1], fields_buffers[i].buf, + (wcslen(fields_buffers[i].buf) + 1) * sizeof(WCHAR)); + + EVENT_DESCRIPTOR EventDesc = { + .Id = eventID & EVENT_ID_CODE_MASK, // ETW needs the raw event id + .Version = 0, + .Channel = source->channelID, + .Level = get_level_from_priority(priority), + .Opcode = source->Opcode, + .Task = source->Task, + .Keyword = source->Keyword, + }; + + rc = ERROR_SUCCESS == EventWrite(regHandle, &EventDesc, _NDF_MAX - 1, etw_eventData); + + } + else +#endif + { + // eventID based logging - WEL + rc = ReportEventW(source->hEventLog, wType, 0, eventID, NULL, _NDF_MAX - 1, 0, wel_messages, NULL); + } + + 
// Reset the thread's libc error indicator (and, on Windows, the Win32
// last-error code) so subsequent failure checks start from a clean slate.
void errno_clear(void) {
#if defined(OS_WINDOWS)
    SetLastError(ERROR_SUCCESS);
#endif

    errno = 0;
}
defined(HAVE_ETW) + case NDLM_ETW: +#endif +#if defined(HAVE_WEL) + case NDLM_WEL: +#endif + if(unlikely(!nd_log.eventlog.initialized)) { + output = NDLM_FILE; + *fpp = stderr; + *spinlock = &nd_log.std_error.spinlock; + } + else { + *fpp = NULL; + *spinlock = NULL; + } + break; +#endif + + case NDLM_SYSLOG: + if(unlikely(!nd_log.syslog.initialized)) { + output = NDLM_FILE; + *spinlock = &nd_log.std_error.spinlock; + *fpp = stderr; + } + else { + *spinlock = NULL; + *fpp = NULL; + } + break; + + case NDLM_FILE: + if(!nd_log.sources[source].fp) { + *fpp = stderr; + *spinlock = &nd_log.std_error.spinlock; + } + else { + *fpp = nd_log.sources[source].fp; + *spinlock = &nd_log.sources[source].spinlock; + } + break; + + case NDLM_STDOUT: + output = NDLM_FILE; + *fpp = stdout; + *spinlock = &nd_log.std_output.spinlock; + break; + + default: + case NDLM_DEFAULT: + case NDLM_STDERR: + output = NDLM_FILE; + *fpp = stderr; + *spinlock = &nd_log.std_error.spinlock; + break; + + case NDLM_DISABLED: + case NDLM_DEVNULL: + output = NDLM_DISABLED; + *fpp = NULL; + *spinlock = NULL; + break; + } + + return output; +} + +// -------------------------------------------------------------------------------------------------------------------- +// high level logger + +static void nd_logger_log_fields(SPINLOCK *spinlock, FILE *fp, bool limit, ND_LOG_FIELD_PRIORITY priority, + ND_LOG_METHOD output, struct nd_log_source *source, + struct log_field *fields, size_t fields_max) { + if(spinlock) + spinlock_lock(spinlock); + + // check the limits + if(limit && nd_log_limit_reached(source)) + goto cleanup; + + if(output == NDLM_JOURNAL) { + if(!nd_logger_journal_direct(fields, fields_max) && !nd_logger_journal_libsystemd(fields, fields_max)) { + // we can't log to journal, let's log to stderr + if(spinlock) + spinlock_unlock(spinlock); + + output = NDLM_FILE; + spinlock = &nd_log.std_error.spinlock; + fp = stderr; + + if(spinlock) + spinlock_lock(spinlock); + } + } + +#if defined(OS_WINDOWS) 
+#if defined(HAVE_ETW) + if(output == NDLM_ETW) { + if(!nd_logger_etw(source, fields, fields_max)) { + // we can't log to windows events, let's log to stderr + if(spinlock) + spinlock_unlock(spinlock); + + output = NDLM_FILE; + spinlock = &nd_log.std_error.spinlock; + fp = stderr; + + if(spinlock) + spinlock_lock(spinlock); + } + } +#endif +#if defined(HAVE_WEL) + if(output == NDLM_WEL) { + if(!nd_logger_wel(source, fields, fields_max)) { + // we can't log to windows events, let's log to stderr + if(spinlock) + spinlock_unlock(spinlock); + + output = NDLM_FILE; + spinlock = &nd_log.std_error.spinlock; + fp = stderr; + + if(spinlock) + spinlock_lock(spinlock); + } + } +#endif +#endif + + if(output == NDLM_SYSLOG) + nd_logger_syslog(priority, source->format, fields, fields_max); + + if(output == NDLM_FILE) + nd_logger_file(fp, source->format, fields, fields_max); + + +cleanup: + if(spinlock) + spinlock_unlock(spinlock); +} + +static void nd_logger_unset_all_thread_fields(void) { + size_t fields_max = THREAD_FIELDS_MAX; + for(size_t i = 0; i < fields_max ; i++) + thread_log_fields[i].entry.set = false; +} + +static void nd_logger_merge_log_stack_to_thread_fields(void) { + for(size_t c = 0; c < thread_log_stack_next ;c++) { + struct log_stack_entry *lgs = thread_log_stack_base[c]; + + for(size_t i = 0; lgs[i].id != NDF_STOP ; i++) { + if(lgs[i].id >= _NDF_MAX || !lgs[i].set) + continue; + + struct log_stack_entry *e = &lgs[i]; + ND_LOG_STACK_FIELD_TYPE type = lgs[i].type; + + // do not add empty / unset fields + if((type == NDFT_TXT && (!e->txt || !*e->txt)) || + (type == NDFT_BFR && (!e->bfr || !buffer_strlen(e->bfr))) || + (type == NDFT_STR && !e->str) || + (type == NDFT_UUID && (!e->uuid || uuid_is_null(*e->uuid))) || + (type == NDFT_CALLBACK && !e->cb.formatter) || + type == NDFT_UNSET) + continue; + + thread_log_fields[lgs[i].id].entry = *e; + } + } +} + +static void nd_logger(const char *file, const char *function, const unsigned long line, + ND_LOG_SOURCES 
source, ND_LOG_FIELD_PRIORITY priority, bool limit, + int saved_errno, size_t saved_winerror __maybe_unused, const char *fmt, va_list ap) { + + SPINLOCK *spinlock; + FILE *fp; + ND_LOG_METHOD output = nd_logger_select_output(source, &fp, &spinlock); + if(!IS_FINAL_LOG_METHOD(output)) + return; + + // mark all fields as unset + nd_logger_unset_all_thread_fields(); + + // flatten the log stack into the fields + nd_logger_merge_log_stack_to_thread_fields(); + + // set the common fields that are automatically set by the logging subsystem + + if(likely(!thread_log_fields[NDF_INVOCATION_ID].entry.set)) + thread_log_fields[NDF_INVOCATION_ID].entry = ND_LOG_FIELD_UUID(NDF_INVOCATION_ID, &nd_log.invocation_id); + + if(likely(!thread_log_fields[NDF_LOG_SOURCE].entry.set)) + thread_log_fields[NDF_LOG_SOURCE].entry = ND_LOG_FIELD_TXT(NDF_LOG_SOURCE, nd_log_id2source(source)); + else { + ND_LOG_SOURCES src = source; + + if(thread_log_fields[NDF_LOG_SOURCE].entry.type == NDFT_TXT) + src = nd_log_source2id(thread_log_fields[NDF_LOG_SOURCE].entry.txt, source); + else if(thread_log_fields[NDF_LOG_SOURCE].entry.type == NDFT_U64) + src = thread_log_fields[NDF_LOG_SOURCE].entry.u64; + + if(src != source && src < _NDLS_MAX) { + source = src; + output = nd_logger_select_output(source, &fp, &spinlock); + if(output != NDLM_FILE && output != NDLM_JOURNAL && output != NDLM_SYSLOG) + return; + } + } + + if(likely(!thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry.set)) + thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry = ND_LOG_FIELD_TXT(NDF_SYSLOG_IDENTIFIER, program_name); + + if(likely(!thread_log_fields[NDF_LINE].entry.set)) { + thread_log_fields[NDF_LINE].entry = ND_LOG_FIELD_U64(NDF_LINE, line); + thread_log_fields[NDF_FILE].entry = ND_LOG_FIELD_TXT(NDF_FILE, file); + thread_log_fields[NDF_FUNC].entry = ND_LOG_FIELD_TXT(NDF_FUNC, function); + } + + if(likely(!thread_log_fields[NDF_PRIORITY].entry.set)) { + thread_log_fields[NDF_PRIORITY].entry = ND_LOG_FIELD_U64(NDF_PRIORITY, priority); + } 
+ + if(likely(!thread_log_fields[NDF_TID].entry.set)) + thread_log_fields[NDF_TID].entry = ND_LOG_FIELD_U64(NDF_TID, gettid_cached()); + + if(likely(!thread_log_fields[NDF_THREAD_TAG].entry.set)) { + const char *thread_tag = nd_thread_tag(); + thread_log_fields[NDF_THREAD_TAG].entry = ND_LOG_FIELD_TXT(NDF_THREAD_TAG, thread_tag); + + // TODO: fix the ND_MODULE in logging by setting proper module name in threads +// if(!thread_log_fields[NDF_MODULE].entry.set) +// thread_log_fields[NDF_MODULE].entry = ND_LOG_FIELD_CB(NDF_MODULE, thread_tag_to_module, (void *)thread_tag); + } + + if(likely(!thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry.set)) + thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry = ND_LOG_FIELD_U64(NDF_TIMESTAMP_REALTIME_USEC, now_realtime_usec()); + + if(saved_errno != 0 && !thread_log_fields[NDF_ERRNO].entry.set) + thread_log_fields[NDF_ERRNO].entry = ND_LOG_FIELD_I64(NDF_ERRNO, saved_errno); + + if(saved_winerror != 0 && !thread_log_fields[NDF_WINERROR].entry.set) + thread_log_fields[NDF_WINERROR].entry = ND_LOG_FIELD_U64(NDF_WINERROR, saved_winerror); + + CLEAN_BUFFER *wb = NULL; + if(fmt && !thread_log_fields[NDF_MESSAGE].entry.set) { + wb = buffer_create(1024, NULL); + buffer_vsprintf(wb, fmt, ap); + thread_log_fields[NDF_MESSAGE].entry = ND_LOG_FIELD_TXT(NDF_MESSAGE, buffer_tostring(wb)); + } + + nd_logger_log_fields(spinlock, fp, limit, priority, output, &nd_log.sources[source], + thread_log_fields, THREAD_FIELDS_MAX); + + if(nd_log.sources[source].pending_msg) { + // log a pending message + + nd_logger_unset_all_thread_fields(); + + thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry = (struct log_stack_entry){ + .set = true, + .type = NDFT_U64, + .u64 = now_realtime_usec(), + }; + + thread_log_fields[NDF_LOG_SOURCE].entry = (struct log_stack_entry){ + .set = true, + .type = NDFT_TXT, + .txt = nd_log_id2source(source), + }; + + thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry = (struct log_stack_entry){ + .set = true, + .type = 
NDFT_TXT, + .txt = program_name, + }; + + thread_log_fields[NDF_MESSAGE].entry = (struct log_stack_entry){ + .set = true, + .type = NDFT_TXT, + .txt = nd_log.sources[source].pending_msg, + }; + + nd_logger_log_fields(spinlock, fp, false, priority, output, + &nd_log.sources[source], + thread_log_fields, THREAD_FIELDS_MAX); + + freez((void *)nd_log.sources[source].pending_msg); + nd_log.sources[source].pending_msg = NULL; + } + + errno_clear(); +} + +static ND_LOG_SOURCES nd_log_validate_source(ND_LOG_SOURCES source) { + if(source >= _NDLS_MAX) + source = NDLS_DAEMON; + + if(nd_log.overwrite_process_source) + source = nd_log.overwrite_process_source; + + return source; +} + +// -------------------------------------------------------------------------------------------------------------------- +// public API for loggers + +void netdata_logger(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) +{ + int saved_errno = errno; + + size_t saved_winerror = 0; +#if defined(OS_WINDOWS) + saved_winerror = GetLastError(); +#endif + + source = nd_log_validate_source(source); + + if (source != NDLS_DEBUG && priority > nd_log.sources[source].min_priority) + return; + + va_list args; + va_start(args, fmt); + nd_logger(file, function, line, source, priority, + source == NDLS_DAEMON || source == NDLS_COLLECTORS, + saved_errno, saved_winerror, fmt, args); + va_end(args); +} + +void netdata_logger_with_limit(ERROR_LIMIT *erl, ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... 
) { + int saved_errno = errno; + + size_t saved_winerror = 0; +#if defined(OS_WINDOWS) + saved_winerror = GetLastError(); +#endif + + source = nd_log_validate_source(source); + + if (source != NDLS_DEBUG && priority > nd_log.sources[source].min_priority) + return; + + if(erl->sleep_ut) + sleep_usec(erl->sleep_ut); + + spinlock_lock(&erl->spinlock); + + erl->count++; + time_t now = now_boottime_sec(); + if(now - erl->last_logged < erl->log_every) { + spinlock_unlock(&erl->spinlock); + return; + } + + spinlock_unlock(&erl->spinlock); + + va_list args; + va_start(args, fmt); + nd_logger(file, function, line, source, priority, + source == NDLS_DAEMON || source == NDLS_COLLECTORS, + saved_errno, saved_winerror, fmt, args); + va_end(args); + erl->last_logged = now; + erl->count = 0; +} + +void netdata_logger_fatal( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) { + int saved_errno = errno; + + size_t saved_winerror = 0; +#if defined(OS_WINDOWS) + saved_winerror = GetLastError(); +#endif + + ND_LOG_SOURCES source = NDLS_DAEMON; + source = nd_log_validate_source(source); + + va_list args; + va_start(args, fmt); + nd_logger(file, function, line, source, NDLP_ALERT, true, saved_errno, saved_winerror, fmt, args); + va_end(args); + + char date[LOG_DATE_LENGTH]; + log_date(date, LOG_DATE_LENGTH, now_realtime_sec()); + + char action_data[70+1]; + snprintfz(action_data, 70, "%04lu@%-10.10s:%-15.15s/%d", line, file, function, saved_errno); + + const char *thread_tag = nd_thread_tag(); + const char *tag_to_send = thread_tag; + + // anonymize thread names + if(strncmp(thread_tag, THREAD_TAG_STREAM_RECEIVER, strlen(THREAD_TAG_STREAM_RECEIVER)) == 0) + tag_to_send = THREAD_TAG_STREAM_RECEIVER; + if(strncmp(thread_tag, THREAD_TAG_STREAM_SENDER, strlen(THREAD_TAG_STREAM_SENDER)) == 0) + tag_to_send = THREAD_TAG_STREAM_SENDER; + + char action_result[60+1]; + snprintfz(action_result, 60, "%s:%s", program_name, tag_to_send); + +#if 
!defined(ENABLE_SENTRY) && defined(HAVE_BACKTRACE) + int fd = nd_log.sources[NDLS_DAEMON].fd; + if(fd == -1) + fd = STDERR_FILENO; + + int nptrs; + void *buffer[10000]; + + nptrs = backtrace(buffer, sizeof(buffer)); + if(nptrs) + backtrace_symbols_fd(buffer, nptrs, fd); +#endif + +#ifdef NETDATA_INTERNAL_CHECKS + abort(); +#endif + + netdata_cleanup_and_exit(1, "FATAL", action_result, action_data); +} + diff --git a/src/libnetdata/log/log.h b/src/libnetdata/log/nd_log.h similarity index 58% rename from src/libnetdata/log/log.h rename to src/libnetdata/log/nd_log.h index 79df798a5f09b8..1e9634d51dd418 100644 --- a/src/libnetdata/log/log.h +++ b/src/libnetdata/log/nd_log.h @@ -1,149 +1,18 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#ifndef NETDATA_LOG_H -#define NETDATA_LOG_H 1 +#ifndef NETDATA_ND_LOG_H +#define NETDATA_ND_LOG_H 1 # ifdef __cplusplus extern "C" { # endif #include "../libnetdata.h" +#include "nd_log-common.h" #define ND_LOG_DEFAULT_THROTTLE_LOGS 1000 #define ND_LOG_DEFAULT_THROTTLE_PERIOD 60 -typedef enum __attribute__((__packed__)) { - NDLS_UNSET = 0, // internal use only - NDLS_ACCESS, // access.log - NDLS_ACLK, // aclk.log - NDLS_COLLECTORS, // collector.log - NDLS_DAEMON, // error.log - NDLS_HEALTH, // health.log - NDLS_DEBUG, // debug.log - - // terminator - _NDLS_MAX, -} ND_LOG_SOURCES; - -typedef enum __attribute__((__packed__)) { - NDLP_EMERG = LOG_EMERG, - NDLP_ALERT = LOG_ALERT, - NDLP_CRIT = LOG_CRIT, - NDLP_ERR = LOG_ERR, - NDLP_WARNING = LOG_WARNING, - NDLP_NOTICE = LOG_NOTICE, - NDLP_INFO = LOG_INFO, - NDLP_DEBUG = LOG_DEBUG, -} ND_LOG_FIELD_PRIORITY; - -typedef enum __attribute__((__packed__)) { - // KEEP THESE IN THE SAME ORDER AS in thread_log_fields (log.c) - // so that it easy to audit for missing fields - - NDF_STOP = 0, - NDF_TIMESTAMP_REALTIME_USEC, // the timestamp of the log message - added automatically - NDF_SYSLOG_IDENTIFIER, // the syslog identifier of the application - added automatically - NDF_LOG_SOURCE, // DAEMON, 
COLLECTORS, HEALTH, ACCESS, ACLK - set at the log call - NDF_PRIORITY, // the syslog priority (severity) - set at the log call - NDF_ERRNO, // the ERRNO at the time of the log call - added automatically -#if defined(OS_WINDOWS) - NDF_WINERROR, // Windows GetLastError() -#endif - NDF_INVOCATION_ID, // the INVOCATION_ID of Netdata - added automatically - NDF_LINE, // the source code file line number - added automatically - NDF_FILE, // the source code filename - added automatically - NDF_FUNC, // the source code function - added automatically - NDF_TID, // the thread ID of the thread logging - added automatically - NDF_THREAD_TAG, // the thread tag of the thread logging - added automatically - NDF_MESSAGE_ID, // for specific events - NDF_MODULE, // for internal plugin module, all other get the NDF_THREAD_TAG - - NDF_NIDL_NODE, // the node / rrdhost currently being worked - NDF_NIDL_INSTANCE, // the instance / rrdset currently being worked - NDF_NIDL_CONTEXT, // the context of the instance currently being worked - NDF_NIDL_DIMENSION, // the dimension / rrddim currently being worked - - // web server, aclk and stream receiver - NDF_SRC_TRANSPORT, // the transport we received the request, one of: http, https, pluginsd - - // Netdata Cloud Related - NDF_ACCOUNT_ID, - NDF_USER_NAME, - NDF_USER_ROLE, - NDF_USER_ACCESS, - - // web server and stream receiver - NDF_SRC_IP, // the streaming / web server source IP - NDF_SRC_PORT, // the streaming / web server source Port - NDF_SRC_FORWARDED_HOST, - NDF_SRC_FORWARDED_FOR, - NDF_SRC_CAPABILITIES, // the stream receiver capabilities - - // stream sender (established links) - NDF_DST_TRANSPORT, // the transport we send the request, one of: http, https - NDF_DST_IP, // the destination streaming IP - NDF_DST_PORT, // the destination streaming Port - NDF_DST_CAPABILITIES, // the destination streaming capabilities - - // web server, aclk and stream receiver - NDF_REQUEST_METHOD, // for http like requests, the http request method - 
NDF_RESPONSE_CODE, // for http like requests, the http response code, otherwise a status string - - // web server (all), aclk (queries) - NDF_CONNECTION_ID, // the web server connection ID - NDF_TRANSACTION_ID, // the web server and API transaction ID - NDF_RESPONSE_SENT_BYTES, // for http like requests, the response bytes - NDF_RESPONSE_SIZE_BYTES, // for http like requests, the uncompressed response size - NDF_RESPONSE_PREPARATION_TIME_USEC, // for http like requests, the preparation time - NDF_RESPONSE_SENT_TIME_USEC, // for http like requests, the time to send the response back - NDF_RESPONSE_TOTAL_TIME_USEC, // for http like requests, the total time to complete the response - - // health alerts - NDF_ALERT_ID, - NDF_ALERT_UNIQUE_ID, - NDF_ALERT_EVENT_ID, - NDF_ALERT_TRANSITION_ID, - NDF_ALERT_CONFIG_HASH, - NDF_ALERT_NAME, - NDF_ALERT_CLASS, - NDF_ALERT_COMPONENT, - NDF_ALERT_TYPE, - NDF_ALERT_EXEC, - NDF_ALERT_RECIPIENT, - NDF_ALERT_DURATION, - NDF_ALERT_VALUE, - NDF_ALERT_VALUE_OLD, - NDF_ALERT_STATUS, - NDF_ALERT_STATUS_OLD, - NDF_ALERT_SOURCE, - NDF_ALERT_UNITS, - NDF_ALERT_SUMMARY, - NDF_ALERT_INFO, - NDF_ALERT_NOTIFICATION_REALTIME_USEC, - // NDF_ALERT_FLAGS, - - // put new items here - // leave the request URL and the message last - - NDF_REQUEST, // the request we are currently working on - NDF_MESSAGE, // the log message, if any - - // terminator - _NDF_MAX, -} ND_LOG_FIELD_ID; - -typedef enum __attribute__((__packed__)) { - NDFT_UNSET = 0, - NDFT_TXT, - NDFT_STR, - NDFT_BFR, - NDFT_U64, - NDFT_I64, - NDFT_DBL, - NDFT_UUID, - NDFT_CALLBACK, -} ND_LOG_STACK_FIELD_TYPE; - void errno_clear(void); void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting); void nd_log_set_facility(const char *facility); @@ -156,7 +25,7 @@ void nd_log_set_flood_protection(size_t logs, time_t period); void nd_log_initialize_for_external_plugins(const char *name); void nd_log_reopen_log_files_for_spawn_server(void); bool nd_log_journal_socket_available(void); 
-ND_LOG_FIELD_ID nd_log_field_id_by_name(const char *field, size_t len); +ND_LOG_FIELD_ID nd_log_field_id_by_journal_name(const char *field, size_t len); int nd_log_priority2id(const char *priority); const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority); const char *nd_log_method_for_external_plugins(const char *s); @@ -306,4 +175,4 @@ void netdata_logger_fatal( const char *file, const char *function, unsigned long } # endif -#endif /* NETDATA_LOG_H */ +#endif /* NETDATA_ND_LOG_H */ diff --git a/src/libnetdata/log/nd_log_limit.c b/src/libnetdata/log/nd_log_limit.c new file mode 100644 index 00000000000000..272138196c6ae5 --- /dev/null +++ b/src/libnetdata/log/nd_log_limit.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "nd_log_limit.h" + +void nd_log_limits_reset(void) { + usec_t now_ut = now_monotonic_usec(); + + spinlock_lock(&nd_log.std_output.spinlock); + spinlock_lock(&nd_log.std_error.spinlock); + + for(size_t i = 0; i < _NDLS_MAX ;i++) { + spinlock_lock(&nd_log.sources[i].spinlock); + nd_log.sources[i].limits.prevented = 0; + nd_log.sources[i].limits.counter = 0; + nd_log.sources[i].limits.started_monotonic_ut = now_ut; + nd_log.sources[i].limits.logs_per_period = nd_log.sources[i].limits.logs_per_period_backup; + spinlock_unlock(&nd_log.sources[i].spinlock); + } + + spinlock_unlock(&nd_log.std_output.spinlock); + spinlock_unlock(&nd_log.std_error.spinlock); +} + +void nd_log_limits_unlimited(void) { + nd_log_limits_reset(); + for(size_t i = 0; i < _NDLS_MAX ;i++) { + nd_log.sources[i].limits.logs_per_period = 0; + } +} + +bool nd_log_limit_reached(struct nd_log_source *source) { + if(source->limits.throttle_period == 0 || source->limits.logs_per_period == 0) + return false; + + usec_t now_ut = now_monotonic_usec(); + if(!source->limits.started_monotonic_ut) + source->limits.started_monotonic_ut = now_ut; + + source->limits.counter++; + + if(now_ut - source->limits.started_monotonic_ut > 
(usec_t)source->limits.throttle_period) { + if(source->limits.prevented) { + BUFFER *wb = buffer_create(1024, NULL); + buffer_sprintf(wb, + "LOG FLOOD PROTECTION: resuming logging " + "(prevented %"PRIu32" logs in the last %"PRIu32" seconds).", + source->limits.prevented, + source->limits.throttle_period); + + if(source->pending_msg) + freez((void *)source->pending_msg); + + source->pending_msg = strdupz(buffer_tostring(wb)); + + buffer_free(wb); + } + + // restart the period accounting + source->limits.started_monotonic_ut = now_ut; + source->limits.counter = 1; + source->limits.prevented = 0; + + // log this error + return false; + } + + if(source->limits.counter > source->limits.logs_per_period) { + if(!source->limits.prevented) { + BUFFER *wb = buffer_create(1024, NULL); + buffer_sprintf(wb, + "LOG FLOOD PROTECTION: too many logs (%"PRIu32" logs in %"PRId64" seconds, threshold is set to %"PRIu32" logs " + "in %"PRIu32" seconds). Preventing more logs from process '%s' for %"PRId64" seconds.", + source->limits.counter, + (int64_t)((now_ut - source->limits.started_monotonic_ut) / USEC_PER_SEC), + source->limits.logs_per_period, + source->limits.throttle_period, + program_name, + (int64_t)(((source->limits.started_monotonic_ut + (source->limits.throttle_period * USEC_PER_SEC) - now_ut)) / USEC_PER_SEC) + ); + + if(source->pending_msg) + freez((void *)source->pending_msg); + + source->pending_msg = strdupz(buffer_tostring(wb)); + + buffer_free(wb); + } + + source->limits.prevented++; + + // prevent logging this error +#ifdef NETDATA_INTERNAL_CHECKS + return false; +#else + return true; +#endif + } + + return false; +} diff --git a/src/libnetdata/log/nd_log_limit.h b/src/libnetdata/log/nd_log_limit.h new file mode 100644 index 00000000000000..5486abde93b3af --- /dev/null +++ b/src/libnetdata/log/nd_log_limit.h @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_ND_LOG_LIMIT_H +#define NETDATA_ND_LOG_LIMIT_H + +#include "../libnetdata.h" 
+ +struct nd_log_source; +bool nd_log_limit_reached(struct nd_log_source *source); + +struct nd_log_limit { + usec_t started_monotonic_ut; + uint32_t counter; + uint32_t prevented; + + uint32_t throttle_period; + uint32_t logs_per_period; + uint32_t logs_per_period_backup; +}; + +#define ND_LOG_LIMITS_DEFAULT (struct nd_log_limit){ .logs_per_period = ND_LOG_DEFAULT_THROTTLE_LOGS, .logs_per_period_backup = ND_LOG_DEFAULT_THROTTLE_LOGS, .throttle_period = ND_LOG_DEFAULT_THROTTLE_PERIOD, } +#define ND_LOG_LIMITS_UNLIMITED (struct nd_log_limit){ .logs_per_period = 0, .logs_per_period_backup = 0, .throttle_period = 0, } + +#include "nd_log-internals.h" + +#endif //NETDATA_ND_LOG_LIMIT_H diff --git a/src/libnetdata/log/nd_wevents_manifest.xml b/src/libnetdata/log/nd_wevents_manifest.xml new file mode 100644 index 00000000000000..9e326c1cbc9013 --- /dev/null +++ b/src/libnetdata/log/nd_wevents_manifest.xml @@ -0,0 +1,295 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/libnetdata/log/systemd-cat-native.c b/src/libnetdata/log/systemd-cat-native.c index e20bc7f481121a..71d74abdde5bf4 100644 --- a/src/libnetdata/log/systemd-cat-native.c +++ b/src/libnetdata/log/systemd-cat-native.c @@ -611,7 +611,7 @@ static int log_input_as_netdata(const char *newline, int timeout_ms) { if(equal) { const char *field = line->buffer; size_t field_len = equal - line->buffer; - ND_LOG_FIELD_ID id = nd_log_field_id_by_name(field, field_len); + ND_LOG_FIELD_ID id = nd_log_field_id_by_journal_name(field, field_len); if(id != NDF_STOP) { const char *value = ++equal; diff --git a/src/libnetdata/log/journal.c b/src/libnetdata/log/systemd-journal-helpers.c similarity index 99% rename from src/libnetdata/log/journal.c rename to src/libnetdata/log/systemd-journal-helpers.c index 2182212f6dcfc7..24553364b9e430 100644 --- 
a/src/libnetdata/log/journal.c +++ b/src/libnetdata/log/systemd-journal-helpers.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "journal.h" +#include "systemd-journal-helpers.h" bool is_path_unix_socket(const char *path) { // Check if the path is valid diff --git a/src/libnetdata/log/journal.h b/src/libnetdata/log/systemd-journal-helpers.h similarity index 75% rename from src/libnetdata/log/journal.h rename to src/libnetdata/log/systemd-journal-helpers.h index df8ece18b01f74..a85f8e85a9ffa8 100644 --- a/src/libnetdata/log/journal.h +++ b/src/libnetdata/log/systemd-journal-helpers.h @@ -2,8 +2,8 @@ #include "../libnetdata.h" -#ifndef NETDATA_LOG_JOURNAL_H -#define NETDATA_LOG_JOURNAL_H +#ifndef NETDATA_LOG_SYSTEMD_JOURNAL_HELPERS_H +#define NETDATA_LOG_SYSTEMD_JOURNAL_HELPERS_H #define JOURNAL_DIRECT_SOCKET "/run/systemd/journal/socket" @@ -15,4 +15,4 @@ bool journal_direct_send(int fd, const char *msg, size_t msg_len); bool is_path_unix_socket(const char *path); bool is_stderr_connected_to_journal(void); -#endif //NETDATA_LOG_JOURNAL_H +#endif // NETDATA_LOG_SYSTEMD_JOURNAL_HELPERS_H diff --git a/src/libnetdata/log/wevt_netdata_compile.bat b/src/libnetdata/log/wevt_netdata_compile.bat new file mode 100644 index 00000000000000..279b6c31b617e4 --- /dev/null +++ b/src/libnetdata/log/wevt_netdata_compile.bat @@ -0,0 +1,121 @@ +@echo off +setlocal enabledelayedexpansion + +echo PATH=%PATH% + +if "%~1"=="" ( + echo Error: Missing .mc file path. + goto :usage +) +if "%~2"=="" ( + echo Error: Missing destination directory. 
+ goto :usage +) + +REM Set variables +set "SRC_DIR=%~1" +set "BIN_DIR=%~2" +set "MC_FILE=%BIN_DIR%\wevt_netdata.mc" +set "MAN_FILE=%BIN_DIR%\wevt_netdata_manifest.xml" +set "BASE_NAME=wevt_netdata" +set "SDK_PATH=C:\Program Files (x86)\Windows Kits\10\bin\10.0.26100.0\x64" +set "VS_PATH=C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.39.33519\bin\Hostx64\x64" + +if not exist "%SRC_DIR%" ( + echo Error: Source directory does not exist. + exit /b 1 +) + +if not exist "%BIN_DIR%" ( + echo Error: Destination directory does not exist. + exit /b 1 +) + +if not exist "%MC_FILE%" ( + echo Error: %MC_FILE% not found. + exit /b 1 +) + +if not exist "%MAN_FILE%" ( + echo Error: %MAN_FILE% not found. + exit /b 1 +) + +REM Add SDK paths to PATH +set "PATH=C:\Windows\System32;%SDK_PATH%;%VS_PATH%;%PATH%" + +REM Check if commands are available +where mc >nul 2>nul +if %errorlevel% neq 0 ( + echo Error: mc.exe not found in PATH. + exit /b 1 +) +where rc >nul 2>nul +if %errorlevel% neq 0 ( + echo Error: rc.exe not found in PATH. + exit /b 1 +) +where link >nul 2>nul +if %errorlevel% neq 0 ( + echo Error: link.exe not found in PATH. + exit /b 1 +) +where wevtutil >nul 2>nul +if %errorlevel% neq 0 ( + echo Error: wevtutil.exe not found in PATH. + exit /b 1 +) + +REM Change to the destination directory +cd /d "%BIN_DIR%" + +echo. +echo Running mc.exe... +mc -v -b -U "%MC_FILE%" "%MAN_FILE%" +if %errorlevel% neq 0 ( + echo Error: mc.exe failed on messages. + exit /b 1 +) + +if not exist "%BASE_NAME%.rc" ( + echo Error: %BASE_NAME%.rc not found. + exit /b 1 +) + +echo. +echo Modifying %BASE_NAME%.rc to include the manifest... +copy "%MAN_FILE%" %BASE_NAME%_manifest.man +echo 1 2004 "%BASE_NAME%_manifest.man" >> %BASE_NAME%.rc + +echo. +echo %BASE_NAME%.rc contents: +type %BASE_NAME%.rc + +echo. +echo Running rc.exe... +rc /v /fo %BASE_NAME%.res %BASE_NAME%.rc +if %errorlevel% neq 0 ( + echo Error: rc.exe failed. 
+ exit /b 1 +) + +if not exist "%BASE_NAME%.res" ( + echo Error: %BASE_NAME%.res not found. + exit /b 1 +) + +echo. +echo Running link.exe... +link /dll /noentry /machine:x64 /out:%BASE_NAME%.dll %BASE_NAME%.res +if %errorlevel% neq 0 ( + echo Error: link.exe failed. + exit /b 1 +) + +echo. +echo Process completed successfully. +exit /b 0 + +:usage +echo Usage: %~nx0 [path_to_mc_file] [destination_directory] +exit /b 1 diff --git a/src/libnetdata/log/wevt_netdata_compile.sh b/src/libnetdata/log/wevt_netdata_compile.sh new file mode 100644 index 00000000000000..eae510645d09fb --- /dev/null +++ b/src/libnetdata/log/wevt_netdata_compile.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +mylocation=$(dirname "${0}") + +# Check if both parameters are provided +if [ $# -ne 2 ]; then + echo "Error: Incorrect number of parameters." + echo "Usage: $0 " + exit 1 +fi + +# Get the parameters +src_dir="$1" +dest_dir="$2" + +# Get the directory of this script +SCRIPT_DIR="$(dirname "$0")" + +# Create a temporary batch file +temp_bat=$(mktemp --suffix=.bat) + +# Write the contents to the temporary batch file +# Use cygpath directly within the heredoc +cat << EOF > "$temp_bat" +@echo off +set "PATH=%SYSTEMROOT%;$("${mylocation}/../../../packaging/windows/find-sdk-path.sh" --sdk -w);$("${mylocation}/../../../packaging/windows/find-sdk-path.sh" --visualstudio -w)" +call "$(cygpath -w -a "$SCRIPT_DIR/wevt_netdata_compile.bat")" "$(cygpath -w -a "$src_dir")" "$(cygpath -w -a "$dest_dir")" +EOF + +# Execute the temporary batch file +echo +echo "Executing Windows Batch File..." +echo +cat "$temp_bat" +cmd.exe //c "$(cygpath -w -a "$temp_bat")" +exit_status=$? + +# Remove the temporary batch file +rm "$temp_bat" + +# Check the exit status +if [ $exit_status -eq 0 ]; then + echo "nd_wevents_compile.bat executed successfully." +else + echo "nd_wevents_compile.bat failed with exit status $exit_status." 
+fi + +exit $exit_status diff --git a/src/libnetdata/log/wevt_netdata_install.bat b/src/libnetdata/log/wevt_netdata_install.bat new file mode 100644 index 00000000000000..51560759274b39 --- /dev/null +++ b/src/libnetdata/log/wevt_netdata_install.bat @@ -0,0 +1,52 @@ +@echo off +setlocal enabledelayedexpansion + +set "MAN_SRC=%~dp0wevt_netdata_manifest.xml" +set "DLL_SRC=%~dp0wevt_netdata.dll" +set "DLL_DST=%SystemRoot%\System32\wevt_netdata.dll" + +where wevtutil >nul 2>nul +if %errorlevel% neq 0 ( + echo Error: wevtutil.exe not found in PATH. + exit /b 1 +) + +echo. +echo Uninstalling previous manifest (if any)... +wevtutil um "%MAN_SRC%" + +echo. +echo Copying %DLL_SRC% to %DLL_DST% +copy /y "%DLL_SRC%" "%DLL_DST%" +if %errorlevel% neq 0 ( + echo Error: Failed to copy %DLL_SRC% to %DLL_DST% + exit /b 1 +) + +echo. +echo Granting access to %DLL_DST% for Windows Event Logging... +icacls "%DLL_DST%" /grant "NT SERVICE\EventLog":R +if %errorlevel% neq 0 ( + echo Error: Failed to grant access to %DLL_DST%. + exit /b 1 +) + +echo. +echo Importing the manifest... +wevtutil im "%MAN_SRC%" /rf:"%DLL_DST%" /mf:"%DLL_DST%" +if %errorlevel% neq 0 ( + echo Error: Failed to import the manifest. + exit /b 1 +) + +echo. +echo Verifying Netdata Publisher for Event Tracing for Windows (ETW)... +wevtutil gp "Netdata" +if %errorlevel% neq 0 ( + echo Error: Failed to get publisher Netdata. + exit /b 1 +) + +echo. +echo Netdata Event Tracing for Windows manifest installed successfully. 
+exit /b 0 diff --git a/src/libnetdata/log/wevt_netdata_mc_generate.c b/src/libnetdata/log/wevt_netdata_mc_generate.c new file mode 100644 index 00000000000000..5ab2bdf17926e1 --- /dev/null +++ b/src/libnetdata/log/wevt_netdata_mc_generate.c @@ -0,0 +1,518 @@ +#include +#include +#include +#include +#include +#include + +// from winnt.h +#define EVENTLOG_SUCCESS 0x0000 +#define EVENTLOG_ERROR_TYPE 0x0001 +#define EVENTLOG_WARNING_TYPE 0x0002 +#define EVENTLOG_INFORMATION_TYPE 0x0004 +#define EVENTLOG_AUDIT_SUCCESS 0x0008 +#define EVENTLOG_AUDIT_FAILURE 0x0010 + +// the severities we define in .mc file +#define STATUS_SEVERITY_INFORMATIONAL 0x1 +#define STATUS_SEVERITY_WARNING 0x2 +#define STATUS_SEVERITY_ERROR 0x3 + +#define FACILITY_APPLICATION 0x0fff + +#include "nd_log-common.h" +#include "nd_log-to-windows-common.h" + +const char *get_msg_symbol(MESSAGE_ID msg) { + switch(msg) { + case MSGID_MESSAGE_ONLY: + return "MESSAGE_ONLY"; + + case MSGID_MESSAGE_ERRNO: + return "MESSAGE_ERRNO"; + + case MSGID_REQUEST_ONLY: + return "REQUEST_ONLY"; + + case MSGID_ACCESS_MESSAGE: + return "ACCESS_MESSAGE"; + + case MSGID_ACCESS_MESSAGE_REQUEST: + return "ACCESS_MESSAGE_REQUEST"; + + case MSGID_ACCESS_MESSAGE_USER: + return "ACCESS_MESSAGE_USER"; + + case MSGID_ACCESS: + return "ACCESS"; + + case MSGID_ACCESS_USER: + return "ACCESS_USER"; + + case MSGID_ACCESS_FORWARDER: + return "ACCESS_FORWARDER"; + + case MSGID_ACCESS_FORWARDER_USER: + return "ACCESS_FORWARDER_USER"; + + case MSGID_ALERT_TRANSITION: + return "ALERT_TRANSITION"; + + default: + fprintf(stderr, "\n\nInvalid message id %d!\n\n\n", msg); + exit(1); + } +} + +const char *get_msg_format(MESSAGE_ID msg) { + switch(msg) { + case MSGID_MESSAGE_ONLY: + return "%2(%12): %64\r\n"; + + case MSGID_MESSAGE_ERRNO: + return "%2(%12): %64%n\r\n" + "%n\r\n" + " Unix Errno : %5%n\r\n" + " Windows Error: %6%n\r\n" + ; + + case MSGID_REQUEST_ONLY: + return "%2(%12): %63\r\n"; + + case MSGID_ACCESS_MESSAGE: + return "%64\r\n"; 
+ + case MSGID_ACCESS_MESSAGE_REQUEST: + return "%64%n\r\n" + "%n\r\n" + " Request: %63%n\r\n" + ; + + case MSGID_ACCESS_MESSAGE_USER: + return "%64%n\r\n" + "%n\r\n" + " User: %21, role: %22, permissions: %23%n\r\n" + ; + + case MSGID_ACCESS: + return "%33 %63%n\r\n" + "%n\r\n" + " Response Code : %34%n\r\n" + " Transaction ID: %36%n\r\n" + " Source IP : %24%n\r\n" + ; + + case MSGID_ACCESS_USER: + return "%33 %63%n\r\n" + "%n\r\n" + " Response Code : %34%n\r\n" + " Transaction ID: %36%n\r\n" + " Source IP : %24%n\r\n" + " User : %21, role: %22, permissions: %23%n\r\n" + ; + + case MSGID_ACCESS_FORWARDER: + return "%33 %63%n\r\n" + "%n\r\n" + " Response Code : %34%n\r\n" + " Transaction ID: %36%n\r\n" + " Source IP : %24, For %27%n\r\n" + ; + + case MSGID_ACCESS_FORWARDER_USER: + return "%33 %63%n\r\n" + "%n\r\n" + " Response Code : %34%n\r\n" + " Transaction ID: %36%n\r\n" + " Source IP : %24, For %27%n\r\n" + " User : %21, role: %22, permissions: %23%n\r\n" + ; + + case MSGID_ALERT_TRANSITION: + return "Alert '%47' of instance '%16' on node '%15' transitioned from %57 to %56\r\n"; + + default: + fprintf(stderr, "\n\nInvalid message id %d!\n\n\n", msg); + exit(1); + } +} + +int main(int argc, const char **argv) { + (void)argc; (void)argv; + + const char *header = NULL, *footer = NULL, *s_header = NULL, *s_footer = NULL; + + bool manifest = false; + if(argc == 2 && strcmp(argv[1], "--manifest") == 0) { + manifest = true; + + header = "\r\n" + "\r\n" + "\r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + "\r\n" + " \r\n" + "\r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + ; + + footer = " \r\n" + " \r\n" + " \r\n" + " \r\n" + ; + + s_header = " \r\n" + " \r\n" + " \r\n" + " \r\n" + "\r\n" + " \r\n" + " \r\n" + " \r\n" + " \r\n" + " \r\n" + "\r\n" + ; + + s_footer = " \r\n" 
+ " \r\n" + " \r\n" + "\r\n" + ; + } + else { + header = ";// THIS FILE IS AUTOMATICALLY GENERATED - DO NOT EDIT\r\n" + "\r\n" + "MessageIdTypedef=DWORD\r\n" + "\r\n" + "SeverityNames=(\r\n" + " Informational=0x1:STATUS_SEVERITY_INFORMATIONAL\r\n" + " Warning=0x2:STATUS_SEVERITY_WARNING\r\n" + " Error=0x3:STATUS_SEVERITY_ERROR\r\n" + " )\r\n" + "\r\n" + "FacilityNames=(\r\n" + " " NETDATA_CHANNEL_NAME "=0x0FFF:FACILITY_NETDATA\r\n" + " )\r\n" + "\r\n" + "LanguageNames=(\r\n" + " English=0x409:MSG00409\r\n" + " )\r\n" + "\r\n" + ; + + footer = ""; + } + + bool done[UINT16_MAX] = { 0 }; + char symbol[1024]; + + printf("%s", header); + for(size_t src = 1; src < _NDLS_MAX ;src++) { + for(size_t pri = 0; pri < _NDLP_MAX ;pri++) { + uint8_t severity = get_severity_from_priority(pri); + + for(size_t msg = 1; msg < _MSGID_MAX ;msg++) { + + if(src >= 16) { + fprintf(stderr, "\n\nSource %zu is bigger than 4 bits!\n\n", src); + return 1; + } + + if(pri >= 16) { + fprintf(stderr, "\n\nPriority %zu is bigger than 4 bits!\n\n", pri); + return 1; + } + + if(msg >= 256) { + fprintf(stderr, "\n\nMessageID %zu is bigger than 8 bits!\n\n", msg); + return 1; + } + + uint16_t eventID = construct_event_code(src, pri, msg); + if((eventID & 0xFFFF) != eventID) { + fprintf(stderr, "\n\nEventID 0x%x is bigger than 16 bits!\n\n", eventID); + return 1; + } + + if(done[eventID]) continue; + done[eventID] = true; + + const char *level = get_level_from_priority_str(pri); + const char *pri_txt; + switch(pri) { + case NDLP_EMERG: + pri_txt = "EMERG"; + break; + + case NDLP_CRIT: + pri_txt = "CRIT"; + break; + + case NDLP_ALERT: + pri_txt = "ALERT"; + break; + + case NDLP_ERR: + pri_txt = "ERR"; + break; + + case NDLP_WARNING: + pri_txt = "WARN"; + break; + + case NDLP_INFO: + pri_txt = "INFO"; + break; + + case NDLP_NOTICE: + pri_txt = "NOTICE"; + break; + + case NDLP_DEBUG: + pri_txt = "DEBUG"; + break; + + default: + fprintf(stderr, "\n\nInvalid priority %zu!\n\n\n", pri); + return 1; + } + + 
const char *channel; + const char *src_txt; + switch(src) { + case NDLS_COLLECTORS: + src_txt = "COLLECTORS"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_COLLECTORS; + break; + + case NDLS_ACCESS: + src_txt = "ACCESS"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_ACCESS; + break; + + case NDLS_HEALTH: + src_txt = "HEALTH"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_HEALTH; + break; + + case NDLS_DEBUG: + src_txt = "DEBUG"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_DAEMON; + break; + + case NDLS_DAEMON: + src_txt = "DAEMON"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_DAEMON; + break; + + case NDLS_ACLK: + src_txt = "ACLK"; + channel = NETDATA_ETW_CHANNEL_NAME "/" NETDATA_ETW_SUBCHANNEL_ACLK; + break; + + default: + fprintf(stderr, "\n\nInvalid source %zu!\n\n\n", src); + return 1; + } + + const char *msg_txt = get_msg_symbol(msg); + const char *format = get_msg_format(msg); + + const char *severity_txt; + switch (severity) { + case STATUS_SEVERITY_INFORMATIONAL: + severity_txt = "Informational"; + break; + + case STATUS_SEVERITY_ERROR: + severity_txt = "Error"; + break; + + case STATUS_SEVERITY_WARNING: + severity_txt = "Warning"; + break; + + default: + fprintf(stderr, "\n\nInvalid severity id %u!\n\n\n", severity); + return 1; + } + + if(manifest) + snprintf(symbol, sizeof(symbol), "ED_%s_%s_%s", src_txt, pri_txt, msg_txt); + else + snprintf(symbol, sizeof(symbol), "MC_%s_%s_%s", src_txt, pri_txt, msg_txt); + + if(manifest) + printf(" \r\n\r\n", + symbol, eventID, msg_txt, channel, level); + else + printf("MessageId=0x%x\r\n" + "Severity=%s\r\n" + "Facility=" NETDATA_CHANNEL_NAME "\r\n" + "SymbolicName=%s\r\n" + "Language=English\r\n" + "%s" + ".\r\n" + "\r\n", + eventID, severity_txt, symbol, format); + } + } + } + printf("%s", footer); + + if(s_header) { + printf("%s", s_header); + + for(size_t msg = 1; msg < _MSGID_MAX ;msg++) { + const char *msg_txt = 
get_msg_symbol(msg); + const char *format = get_msg_format(msg); + printf(" \r\n", msg_txt, format); + } + + printf("%s", s_footer); + } +} + diff --git a/src/libnetdata/os/windows-perflib/perflib.c b/src/libnetdata/os/windows-perflib/perflib.c index 940b3c6e6076d1..947397af981b92 100644 --- a/src/libnetdata/os/windows-perflib/perflib.c +++ b/src/libnetdata/os/windows-perflib/perflib.c @@ -386,44 +386,30 @@ static inline PERF_COUNTER_DEFINITION *getCounterDefinition(PERF_DATA_BLOCK *pDa // -------------------------------------------------------------------------------------------------------------------- static inline BOOL getEncodedStringToUTF8(char *dst, size_t dst_len, DWORD CodePage, char *start, DWORD length) { + static __thread wchar_t unicode[PERFLIB_MAX_NAME_LENGTH]; + WCHAR *tempBuffer; // Temporary buffer for Unicode data DWORD charsCopied = 0; - BOOL free_tempBuffer; if (CodePage == 0) { // Input is already Unicode (UTF-16) tempBuffer = (WCHAR *)start; charsCopied = length / sizeof(WCHAR); // Convert byte length to number of WCHARs - free_tempBuffer = FALSE; } else { - // Convert the multi-byte instance name to Unicode (UTF-16) - // Calculate maximum possible characters in UTF-16 - - int charCount = MultiByteToWideChar(CodePage, 0, start, (int)length, NULL, 0); - tempBuffer = (WCHAR *)malloc(charCount * sizeof(WCHAR)); - if (!tempBuffer) return FALSE; - - charsCopied = MultiByteToWideChar(CodePage, 0, start, (int)length, tempBuffer, charCount); - if (charsCopied == 0) { - free(tempBuffer); - dst[0] = '\0'; - return FALSE; - } - - free_tempBuffer = TRUE; + tempBuffer = unicode; + charsCopied = any_to_utf16(CodePage, unicode, _countof(unicode), start, (int)length); + if(!charsCopied) return FALSE; } // Now convert from Unicode (UTF-16) to UTF-8 int bytesCopied = WideCharToMultiByte(CP_UTF8, 0, tempBuffer, (int)charsCopied, dst, (int)dst_len, NULL, NULL); if (bytesCopied == 0) { - if (free_tempBuffer) free(tempBuffer); dst[0] = '\0'; // Ensure the buffer is 
null-terminated even on failure return FALSE; } - dst[bytesCopied] = '\0'; // Ensure buffer is null-terminated - if (free_tempBuffer) free(tempBuffer); // Free temporary buffer if used + dst[bytesCopied - 1] = '\0'; // Ensure buffer is null-terminated return TRUE; } diff --git a/src/libnetdata/os/windows-perflib/perflib.h b/src/libnetdata/os/windows-perflib/perflib.h index 0d853edcc3e07f..4394a8a152affa 100644 --- a/src/libnetdata/os/windows-perflib/perflib.h +++ b/src/libnetdata/os/windows-perflib/perflib.h @@ -25,6 +25,7 @@ const char *RegistryFindNameByID(DWORD id); const char *RegistryFindHelpByID(DWORD id); DWORD RegistryFindIDByName(const char *name); #define PERFLIB_REGISTRY_NAME_NOT_FOUND (DWORD)-1 +#define PERFLIB_MAX_NAME_LENGTH 1024 PERF_DATA_BLOCK *perflibGetPerformanceData(DWORD id); void perflibFreePerformanceData(void); diff --git a/src/libnetdata/socket/socket.h b/src/libnetdata/socket/socket.h index f7475d01560f35..2c282c4c60b15c 100644 --- a/src/libnetdata/socket/socket.h +++ b/src/libnetdata/socket/socket.h @@ -194,7 +194,7 @@ void poll_events(LISTEN_SOCKETS *sockets #define INET6_ADDRSTRLEN 46 #endif -typedef struct socket_peers { +typedef struct { struct { char ip[INET6_ADDRSTRLEN]; int port; diff --git a/src/libnetdata/spawn_server/log-forwarder.c b/src/libnetdata/spawn_server/log-forwarder.c new file mode 100644 index 00000000000000..5c4db55ea0f9c3 --- /dev/null +++ b/src/libnetdata/spawn_server/log-forwarder.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" +#include "log-forwarder.h" + +typedef struct LOG_FORWARDER_ENTRY { + int fd; + char *cmd; + pid_t pid; + BUFFER *wb; + size_t pfds_idx; + bool delete; + + struct LOG_FORWARDER_ENTRY *prev; + struct LOG_FORWARDER_ENTRY *next; +} LOG_FORWARDER_ENTRY; + +typedef struct LOG_FORWARDER { + LOG_FORWARDER_ENTRY *entries; + ND_THREAD *thread; + SPINLOCK spinlock; + int pipe_fds[2]; // Pipe for notifications + bool running; +} LOG_FORWARDER; + +static 
void *log_forwarder_thread_func(void *arg); + +// -------------------------------------------------------------------------------------------------------------------- +// helper functions + +static inline LOG_FORWARDER_ENTRY *log_forwarder_find_entry_unsafe(LOG_FORWARDER *lf, int fd) { + for (LOG_FORWARDER_ENTRY *entry = lf->entries; entry; entry = entry->next) { + if (entry->fd == fd) + return entry; + } + + return NULL; +} + +static inline void log_forwarder_del_entry_unsafe(LOG_FORWARDER *lf, LOG_FORWARDER_ENTRY *entry) { + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(lf->entries, entry, prev, next); + buffer_free(entry->wb); + freez(entry->cmd); + close(entry->fd); + freez(entry); +} + +static inline void log_forwarder_wake_up_worker(LOG_FORWARDER *lf) { + char ch = 0; + ssize_t bytes_written = write(lf->pipe_fds[PIPE_WRITE], &ch, 1); + if (bytes_written != 1) + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Failed to write to notification pipe"); +} + +// -------------------------------------------------------------------------------------------------------------------- +// starting / stopping + +LOG_FORWARDER *log_forwarder_start(void) { + LOG_FORWARDER *lf = callocz(1, sizeof(LOG_FORWARDER)); + + spinlock_init(&lf->spinlock); + if (pipe(lf->pipe_fds) != 0) { + freez(lf); + return NULL; + } + + // make sure read() will not block on this pipe + sock_setnonblock(lf->pipe_fds[PIPE_READ]); + + lf->running = true; + lf->thread = nd_thread_create("log-fw", NETDATA_THREAD_OPTION_JOINABLE, log_forwarder_thread_func, lf); + + return lf; +} + +static inline void mark_all_entries_for_deletion_unsafe(LOG_FORWARDER *lf) { + for(LOG_FORWARDER_ENTRY *entry = lf->entries; entry ;entry = entry->next) + entry->delete = true; +} + +void log_forwarder_stop(LOG_FORWARDER *lf) { + if(!lf || !lf->running) return; + + // Signal the thread to stop + spinlock_lock(&lf->spinlock); + lf->running = false; + + // mark them all for deletion + mark_all_entries_for_deletion_unsafe(lf); + + // Send a byte to 
the pipe to wake up the thread + char ch = 0; + write(lf->pipe_fds[PIPE_WRITE], &ch, 1); + spinlock_unlock(&lf->spinlock); + + // Wait for the thread to finish + close(lf->pipe_fds[PIPE_WRITE]); // force it to quit + nd_thread_join(lf->thread); + close(lf->pipe_fds[PIPE_READ]); + + freez(lf); +} + +// -------------------------------------------------------------------------------------------------------------------- +// managing entries + +void log_forwarder_add_fd(LOG_FORWARDER *lf, int fd) { + if(!lf || !lf->running || fd < 0) return; + + LOG_FORWARDER_ENTRY *entry = callocz(1, sizeof(LOG_FORWARDER_ENTRY)); + entry->fd = fd; + entry->cmd = NULL; + entry->pid = 0; + entry->pfds_idx = 0; + entry->delete = false; + entry->wb = buffer_create(0, NULL); + + spinlock_lock(&lf->spinlock); + + // Append to the entries list + DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(lf->entries, entry, prev, next); + + // Send a byte to the pipe to wake up the thread + log_forwarder_wake_up_worker(lf); + + spinlock_unlock(&lf->spinlock); +} + +bool log_forwarder_del_and_close_fd(LOG_FORWARDER *lf, int fd) { + if(!lf || !lf->running || fd < 0) return false; + + bool ret = false; + + spinlock_lock(&lf->spinlock); + + LOG_FORWARDER_ENTRY *entry = log_forwarder_find_entry_unsafe(lf, fd); + if(entry) { + entry->delete = true; + + // Send a byte to the pipe to wake up the thread + log_forwarder_wake_up_worker(lf); + + ret = true; + } + + spinlock_unlock(&lf->spinlock); + + return ret; +} + +void log_forwarder_annotate_fd_name(LOG_FORWARDER *lf, int fd, const char *cmd) { + if(!lf || !lf->running || fd < 0 || !cmd || !*cmd) return; + + spinlock_lock(&lf->spinlock); + + LOG_FORWARDER_ENTRY *entry = log_forwarder_find_entry_unsafe(lf, fd); + if (entry) { + freez(entry->cmd); + entry->cmd = strdupz(cmd); + } + + spinlock_unlock(&lf->spinlock); +} + +void log_forwarder_annotate_fd_pid(LOG_FORWARDER *lf, int fd, pid_t pid) { + if(!lf || !lf->running || fd < 0) return; + + spinlock_lock(&lf->spinlock); + 
+ LOG_FORWARDER_ENTRY *entry = log_forwarder_find_entry_unsafe(lf, fd); + if (entry) + entry->pid = pid; + + spinlock_unlock(&lf->spinlock); +} + +// -------------------------------------------------------------------------------------------------------------------- +// log forwarder thread + +static inline void log_forwarder_log(LOG_FORWARDER *lf __maybe_unused, LOG_FORWARDER_ENTRY *entry, const char *msg) { + const char *s = msg; + while(*s && isspace((uint8_t)*s)) s++; + if(*s == '\0') return; // do not log empty lines + + ND_LOG_STACK lgs[] = { + ND_LOG_FIELD_TXT(NDF_SYSLOG_IDENTIFIER, entry->cmd ? entry->cmd : "unknown"), + ND_LOG_FIELD_I64(NDF_TID, entry->pid), + ND_LOG_FIELD_END(), + }; + ND_LOG_STACK_PUSH(lgs); + + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "STDERR: %s", msg); +} + +// returns the number of entries active +static inline size_t log_forwarder_remove_deleted_unsafe(LOG_FORWARDER *lf) { + size_t entries = 0; + + LOG_FORWARDER_ENTRY *entry = lf->entries; + while(entry) { + LOG_FORWARDER_ENTRY *next = entry->next; + + if(entry->delete) { + if (buffer_strlen(entry->wb)) + // there is something not logged in it - log it + log_forwarder_log(lf, entry, buffer_tostring(entry->wb)); + + log_forwarder_del_entry_unsafe(lf, entry); + } + else + entries++; + + entry = next; + } + + return entries; +} + +static void *log_forwarder_thread_func(void *arg) { + LOG_FORWARDER *lf = (LOG_FORWARDER *)arg; + + while (1) { + spinlock_lock(&lf->spinlock); + if (!lf->running) { + mark_all_entries_for_deletion_unsafe(lf); + log_forwarder_remove_deleted_unsafe(lf); + spinlock_unlock(&lf->spinlock); + break; + } + + // Count the number of fds + size_t nfds = 1 + log_forwarder_remove_deleted_unsafe(lf); + + struct pollfd pfds[nfds]; + + // First, the notification pipe + pfds[0].fd = lf->pipe_fds[PIPE_READ]; + pfds[0].events = POLLIN; + + int idx = 1; + for(LOG_FORWARDER_ENTRY *entry = lf->entries; entry ; entry = entry->next, idx++) { + pfds[idx].fd = entry->fd; + 
pfds[idx].events = POLLIN; + entry->pfds_idx = idx; + } + + spinlock_unlock(&lf->spinlock); + + int timeout = 200; // 200ms + int ret = poll(pfds, nfds, timeout); + + if (ret > 0) { + // Check the notification pipe + if (pfds[0].revents & POLLIN) { + // Read and discard the data + char buf[256]; + ssize_t bytes_read = read(lf->pipe_fds[PIPE_READ], buf, sizeof(buf)); + // Ignore the data; proceed regardless of the result + if (bytes_read == -1) { + if (errno != EAGAIN && errno != EWOULDBLOCK) { + // Handle read error if necessary + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Failed to read from notification pipe"); + return NULL; + } + } + } + + // Now check the other fds + spinlock_lock(&lf->spinlock); + + size_t to_remove = 0; + + // read or mark them for deletion + for(LOG_FORWARDER_ENTRY *entry = lf->entries; entry ; entry = entry->next) { + if (entry->pfds_idx < 1 || entry->pfds_idx >= nfds || !(pfds[entry->pfds_idx].revents & POLLIN)) + continue; + + BUFFER *wb = entry->wb; + buffer_need_bytes(wb, 1024); + + ssize_t bytes_read = read(entry->fd, &wb->buffer[wb->len], wb->size - wb->len - 1); + if(bytes_read > 0) + wb->len += bytes_read; + else if(bytes_read == 0 || (bytes_read == -1 && errno != EINTR && errno != EAGAIN)) { + // EOF or error + entry->delete = true; + to_remove++; + } + + // log as many lines are they have been received + char *start = (char *)buffer_tostring(wb); + char *newline = strchr(start, '\n'); + while(newline) { + *newline = '\0'; + log_forwarder_log(lf, entry, start); + + start = ++newline; + newline = strchr(newline, '\n'); + } + + if(start != wb->buffer) { + wb->len = strlen(start); + if (wb->len) + memmove(wb->buffer, start, wb->len); + } + + entry->pfds_idx = 0; + } + + spinlock_unlock(&lf->spinlock); + } + else if (ret == 0) { + // Timeout, nothing to do + continue; + + } + else + nd_log(NDLS_COLLECTORS, NDLP_ERR, "Log forwarder: poll() error"); + } + + return NULL; +} diff --git a/src/libnetdata/spawn_server/log-forwarder.h 
b/src/libnetdata/spawn_server/log-forwarder.h new file mode 100644 index 00000000000000..344601c1f00818 --- /dev/null +++ b/src/libnetdata/spawn_server/log-forwarder.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_LOG_FORWARDER_H +#define NETDATA_LOG_FORWARDER_H + +#include "../libnetdata.h" + +typedef struct LOG_FORWARDER LOG_FORWARDER; + +LOG_FORWARDER *log_forwarder_start(void); // done once, at spawn_server_create() +void log_forwarder_add_fd(LOG_FORWARDER *lf, int fd); // to add a new fd +void log_forwarder_annotate_fd_name(LOG_FORWARDER *lf, int fd, const char *cmd); // set the syslog identifier +void log_forwarder_annotate_fd_pid(LOG_FORWARDER *lf, int fd, pid_t pid); // set the pid of the child process +bool log_forwarder_del_and_close_fd(LOG_FORWARDER *lf, int fd); // to remove an fd +void log_forwarder_stop(LOG_FORWARDER *lf); // done once, at spawn_server_destroy() + +#endif //NETDATA_LOG_FORWARDER_H diff --git a/src/libnetdata/spawn_server/spawn_server_internals.h b/src/libnetdata/spawn_server/spawn_server_internals.h index 9802f0771eaaec..198442ae7b4c25 100644 --- a/src/libnetdata/spawn_server/spawn_server_internals.h +++ b/src/libnetdata/spawn_server/spawn_server_internals.h @@ -6,6 +6,7 @@ #include "../libnetdata.h" #include "spawn_server.h" #include "spawn_library.h" +#include "log-forwarder.h" #if defined(OS_WINDOWS) #define SPAWN_SERVER_VERSION_WINDOWS 1 @@ -62,6 +63,7 @@ struct spawn_server { #endif #if defined(SPAWN_SERVER_VERSION_WINDOWS) + LOG_FORWARDER *log_forwarder; #endif }; @@ -70,6 +72,7 @@ struct spawn_instance { int sock; int write_fd; int read_fd; + int stderr_fd; pid_t child_pid; #if defined(SPAWN_SERVER_VERSION_UV) diff --git a/src/libnetdata/spawn_server/spawn_server_nofork.c b/src/libnetdata/spawn_server/spawn_server_nofork.c index f345561b4525eb..be060fc7d8948b 100644 --- a/src/libnetdata/spawn_server/spawn_server_nofork.c +++ b/src/libnetdata/spawn_server/spawn_server_nofork.c @@ -59,6 +59,7 
@@ static void spawn_server_run_child(SPAWN_SERVER *server, SPAWN_REQUEST *rq) { // close all open file descriptors of the parent, but keep ours os_close_all_non_std_open_fds_except(rq->fds, 4, 0); + nd_log_reopen_log_files_for_spawn_server(); // set the process name os_setproctitle("spawn-child", server->argc, server->argv); @@ -363,7 +364,6 @@ static bool spawn_server_run_callback(SPAWN_SERVER *server __maybe_unused, SPAWN else if (pid == 0) { // the child - gettid_uncached(); // make sure the logger logs valid pids spawn_server_run_child(server, rq); exit(63); } @@ -984,22 +984,24 @@ static bool spawn_server_create_listening_socket(SPAWN_SERVER *server) { } static void replace_stdio_with_dev_null() { + // we cannot log in this function - the logger is not yet initialized after fork() + int dev_null_fd = open("/dev/null", O_RDWR); if (dev_null_fd == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to open /dev/null: %s", strerror(errno)); + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to open /dev/null: %s", strerror(errno)); return; } // Redirect stdin (fd 0) if (dup2(dev_null_fd, STDIN_FILENO) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to redirect stdin to /dev/null: %s", strerror(errno)); + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to redirect stdin to /dev/null: %s", strerror(errno)); close(dev_null_fd); return; } // Redirect stdout (fd 1) if (dup2(dev_null_fd, STDOUT_FILENO) == -1) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to redirect stdout to /dev/null: %s", strerror(errno)); + // nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN SERVER: Failed to redirect stdout to /dev/null: %s", strerror(errno)); close(dev_null_fd); return; } @@ -1070,7 +1072,6 @@ SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options, const char *name pid_t pid = fork(); if (pid == 0) { // the child - the spawn server - gettid_uncached(); // make sure the logger logs valid pids { char buf[15]; diff 
--git a/src/libnetdata/spawn_server/spawn_server_windows.c b/src/libnetdata/spawn_server/spawn_server_windows.c index 8c7d76cd2416a6..f80925a24b5348 100644 --- a/src/libnetdata/spawn_server/spawn_server_windows.c +++ b/src/libnetdata/spawn_server/spawn_server_windows.c @@ -40,11 +40,18 @@ SPAWN_SERVER* spawn_server_create(SPAWN_SERVER_OPTIONS options __maybe_unused, c server->name = strdupz(name); else server->name = strdupz("unnamed"); + + server->log_forwarder = log_forwarder_start(); + return server; } void spawn_server_destroy(SPAWN_SERVER *server) { if (server) { + if (server->log_forwarder) { + log_forwarder_stop(server->log_forwarder); + server->log_forwarder = NULL; + } freez((void *)server->name); freez(server); } @@ -136,13 +143,13 @@ int set_fd_blocking(int fd) { // } //} -SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custom_fd __maybe_unused, const char **argv, const void *data __maybe_unused, size_t data_size __maybe_unused, SPAWN_INSTANCE_TYPE type) { +SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd __maybe_unused, int custom_fd __maybe_unused, const char **argv, const void *data __maybe_unused, size_t data_size __maybe_unused, SPAWN_INSTANCE_TYPE type) { static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; if (type != SPAWN_INSTANCE_TYPE_EXEC) return NULL; - int pipe_stdin[2] = { -1, -1 }, pipe_stdout[2] = { -1, -1 }; + int pipe_stdin[2] = { -1, -1 }, pipe_stdout[2] = { -1, -1 }, pipe_stderr[2] = { -1, -1 }; errno_clear(); @@ -166,12 +173,21 @@ SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custo goto cleanup; } + if (pipe(pipe_stderr) == -1) { + nd_log(NDLS_COLLECTORS, NDLP_ERR, + "SPAWN PARENT: Cannot create stderr pipe() for request No %zu, command: %s", + instance->request_id, command); + goto cleanup; + } + // Ensure pipes are in blocking mode if (set_fd_blocking(pipe_stdin[PIPE_READ]) == -1 || set_fd_blocking(pipe_stdin[PIPE_WRITE]) == -1 || - 
set_fd_blocking(pipe_stdout[PIPE_READ]) == -1 || set_fd_blocking(pipe_stdout[PIPE_WRITE]) == -1) { + set_fd_blocking(pipe_stdout[PIPE_READ]) == -1 || set_fd_blocking(pipe_stdout[PIPE_WRITE]) == -1 || + set_fd_blocking(pipe_stderr[PIPE_READ]) == -1 || set_fd_blocking(pipe_stderr[PIPE_WRITE]) == -1) { nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Failed to set blocking I/O on pipes for request No %zu, command: %s", instance->request_id, command); + goto cleanup; } // do not run multiple times this section @@ -181,9 +197,9 @@ SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custo // Convert POSIX file descriptors to Windows handles HANDLE stdin_read_handle = (HANDLE)_get_osfhandle(pipe_stdin[PIPE_READ]); HANDLE stdout_write_handle = (HANDLE)_get_osfhandle(pipe_stdout[PIPE_WRITE]); - HANDLE stderr_handle = (HANDLE)_get_osfhandle(stderr_fd); + HANDLE stderr_write_handle = (HANDLE)_get_osfhandle(pipe_stderr[PIPE_WRITE]); - if (stdin_read_handle == INVALID_HANDLE_VALUE || stdout_write_handle == INVALID_HANDLE_VALUE || stderr_handle == INVALID_HANDLE_VALUE) { + if (stdin_read_handle == INVALID_HANDLE_VALUE || stdout_write_handle == INVALID_HANDLE_VALUE || stderr_write_handle == INVALID_HANDLE_VALUE) { spinlock_unlock(&spinlock); nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Invalid handle value(s) for request No %zu, command: %s", @@ -194,7 +210,7 @@ SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custo // Set handle inheritance if (!SetHandleInformation(stdin_read_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT) || !SetHandleInformation(stdout_write_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT) || - !SetHandleInformation(stderr_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT)) { + !SetHandleInformation(stderr_write_handle, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT)) { spinlock_unlock(&spinlock); nd_log(NDLS_COLLECTORS, NDLP_ERR, "SPAWN PARENT: Cannot set handle(s) inheritance for request No %zu, command: %s", 
@@ -210,18 +226,18 @@ SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custo si.dwFlags = STARTF_USESTDHANDLES; si.hStdInput = stdin_read_handle; si.hStdOutput = stdout_write_handle; - si.hStdError = stderr_handle; + si.hStdError = stderr_write_handle; // Retrieve the current environment block char* env_block = GetEnvironmentStrings(); // print_environment_block(env_block); - nd_log(NDLS_COLLECTORS, NDLP_ERR, + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN PARENT: Running request No %zu, command: '%s'", instance->request_id, command); - int fds[3] = { pipe_stdin[PIPE_READ], pipe_stdout[PIPE_WRITE], stderr_fd }; - os_close_all_non_std_open_fds_except(fds, 3, CLOSE_RANGE_CLOEXEC); + int fds_to_keep_open[] = { pipe_stdin[PIPE_READ], pipe_stdout[PIPE_WRITE], pipe_stderr[PIPE_WRITE] }; + os_close_all_non_std_open_fds_except(fds_to_keep_open, 3, CLOSE_RANGE_CLOEXEC); // Spawn the process errno_clear(); @@ -247,6 +263,7 @@ SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custo // Close unused pipe ends close(pipe_stdin[PIPE_READ]); pipe_stdin[PIPE_READ] = -1; close(pipe_stdout[PIPE_WRITE]); pipe_stdout[PIPE_WRITE] = -1; + close(pipe_stderr[PIPE_WRITE]); pipe_stderr[PIPE_WRITE] = -1; // Store process information in instance instance->dwProcessId = pi.dwProcessId; @@ -256,9 +273,15 @@ SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custo // Convert handles to POSIX file descriptors instance->write_fd = pipe_stdin[PIPE_WRITE]; instance->read_fd = pipe_stdout[PIPE_READ]; + instance->stderr_fd = pipe_stderr[PIPE_READ]; + + // Add stderr_fd to the log forwarder + log_forwarder_add_fd(server->log_forwarder, instance->stderr_fd); + log_forwarder_annotate_fd_name(server->log_forwarder, instance->stderr_fd, command); + log_forwarder_annotate_fd_pid(server->log_forwarder, instance->stderr_fd, spawn_server_instance_pid(instance)); errno_clear(); - nd_log(NDLS_COLLECTORS, NDLP_ERR, + nd_log(NDLS_COLLECTORS, 
NDLP_INFO, "SPAWN PARENT: created process for request No %zu, pid %d (winpid %d), command: %s", instance->request_id, (int)instance->child_pid, (int)pi.dwProcessId, command); @@ -269,6 +292,8 @@ SPAWN_INSTANCE* spawn_server_exec(SPAWN_SERVER *server, int stderr_fd, int custo if (pipe_stdin[PIPE_WRITE] >= 0) close(pipe_stdin[PIPE_WRITE]); if (pipe_stdout[PIPE_READ] >= 0) close(pipe_stdout[PIPE_READ]); if (pipe_stdout[PIPE_WRITE] >= 0) close(pipe_stdout[PIPE_WRITE]); + if (pipe_stderr[PIPE_READ] >= 0) close(pipe_stderr[PIPE_READ]); + if (pipe_stderr[PIPE_WRITE] >= 0) close(pipe_stderr[PIPE_WRITE]); freez(instance); return NULL; } @@ -314,7 +339,7 @@ static void TerminateChildProcesses(SPAWN_INSTANCE *si) { if (pe.th32ParentProcessID == si->dwProcessId) { HANDLE hChildProcess = OpenProcess(PROCESS_TERMINATE, FALSE, pe.th32ProcessID); if (hChildProcess) { - nd_log(NDLS_COLLECTORS, NDLP_ERR, + nd_log(NDLS_COLLECTORS, NDLP_WARNING, "SPAWN PARENT: killing subprocess %u of request No %zu, pid %d (winpid %u)", pe.th32ProcessID, si->request_id, (int)si->child_pid, si->dwProcessId); @@ -378,6 +403,12 @@ int spawn_server_exec_kill(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE * // to have them, to avoid abnormal shutdown of the plugins if(si->read_fd != -1) { close(si->read_fd); si->read_fd = -1; } if(si->write_fd != -1) { close(si->write_fd); si->write_fd = -1; } + if(si->stderr_fd != -1) { + if(!log_forwarder_del_and_close_fd(server->log_forwarder, si->stderr_fd)) + close(si->stderr_fd); + + si->stderr_fd = -1; + } errno_clear(); if(TerminateProcess(si->process_handle, STATUS_CONTROL_C_EXIT) == 0) @@ -394,6 +425,12 @@ int spawn_server_exec_kill(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE * int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE *si) { if(si->read_fd != -1) { close(si->read_fd); si->read_fd = -1; } if(si->write_fd != -1) { close(si->write_fd); si->write_fd = -1; } + if(si->stderr_fd != -1) { + 
if(!log_forwarder_del_and_close_fd(server->log_forwarder, si->stderr_fd)) + close(si->stderr_fd); + + si->stderr_fd = -1; + } // wait for the process to end WaitForSingleObject(si->process_handle, INFINITE); @@ -404,7 +441,7 @@ int spawn_server_exec_wait(SPAWN_SERVER *server __maybe_unused, SPAWN_INSTANCE * char *err = GetErrorString(exit_code); - nd_log(NDLS_COLLECTORS, NDLP_ERR, + nd_log(NDLS_COLLECTORS, NDLP_INFO, "SPAWN PARENT: child of request No %zu, pid %d (winpid %u), exited with code %u (0x%x): %s", si->request_id, (int)si->child_pid, si->dwProcessId, (unsigned)exit_code, (unsigned)exit_code, err ? err : "(no reason text)"); diff --git a/src/libnetdata/string/utf8.c b/src/libnetdata/string/utf8.c new file mode 100644 index 00000000000000..e0a6745da4ffdb --- /dev/null +++ b/src/libnetdata/string/utf8.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "../libnetdata.h" + +#if defined(OS_WINDOWS) +#include + +/* + * Convert any CodePage to UTF16 + * Goals: + * 1. Destination is always NULL terminated + * 2. If the destination buffer is not enough, return as much as possible data (truncate) + * 3. Always return the number of wide characters written, including the null terminator + */ + +size_t any_to_utf16(uint32_t CodePage, wchar_t *dst, size_t dst_size, const char *src, int src_len) { + if(!src || src_len == 0) { + // invalid input + if(dst && dst_size) + *dst = L'\0'; + return 0; + } + + if(!dst || !dst_size) { + // the caller wants to know the buffer to allocate for the conversion + int required = MultiByteToWideChar(CodePage, 0, src, src_len, NULL, 0); + if(required <= 0) return 0; // error in the conversion + + // Add 1 for null terminator only if src_len is not -1 + // so that the caller can call us again to get the entire string (not truncated) + return (size_t)required + ((src_len != -1) ? 
1 : 0); + } + + // do the conversion directly to the destination buffer + int rc = MultiByteToWideChar(CodePage, 0, src, src_len, dst, (int)dst_size); + if(rc <= 0) { + // conversion failed, let's see why... + DWORD status = GetLastError(); + if(status == ERROR_INSUFFICIENT_BUFFER) { + // it cannot fit entirely, let's allocate a new buffer to convert it + // and then truncate it to the destination buffer + + // clear errno and LastError to clear the error of the + // MultiByteToWideChar() that failed + errno_clear(); + + // get the required size + int required_size = MultiByteToWideChar(CodePage, 0, src, src_len, NULL, 0); + + // mallocz() never fails (exits the program on NULL) + wchar_t *tmp = mallocz(required_size * sizeof(wchar_t)); + + // convert it, now it should fit + rc = MultiByteToWideChar(CodePage, 0, src, src_len, tmp, required_size); + if (rc <= 0) { + // it failed! + *dst = L'\0'; + freez(tmp); + return 0; + } + + size_t len = rc; + + // copy as much as we can + memcpy(dst, tmp, MIN(len, (dst_size - 1)) * sizeof(wchar_t)); + + // null terminate it + dst[MIN(len, (dst_size - 1))] = L'\0'; + + // free the temporary buffer + freez(tmp); + + // return the actual bytes written + return MIN(len, dst_size); + } + + // empty the destination + *dst = L'\0'; + return 0; + } + + size_t len = rc; + + if(len >= dst_size) { + // truncate it to fit the null + dst[dst_size - 1] = L'\0'; + return dst_size; + } + + if(dst[len - 1] != L'\0') { + // the result is not null terminated + // append the null + dst[len] = L'\0'; + return len + 1; + } + + // the result is already null terminated + return len; +} + +/* + * Convert UTF16 (wide-character string) to UTF8 + * Goals: + * 1. Destination is always NULL terminated + * 2. If the destination buffer is not enough, return as much as possible data (truncate) + * 3. 
Always return the number of bytes written, including the null terminator + */ + +size_t utf16_to_utf8(char *dst, size_t dst_size, const wchar_t *src, int src_len) { + if (!src || src_len == 0) { + // invalid input + if(dst && dst_size) + *dst = L'\0'; + return 0; + } + + if (!dst || dst_size == 0) { + // The caller wants to know the buffer size required for the conversion + int required = WideCharToMultiByte(CP_UTF8, 0, src, src_len, NULL, 0, NULL, NULL); + if (required <= 0) return 0; // error in the conversion + + // Add 1 for null terminator only if src_len is not -1 + return (size_t)required + ((src_len != -1) ? 1 : 0); + } + + // Perform the conversion directly into the destination buffer + int rc = WideCharToMultiByte(CP_UTF8, 0, src, src_len, dst, (int)dst_size, NULL, NULL); + if (rc <= 0) { + // Conversion failed, let's see why... + DWORD status = GetLastError(); + if (status == ERROR_INSUFFICIENT_BUFFER) { + // It cannot fit entirely, let's allocate a new buffer to convert it + // and then truncate it to the destination buffer + + // Clear errno and LastError to clear the error of the + // WideCharToMultiByte() that failed + errno_clear(); + + // Get the required size + int required_size = WideCharToMultiByte(CP_UTF8, 0, src, src_len, NULL, 0, NULL, NULL); + + // mallocz() never fails (exits the program on NULL) + char *tmp = mallocz(required_size * sizeof(char)); + + // Convert it, now it should fit + rc = WideCharToMultiByte(CP_UTF8, 0, src, src_len, tmp, required_size, NULL, NULL); + if (rc <= 0) { + // Conversion failed + *dst = '\0'; + freez(tmp); + return 0; + } + + size_t len = rc; + + // Copy as much as we can + memcpy(dst, tmp, MIN(len, (dst_size - 1)) * sizeof(char)); + + // Null-terminate it + dst[MIN(len, (dst_size - 1))] = '\0'; + + // Free the temporary buffer + freez(tmp); + + // Return the actual bytes written + return MIN(len, dst_size); + } + + // Empty the destination + *dst = '\0'; + return 0; + } + + size_t len = rc; + + if (len >= 
dst_size) { + // Truncate it to fit the null terminator + dst[dst_size - 1] = '\0'; + return dst_size; + } + + if (dst[len - 1] != '\0') { + // The result is not null-terminated + // Append the null terminator + dst[len] = '\0'; + return len + 1; + } + + // The result is already null-terminated + return len; +} +#endif diff --git a/src/libnetdata/string/utf8.h b/src/libnetdata/string/utf8.h index 6d934b543d7c9d..ee9fe12b20a8f7 100644 --- a/src/libnetdata/string/utf8.h +++ b/src/libnetdata/string/utf8.h @@ -6,4 +6,25 @@ #define IS_UTF8_BYTE(x) ((uint8_t)(x) & (uint8_t)0x80) #define IS_UTF8_STARTBYTE(x) (IS_UTF8_BYTE(x) && ((uint8_t)(x) & (uint8_t)0x40)) +#ifndef _countof +#define _countof(x) (sizeof(x) / sizeof(*(x))) +#endif + +#if defined(OS_WINDOWS) + +// return an always null terminated wide string, truncate to given size if destination is not big enough, +// src_len can be -1 use all of it. +// returns zero on errors, > 0 otherwise (including the null, even if src is not null terminated). +size_t any_to_utf16(uint32_t CodePage, wchar_t *dst, size_t dst_size, const char *src, int src_len); + +// always null terminated, truncated if it does not fit, src_len can be -1 to use all of it. +// returns zero on errors, > 0 otherwise (including the null, even if src is not null terminated). +#define utf8_to_utf16(utf16, utf16_count, src, src_len) any_to_utf16(CP_UTF8, utf16, utf16_count, src, src_len) + +// always null terminated, truncated if it does not fit, src_len can be -1 to use all of it. +// returns zero on errors, > 0 otherwise (including the null, even if src is not null terminated). 
+size_t utf16_to_utf8(char *dst, size_t dst_size, const wchar_t *src, int src_len); + +#endif + + #endif /* NETDATA_STRING_UTF8_H */ diff --git a/src/libnetdata/template-enum.h b/src/libnetdata/template-enum.h index 82487336ac7612..2170ee86baf9f7 100644 --- a/src/libnetdata/template-enum.h +++ b/src/libnetdata/template-enum.h @@ -41,6 +41,7 @@ #define BITMAP_STR_DEFINE_FUNCTIONS_EXTERN(type) \ type type ## _2id_one(const char *str); \ + const char *type##_2str_one(type id); \ const char *type##_2json(BUFFER *wb, const char *key, type id); #define BITMAP_STR_DEFINE_FUNCTIONS(type, def, def_str) \ @@ -57,6 +58,16 @@ return def; \ } \ \ + const char *type##_2str_one(type id) \ + { \ + for (size_t i = 0; type ## _names[i].name; i++) { \ + if (id == type ## _names[i].id) \ + return type ## _names[i].name; \ + } \ + \ + return def_str; \ + } \ + \ const char *type##_2json(BUFFER *wb, const char *key, type id) \ { \ buffer_json_member_add_array(wb, key); \ diff --git a/src/ml/Config.cc b/src/ml/Config.cc index 84846cd01ca7ba..ccca7723b7d642 100644 --- a/src/ml/Config.cc +++ b/src/ml/Config.cc @@ -19,7 +19,7 @@ static T clamp(const T& Value, const T& Min, const T& Max) { void ml_config_load(ml_config_t *cfg) { const char *config_section_ml = CONFIG_SECTION_ML; - bool enable_anomaly_detection = config_get_boolean(config_section_ml, "enabled", true); + int enable_anomaly_detection = config_get_boolean_ondemand(config_section_ml, "enabled", CONFIG_BOOLEAN_AUTO); /* * Read values @@ -139,4 +139,10 @@ void ml_config_load(ml_config_t *cfg) { cfg->suppression_threshold = suppression_threshold; cfg->enable_statistics_charts = enable_statistics_charts; + + if (cfg->enable_anomaly_detection == CONFIG_BOOLEAN_AUTO && default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) { + cfg->enable_anomaly_detection = CONFIG_BOOLEAN_NO; + config_set_boolean(config_section_ml, "enabled", CONFIG_BOOLEAN_NO); + return; + } } diff --git a/src/ml/ml-configuration.md b/src/ml/ml-configuration.md index 
a06a186d8198ec..dc5d535dbc1e32 100644 --- a/src/ml/ml-configuration.md +++ b/src/ml/ml-configuration.md @@ -1,18 +1,18 @@ # ML Configuration -Netdata's [Machine Learning](/src/ml/README.md) capabilities are enabled by default. +Netdata's [Machine Learning](/src/ml/README.md) capabilities are enabled by default if the [Database mode](/src/database/README.md) is set to `db = dbengine`. To enable or disable Machine Learning capabilities on a node: 1. [Edit `netdata.conf`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) -2. In the `[ml]` section, set `enabled = yes` to enable or `enabled = no` to disable +2. In the `[ml]` section, set `enabled` to `yes` to enable ML, `no` to disable it, or leave it at the default `auto` to enable ML only when [Database mode](/src/database/README.md) is set to `dbengine`. 3. [Restart Netdata](/docs/netdata-agent/start-stop-restart.md) Below is a list of all the available configuration params and their default values. ```bash [ml] - # enabled = yes + # enabled = auto # maximum num samples to train = 21600 # minimum num samples to train = 900 # train every = 3h @@ -85,7 +85,7 @@ flowchart BT ## Descriptions (min/max) -- `enabled`: `yes` to enable, `no` to disable. +- `enabled`: `yes` to enable, `no` to disable, or `auto` to let Netdata decide when to enable ML. - `maximum num samples to train`: (`3600`/`86400`) This is the maximum amount of time you would like to train each model on. For example, the default of `21600` trains on the preceding 6 hours of data, assuming an `update every` of 1 second. - `minimum num samples to train`: (`900`/`21600`) This is the minimum amount of data required to be able to train a model. For example, the default of `900` implies that once at least 15 minutes of data is available for training, a model is trained, otherwise it is skipped and checked again at the next training run. - `train every`: (`3h`/`6h`) This is how often each model will be retrained. 
For example, the default of `3h` means that each model is retrained every 3 hours. Note: The training of all models is spread out across the `train every` period for efficiency, so in reality, it means that each model will be trained in a staggered manner within each `train every` period. diff --git a/src/ml/ml-private.h b/src/ml/ml-private.h index fc90589b37b95d..cda24d0eda1ff3 100644 --- a/src/ml/ml-private.h +++ b/src/ml/ml-private.h @@ -313,7 +313,7 @@ typedef struct { } ml_training_thread_t; typedef struct { - bool enable_anomaly_detection; + int enable_anomaly_detection; unsigned max_train_samples; unsigned min_train_samples; diff --git a/src/registry/registry.c b/src/registry/registry.c index aab81afc5f8f6e..be8d6948f679da 100644 --- a/src/registry/registry.c +++ b/src/registry/registry.c @@ -517,16 +517,16 @@ void registry_statistics(void) { rrddim_add(stm, "machines_urls", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE); } - struct aral_statistics *p_aral_stats = aral_statistics(registry.persons_aral); + struct aral_statistics *p_aral_stats = aral_get_statistics(registry.persons_aral); rrddim_set(stm, "persons", (collected_number)p_aral_stats->structures.allocated_bytes + (collected_number)p_aral_stats->malloc.allocated_bytes + (collected_number)p_aral_stats->mmap.allocated_bytes); - struct aral_statistics *m_aral_stats = aral_statistics(registry.machines_aral); + struct aral_statistics *m_aral_stats = aral_get_statistics(registry.machines_aral); rrddim_set(stm, "machines", (collected_number)m_aral_stats->structures.allocated_bytes + (collected_number)m_aral_stats->malloc.allocated_bytes + (collected_number)m_aral_stats->mmap.allocated_bytes); - struct aral_statistics *pu_aral_stats = aral_statistics(registry.person_urls_aral); + struct aral_statistics *pu_aral_stats = aral_get_statistics(registry.person_urls_aral); rrddim_set(stm, "persons_urls", (collected_number)pu_aral_stats->structures.allocated_bytes + (collected_number)pu_aral_stats->malloc.allocated_bytes + 
(collected_number)pu_aral_stats->mmap.allocated_bytes); - struct aral_statistics *mu_aral_stats = aral_statistics(registry.machine_urls_aral); + struct aral_statistics *mu_aral_stats = aral_get_statistics(registry.machine_urls_aral); rrddim_set(stm, "machines_urls", (collected_number)mu_aral_stats->structures.allocated_bytes + (collected_number)mu_aral_stats->malloc.allocated_bytes + (collected_number)mu_aral_stats->mmap.allocated_bytes); rrdset_done(stm); diff --git a/src/streaming/rrdpush.h b/src/streaming/rrdpush.h index 6536f97f000736..e7bad86a8c3b70 100644 --- a/src/streaming/rrdpush.h +++ b/src/streaming/rrdpush.h @@ -601,7 +601,7 @@ static inline const char *rrdhost_dyncfg_status_to_string(RRDHOST_DYNCFG_STATUS } } -typedef struct rrdhost_status { +typedef struct { RRDHOST *host; time_t now; diff --git a/src/web/api/v2/api_v2_calls.h b/src/web/api/v2/api_v2_calls.h index e40f9f4521904d..809af9669fdef5 100644 --- a/src/web/api/v2/api_v2_calls.h +++ b/src/web/api/v2/api_v2_calls.h @@ -25,6 +25,7 @@ int api_v2_node_instances(RRDHOST *host, struct web_client *w, char *url); int api_v2_ilove(RRDHOST *host, struct web_client *w, char *url); int api_v2_claim(RRDHOST *host, struct web_client *w, char *url); +int api_v3_claim(RRDHOST *host, struct web_client *w, char *url); int api_v2_webrtc(RRDHOST *host, struct web_client *w, char *url); diff --git a/src/web/api/v2/api_v2_claim.c b/src/web/api/v2/api_v2_claim.c index 990920cf1266c5..6c869aac56ff5e 100644 --- a/src/web/api/v2/api_v2_claim.c +++ b/src/web/api/v2/api_v2_claim.c @@ -85,7 +85,94 @@ static bool check_claim_param(const char *s) { return true; } -int api_v2_claim(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { +static bool agent_can_be_claimed(void) { + CLOUD_STATUS status = cloud_status(); + switch(status) { + case CLOUD_STATUS_AVAILABLE: + case CLOUD_STATUS_OFFLINE: + case CLOUD_STATUS_INDIRECT: + return true; + + case CLOUD_STATUS_BANNED: + case CLOUD_STATUS_ONLINE: + return false; + } 
+ + return false; +} + +typedef enum { + CLAIM_RESP_INFO, + CLAIM_RESP_ERROR, + CLAIM_RESP_ACTION_OK, + CLAIM_RESP_ACTION_FAILED, +} CLAIM_RESPONSE; + +static void claim_add_user_info_command(BUFFER *wb) { + const char *filename = netdata_random_session_id_get_filename(); + CLEAN_BUFFER *os_cmd = buffer_create(0, NULL); + + const char *os_filename; + const char *os_prefix; + const char *os_quote; + const char *os_message; + +#if defined(OS_WINDOWS) + char win_path[MAX_PATH]; + cygwin_conv_path(CCP_POSIX_TO_WIN_A, filename, win_path, sizeof(win_path)); + os_filename = win_path; + os_prefix = "more"; + os_message = "We need to verify this Windows server is yours. So, open a Command Prompt on this server to run the command. It will give you a UUID. Copy and paste this UUID to this box:"; +#else + os_filename = filename; + os_prefix = "sudo cat"; + os_message = "We need to verify this server is yours. SSH to this server and run this command. It will give you a UUID. Copy and paste this UUID to this box:"; +#endif + + // add quotes only when the filename has a space + if(strchr(os_filename, ' ')) + os_quote = "\""; + else + os_quote = ""; + + buffer_sprintf(os_cmd, "%s %s%s%s", os_prefix, os_quote, os_filename, os_quote); + + buffer_json_member_add_string(wb, "key_filename", os_filename); + buffer_json_member_add_string(wb, "cmd", buffer_tostring(os_cmd)); + buffer_json_member_add_string(wb, "help", os_message); +} + +static int claim_json_response(BUFFER *wb, CLAIM_RESPONSE response, const char *msg) { + time_t now_s = now_realtime_sec(); + buffer_reset(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); + + if(response != CLAIM_RESP_INFO) { + // this is not an info, so it needs a status report + buffer_json_member_add_boolean(wb, "success", response == CLAIM_RESP_ACTION_OK ? true : false); + buffer_json_member_add_string_or_empty(wb, "message", msg ? 
msg : ""); + } + + buffer_json_cloud_status(wb, now_s); + + if(response != CLAIM_RESP_ACTION_OK) { + buffer_json_member_add_boolean(wb, "can_be_claimed", agent_can_be_claimed()); + claim_add_user_info_command(wb); + } + + buffer_json_agents_v2(wb, NULL, now_s, false, false); + buffer_json_finalize(wb); + + return (response == CLAIM_RESP_ERROR) ? HTTP_RESP_BAD_REQUEST : HTTP_RESP_OK; +} + +static int claim_txt_response(BUFFER *wb, const char *msg) { + buffer_reset(wb); + buffer_strcat(wb, msg); + return HTTP_RESP_BAD_REQUEST; +} + +static int api_claim(uint8_t version, struct web_client *w, char *url) { char *key = NULL; char *token = NULL; char *rooms = NULL; @@ -110,103 +197,45 @@ int api_v2_claim(RRDHOST *host __maybe_unused, struct web_client *w, char *url) } BUFFER *wb = w->response.data; - buffer_flush(wb); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - - time_t now_s = now_realtime_sec(); - CLOUD_STATUS status = buffer_json_cloud_status(wb, now_s); - - bool can_be_claimed = false; - switch(status) { - case CLOUD_STATUS_AVAILABLE: - case CLOUD_STATUS_OFFLINE: - case CLOUD_STATUS_INDIRECT: - can_be_claimed = true; - break; - case CLOUD_STATUS_BANNED: - case CLOUD_STATUS_ONLINE: - can_be_claimed = false; - break; - } - - buffer_json_member_add_boolean(wb, "can_be_claimed", can_be_claimed); + CLAIM_RESPONSE response = CLAIM_RESP_INFO; + const char *msg = NULL; + bool can_be_claimed = agent_can_be_claimed(); if(can_be_claimed && key) { if(!netdata_random_session_id_matches(key)) { - buffer_reset(wb); - buffer_strcat(wb, "invalid key"); netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it - return HTTP_RESP_FORBIDDEN; + if(version < 3) return claim_txt_response(wb, "invalid key"); + return claim_json_response(wb, CLAIM_RESP_ERROR, "invalid key"); } if(!token || !base_url || !check_claim_param(token) || !check_claim_param(base_url) || (rooms && !check_claim_param(rooms))) { - buffer_reset(wb); 
- buffer_strcat(wb, "invalid parameters"); netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it - return HTTP_RESP_BAD_REQUEST; + if(version < 3) return claim_txt_response(wb, "invalid parameters"); + return claim_json_response(wb, CLAIM_RESP_ERROR, "invalid parameters"); } netdata_random_session_id_generate(); // generate a new key, to avoid an attack to find it - bool success = false; - const char *msg; if(claim_agent(base_url, token, rooms, cloud_config_proxy_get(), cloud_config_insecure_get())) { msg = "ok"; - success = true; can_be_claimed = false; - status = claim_reload_and_wait_online(); + claim_reload_and_wait_online(); + response = CLAIM_RESP_ACTION_OK; } - else + else { msg = claim_agent_failure_reason_get(); - - // our status may have changed - // refresh the status in our output - buffer_flush(wb); - buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT); - now_s = now_realtime_sec(); - buffer_json_cloud_status(wb, now_s); - - // and this is the status of the claiming command we run - buffer_json_member_add_boolean(wb, "success", success); - buffer_json_member_add_string_or_empty(wb, "message", msg); + response = CLAIM_RESP_ACTION_FAILED; + } } - if(can_be_claimed) { - const char *filename = netdata_random_session_id_get_filename(); - CLEAN_BUFFER *buffer = buffer_create(0, NULL); - - const char *os_filename; - const char *os_prefix; - const char *os_quote; - const char *os_message; - -#if defined(OS_WINDOWS) - char win_path[MAX_PATH]; - cygwin_conv_path(CCP_POSIX_TO_WIN_A, filename, win_path, sizeof(win_path)); - os_filename = win_path; - os_prefix = "more"; - os_message = "We need to verify this Windows server is yours. So, open a Command Prompt on this server to run the command. It will give you a UUID. Copy and paste this UUID to this box:"; -#else - os_filename = filename; - os_prefix = "sudo cat"; - os_message = "We need to verify this server is yours. 
SSH to this server and run this command. It will give you a UUID. Copy and paste this UUID to this box:"; -#endif - - // add quotes only when the filename has a space - if(strchr(os_filename, ' ')) - os_quote = "\""; - else - os_quote = ""; - - buffer_sprintf(buffer, "%s %s%s%s", os_prefix, os_quote, os_filename, os_quote); - buffer_json_member_add_string(wb, "key_filename", os_filename); - buffer_json_member_add_string(wb, "cmd", buffer_tostring(buffer)); - buffer_json_member_add_string(wb, "help", os_message); - } + return claim_json_response(wb, response, msg); +} - buffer_json_agents_v2(wb, NULL, now_s, false, false); - buffer_json_finalize(wb); +int api_v2_claim(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_claim(2, w, url); +} - return HTTP_RESP_OK; +int api_v3_claim(RRDHOST *host __maybe_unused, struct web_client *w, char *url) { + return api_claim(3, w, url); } diff --git a/src/web/api/web_api_v3.c b/src/web/api/web_api_v3.c index d115922ee3d6a2..a1092471cdadfc 100644 --- a/src/web/api/web_api_v3.c +++ b/src/web/api/web_api_v3.c @@ -201,7 +201,7 @@ static struct web_api_command api_commands_v3[] = { .hash = 0, .acl = HTTP_ACL_NOCHECK, .access = HTTP_ACCESS_NONE, - .callback = api_v2_claim, + .callback = api_v3_claim, .allow_subpaths = 0 }, {