diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c7b0655d55665b..8e80efffcff237 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -518,7 +518,7 @@ jobs: uses: google-github-actions/setup-gcloud@v2.1.1 - name: Upload Artifacts id: upload - uses: google-github-actions/upload-cloud-storage@v2.1.3 + uses: google-github-actions/upload-cloud-storage@v2.2.0 with: destination: ${{ secrets.GCP_NIGHTLY_STORAGE_BUCKET }} gzip: false diff --git a/.gitignore b/.gitignore index 8f5842dbe410dc..f5d2ed1c804ee1 100644 --- a/.gitignore +++ b/.gitignore @@ -179,6 +179,7 @@ Session.*.vim # Special exceptions !packaging/repoconfig/Makefile +packaging/windows/resources/*.manifest # Jupyter notebook checkpoints .ipynb_checkpoints diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e9b37c9d583f9..675ece6babb7c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,10 +6,41 @@ **Merged pull requests:** +- Improve processing on removed alerts after agent restart [\#18488](https://github.com/netdata/netdata/pull/18488) ([stelfrag](https://github.com/stelfrag)) +- Bump github.com/prometheus/common from 0.57.0 to 0.58.0 in /src/go [\#18487](https://github.com/netdata/netdata/pull/18487) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump golang.org/x/text from 0.17.0 to 0.18.0 in /src/go [\#18486](https://github.com/netdata/netdata/pull/18486) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Remove Warnings \(ebpf\) [\#18484](https://github.com/netdata/netdata/pull/18484) ([thiagoftsm](https://github.com/thiagoftsm)) +- fix win sysinfo installed ram calculation [\#18482](https://github.com/netdata/netdata/pull/18482) ([ilyam8](https://github.com/ilyam8)) +- Update LIbbpf [\#18480](https://github.com/netdata/netdata/pull/18480) ([thiagoftsm](https://github.com/thiagoftsm)) +- added missing comma in Access-Control-Allow-Headers [\#18479](https://github.com/netdata/netdata/pull/18479) ([ktsaou](https://github.com/ktsaou)) +- 
add Access-Control-Allow-Headers: x-transaction-id [\#18478](https://github.com/netdata/netdata/pull/18478) ([ktsaou](https://github.com/ktsaou)) +- add Access-Control-Allow-Headers: x-netdata-auth [\#18477](https://github.com/netdata/netdata/pull/18477) ([ktsaou](https://github.com/ktsaou)) +- prevent sigsegv in config-parsers [\#18476](https://github.com/netdata/netdata/pull/18476) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#18475](https://github.com/netdata/netdata/pull/18475) ([netdatabot](https://github.com/netdatabot)) +- added version to systemd-journal info response [\#18474](https://github.com/netdata/netdata/pull/18474) ([ktsaou](https://github.com/ktsaou)) +- Regenerate integrations.js [\#18473](https://github.com/netdata/netdata/pull/18473) ([netdatabot](https://github.com/netdatabot)) +- Remove w1sensor in favor of Go implementation [\#18471](https://github.com/netdata/netdata/pull/18471) ([Ancairon](https://github.com/Ancairon)) +- Improve processing of pending alerts [\#18470](https://github.com/netdata/netdata/pull/18470) ([stelfrag](https://github.com/stelfrag)) +- Fix node index in alerts [\#18469](https://github.com/netdata/netdata/pull/18469) ([stelfrag](https://github.com/stelfrag)) +- go.d storcli: fix unmarshal driveInfo [\#18466](https://github.com/netdata/netdata/pull/18466) ([ilyam8](https://github.com/ilyam8)) +- w1sensor collector Go implementation [\#18464](https://github.com/netdata/netdata/pull/18464) ([Ancairon](https://github.com/Ancairon)) +- Check correct number of bits for LZC of XOR value. 
[\#18463](https://github.com/netdata/netdata/pull/18463) ([vkalintiris](https://github.com/vkalintiris)) +- netdata-claim.sh: fix parsing url arg [\#18460](https://github.com/netdata/netdata/pull/18460) ([ilyam8](https://github.com/ilyam8)) +- Bump github.com/likexian/whois from 1.15.4 to 1.15.5 in /src/go [\#18457](https://github.com/netdata/netdata/pull/18457) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/likexian/whois-parser from 1.24.19 to 1.24.20 in /src/go [\#18456](https://github.com/netdata/netdata/pull/18456) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Cleanup, rename and packaging fix \(Windows Codes\) [\#18455](https://github.com/netdata/netdata/pull/18455) ([thiagoftsm](https://github.com/thiagoftsm)) +- Regenerate integrations.js [\#18454](https://github.com/netdata/netdata/pull/18454) ([netdatabot](https://github.com/netdatabot)) +- Bump github.com/Masterminds/sprig/v3 from 3.2.3 to 3.3.0 in /src/go [\#18453](https://github.com/netdata/netdata/pull/18453) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/prometheus/common from 0.55.0 to 0.57.0 in /src/go [\#18452](https://github.com/netdata/netdata/pull/18452) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/docker/docker from 27.1.2+incompatible to 27.2.0+incompatible in /src/go [\#18451](https://github.com/netdata/netdata/pull/18451) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Regenerate integrations.js [\#18450](https://github.com/netdata/netdata/pull/18450) ([netdatabot](https://github.com/netdatabot)) +- go.d sensors add parsing intrusion to exec method [\#18449](https://github.com/netdata/netdata/pull/18449) ([ilyam8](https://github.com/ilyam8)) - Exit slabinfo.plugin on EPIPE [\#18448](https://github.com/netdata/netdata/pull/18448) ([teqwve](https://github.com/teqwve)) +- ilert Integration [\#18447](https://github.com/netdata/netdata/pull/18447) ([DaTiMy](https://github.com/DaTiMy)) - 
go.d remove vnode disable [\#18446](https://github.com/netdata/netdata/pull/18446) ([ilyam8](https://github.com/ilyam8)) - go.d add support for symlinked vnode config files [\#18445](https://github.com/netdata/netdata/pull/18445) ([ilyam8](https://github.com/ilyam8)) - Proper precedence when calculating time\_to\_evict [\#18444](https://github.com/netdata/netdata/pull/18444) ([stelfrag](https://github.com/stelfrag)) +- Windows Permissions [\#18443](https://github.com/netdata/netdata/pull/18443) ([thiagoftsm](https://github.com/thiagoftsm)) - do not free the sender when the sender thread exits [\#18441](https://github.com/netdata/netdata/pull/18441) ([ktsaou](https://github.com/ktsaou)) - fix receiver deadlock [\#18440](https://github.com/netdata/netdata/pull/18440) ([ktsaou](https://github.com/ktsaou)) - fix charts.d/sensors leftovers [\#18439](https://github.com/netdata/netdata/pull/18439) ([ilyam8](https://github.com/ilyam8)) @@ -348,7 +379,6 @@ - Tidy-up build related CI jobs. [\#17962](https://github.com/netdata/netdata/pull/17962) ([Ferroin](https://github.com/Ferroin)) - Sign DEB packages in the GHA runners that build them. 
[\#17949](https://github.com/netdata/netdata/pull/17949) ([Ferroin](https://github.com/Ferroin)) - Detect on startup if the netdata-meta.db file is not a valid database file [\#17924](https://github.com/netdata/netdata/pull/17924) ([stelfrag](https://github.com/stelfrag)) -- eBPF cgroup and mutex [\#17915](https://github.com/netdata/netdata/pull/17915) ([thiagoftsm](https://github.com/thiagoftsm)) ## [v1.46.3](https://github.com/netdata/netdata/tree/v1.46.3) (2024-07-23) @@ -398,30 +428,6 @@ - Bump k8s.io/client-go from 0.30.1 to 0.30.2 in /src/go/collectors/go.d.plugin [\#17923](https://github.com/netdata/netdata/pull/17923) ([dependabot[bot]](https://github.com/apps/dependabot)) - go.d bump github.com/docker/docker v27.0.0+incompatible [\#17921](https://github.com/netdata/netdata/pull/17921) ([ilyam8](https://github.com/ilyam8)) - Bump github.com/jessevdk/go-flags from 1.5.0 to 1.6.1 in /src/go/collectors/go.d.plugin [\#17919](https://github.com/netdata/netdata/pull/17919) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump go.mongodb.org/mongo-driver from 1.15.0 to 1.15.1 in /src/go/collectors/go.d.plugin [\#17917](https://github.com/netdata/netdata/pull/17917) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/miekg/dns from 1.1.59 to 1.1.61 in /src/go/collectors/go.d.plugin [\#17916](https://github.com/netdata/netdata/pull/17916) ([dependabot[bot]](https://github.com/apps/dependabot)) -- go.d whoisquery: try requesting extended data if no expiration date [\#17913](https://github.com/netdata/netdata/pull/17913) ([ilyam8](https://github.com/ilyam8)) -- go.d whoisquery: check if exp date is empty [\#17911](https://github.com/netdata/netdata/pull/17911) ([ilyam8](https://github.com/ilyam8)) -- Regenerate integrations.js [\#17910](https://github.com/netdata/netdata/pull/17910) ([netdatabot](https://github.com/netdatabot)) -- Update nvme/metadata: add how to use in a docker [\#17909](https://github.com/netdata/netdata/pull/17909) 
([powerman](https://github.com/powerman)) -- Update x509check/metadata: add missing smtp schema [\#17908](https://github.com/netdata/netdata/pull/17908) ([powerman](https://github.com/powerman)) -- systemd: start `netdata` after network is online [\#17906](https://github.com/netdata/netdata/pull/17906) ([k0ste](https://github.com/k0ste)) -- Fix Caddy setup in Install Netdata with Docker [\#17901](https://github.com/netdata/netdata/pull/17901) ([powerman](https://github.com/powerman)) -- sys\_block\_zram: don't use "/dev" [\#17900](https://github.com/netdata/netdata/pull/17900) ([ilyam8](https://github.com/ilyam8)) -- Regenerate integrations.js [\#17897](https://github.com/netdata/netdata/pull/17897) ([netdatabot](https://github.com/netdatabot)) -- go.d ll netlisteners add support for wildcard address [\#17896](https://github.com/netdata/netdata/pull/17896) ([ilyam8](https://github.com/ilyam8)) -- integrations make `
` open [\#17895](https://github.com/netdata/netdata/pull/17895) ([ilyam8](https://github.com/ilyam8)) -- allow alerts to be created without too many requirements [\#17894](https://github.com/netdata/netdata/pull/17894) ([ktsaou](https://github.com/ktsaou)) -- Improve ml thread termination during agent shutdown [\#17889](https://github.com/netdata/netdata/pull/17889) ([stelfrag](https://github.com/stelfrag)) -- Update netdata-charts.md [\#17888](https://github.com/netdata/netdata/pull/17888) ([Ancairon](https://github.com/Ancairon)) -- Regenerate integrations.js [\#17886](https://github.com/netdata/netdata/pull/17886) ([netdatabot](https://github.com/netdatabot)) -- Restore ML thread termination to original order [\#17885](https://github.com/netdata/netdata/pull/17885) ([stelfrag](https://github.com/stelfrag)) -- go.d intelgpu add an option to select specific GPU [\#17884](https://github.com/netdata/netdata/pull/17884) ([ilyam8](https://github.com/ilyam8)) -- ndsudo update intel\_gpu\_top [\#17883](https://github.com/netdata/netdata/pull/17883) ([ilyam8](https://github.com/ilyam8)) -- add netdata journald configuration [\#17882](https://github.com/netdata/netdata/pull/17882) ([ilyam8](https://github.com/ilyam8)) -- fix detect\_libc in installer [\#17880](https://github.com/netdata/netdata/pull/17880) ([ilyam8](https://github.com/ilyam8)) -- update bundled UI to v6.138.0 [\#17879](https://github.com/netdata/netdata/pull/17879) ([ilyam8](https://github.com/ilyam8)) -- Regenerate integrations.js [\#17878](https://github.com/netdata/netdata/pull/17878) ([netdatabot](https://github.com/netdatabot)) ## [v1.45.6](https://github.com/netdata/netdata/tree/v1.45.6) (2024-06-05) diff --git a/CMakeLists.txt b/CMakeLists.txt index ff79c1d427a60b..b8c1faa3f6e120 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -716,6 +716,8 @@ set(LIBNETDATA_FILES src/libnetdata/dictionary/dictionary-callbacks.h src/libnetdata/linked-lists.h src/libnetdata/storage-point.h + 
src/libnetdata/parsers/parsers.h + src/libnetdata/parsers/duration.c src/libnetdata/bitmap64.h src/libnetdata/os/gettid.c src/libnetdata/os/gettid.h @@ -762,6 +764,31 @@ set(LIBNETDATA_FILES src/libnetdata/paths/paths.c src/libnetdata/paths/paths.h src/libnetdata/json/json-c-parser-inline.c + src/libnetdata/parsers/duration.h + src/libnetdata/parsers/timeframe.c + src/libnetdata/parsers/timeframe.h + src/libnetdata/parsers/size.c + src/libnetdata/parsers/size.h + src/libnetdata/libjudy/judy-malloc.c + src/libnetdata/libjudy/judy-malloc.h + src/libnetdata/config/appconfig_internals.h + src/libnetdata/config/appconfig_exporters.c + src/libnetdata/config/appconfig_conf_file.c + src/libnetdata/config/appconfig_cleanup.c + src/libnetdata/config/appconfig_sections.c + src/libnetdata/config/appconfig_options.c + src/libnetdata/config/appconfig_migrate.c + src/libnetdata/config/appconfig_traversal.c + src/libnetdata/config/appconfig_api_sizes.c + src/libnetdata/config/appconfig_api_sizes.h + src/libnetdata/config/appconfig_api_durations.c + src/libnetdata/config/appconfig_api_durations.h + src/libnetdata/config/appconfig_api_numbers.c + src/libnetdata/config/appconfig_api_numbers.h + src/libnetdata/config/appconfig_api_text.c + src/libnetdata/config/appconfig_api_text.h + src/libnetdata/config/appconfig_api_boolean.c + src/libnetdata/config/appconfig_api_boolean.h ) if(ENABLE_PLUGIN_EBPF) @@ -1301,10 +1328,10 @@ set(CLAIM_PLUGIN_FILES ) set(CLAIM_WINDOWS_FILES - src/claim/netdata_claim.c - src/claim/netdata_claim.h - src/claim/netdata_claim_window.c - src/claim/netdata_claim_window.h + src/claim/main.c + src/claim/main.h + src/claim/ui.c + src/claim/ui.h ) set(ACLK_ALWAYS_BUILD @@ -2241,19 +2268,30 @@ endif() # build netdata (only Linux ATM) # +if(OS_WINDOWS) + set(NETDATA_CLAIM_RES_FILES "packaging/windows/resources/netdata_claim.rc") + configure_file(packaging/windows/resources/netdata_claim.manifest.in 
${CMAKE_SOURCE_DIR}/packaging/windows/resources/netdata_claim.manifest @ONLY) + + set(NETDATACLI_RES_FILES "packaging/windows/resources/netdatacli.rc") + configure_file(packaging/windows/resources/netdatacli.manifest.in ${CMAKE_SOURCE_DIR}/packaging/windows/resources/netdatacli.manifest @ONLY) + + set(NETDATA_RES_FILES "packaging/windows/resources/netdata.rc") + configure_file(packaging/windows/resources/netdata.manifest.in ${CMAKE_SOURCE_DIR}/packaging/windows/resources/netdata.manifest @ONLY) +endif() + add_executable(netdata ${NETDATA_FILES} "${ACLK_FILES}" "$<$:${H2O_FILES}>" "$<$:${MONGODB_EXPORTING_FILES}>" "$<$:${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES}>" + "$<$:${NETDATA_RES_FILES}>" ) if(OS_WINDOWS) - set(NETDATA_CLAIM_RES_FILES "packaging/windows/resources/netdata_claim.rc") - - add_executable(netdata_claim ${CLAIM_WINDOWS_FILES} ${NETDATA_CLAIM_RES_FILES}) - target_link_libraries(netdata_claim shell32;gdi32;msftedit) + add_executable(NetdataClaim ${CLAIM_WINDOWS_FILES} ${NETDATA_CLAIM_RES_FILES}) + target_link_libraries(NetdataClaim shell32;gdi32;msftedit) + target_compile_options(NetdataClaim PUBLIC -mwindows) endif() target_compile_definitions(netdata PRIVATE @@ -2353,7 +2391,7 @@ set(NETDATACLI_FILES src/cli/cli.c ) -add_executable(netdatacli ${NETDATACLI_FILES}) +add_executable(netdatacli ${NETDATACLI_FILES} "$<$:${NETDATACLI_RES_FILES}>") target_link_libraries(netdatacli libnetdata) install(TARGETS netdatacli @@ -2438,15 +2476,15 @@ set(varlibdir_POST "${NETDATA_RUNTIME_PREFIX}/var/lib/netdata") set(netdata_user_POST "${NETDATA_USER}") set(netdata_group_POST "${NETDATA_USER}") -configure_file(src/claim/netdata-claim.sh.in src/claim/netdata-claim.sh @ONLY) -install(PROGRAMS - ${CMAKE_BINARY_DIR}/src/claim/netdata-claim.sh - COMPONENT netdata - DESTINATION "${BINDIR}") - -if(OS_WINDOWS) +if(NOT OS_WINDOWS) + configure_file(src/claim/netdata-claim.sh.in src/claim/netdata-claim.sh @ONLY) install(PROGRAMS - ${CMAKE_BINARY_DIR}/netdata_claim.exe + 
${CMAKE_BINARY_DIR}/src/claim/netdata-claim.sh + COMPONENT netdata + DESTINATION "${BINDIR}") +else() + install(PROGRAMS + ${CMAKE_BINARY_DIR}/NetdataClaim.exe COMPONENT netdata DESTINATION "${BINDIR}") endif() @@ -2865,7 +2903,6 @@ install(FILES src/collectors/python.d.plugin/spigotmc/spigotmc.conf src/collectors/python.d.plugin/traefik/traefik.conf src/collectors/python.d.plugin/varnish/varnish.conf - src/collectors/python.d.plugin/w1sensor/w1sensor.conf src/collectors/python.d.plugin/zscores/zscores.conf COMPONENT plugin-pythond DESTINATION usr/lib/netdata/conf.d/python.d) @@ -2881,7 +2918,6 @@ install(FILES src/collectors/python.d.plugin/spigotmc/spigotmc.chart.py src/collectors/python.d.plugin/traefik/traefik.chart.py src/collectors/python.d.plugin/varnish/varnish.chart.py - src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py src/collectors/python.d.plugin/zscores/zscores.chart.py COMPONENT plugin-pythond DESTINATION usr/libexec/netdata/python.d) @@ -3003,65 +3039,6 @@ if(NOT OS_WINDOWS) endif() if(OS_WINDOWS) - install(FILES /usr/bin/awk.exe - /usr/bin/bash.exe - /usr/bin/cat.exe - /usr/bin/chown.exe - /usr/bin/curl.exe - /usr/bin/env.exe - /usr/bin/grep.exe - /usr/bin/mkdir.exe - /usr/bin/openssl.exe - /usr/bin/rm.exe - /usr/bin/sed.exe - /usr/bin/sh.exe - /usr/bin/tail.exe - /usr/bin/tr.exe - /usr/bin/uuidgen.exe - /usr/bin/whoami.exe - DESTINATION "${BINDIR}") - - install(FILES /usr/bin/msys-2.0.dll - /usr/bin/msys-asn1-8.dll - /usr/bin/msys-brotlicommon-1.dll - /usr/bin/msys-brotlidec-1.dll - /usr/bin/msys-brotlienc-1.dll - /usr/bin/msys-com_err-1.dll - /usr/bin/msys-crypt-2.dll - /usr/bin/msys-crypto-3.dll - /usr/bin/msys-curl-4.dll - /usr/bin/msys-gcc_s-seh-1.dll - /usr/bin/msys-gmp-10.dll - /usr/bin/msys-gssapi-3.dll - /usr/bin/msys-hcrypto-4.dll - /usr/bin/msys-heimbase-1.dll - /usr/bin/msys-heimntlm-0.dll - /usr/bin/msys-hx509-5.dll - /usr/bin/msys-iconv-2.dll - /usr/bin/msys-idn2-0.dll - /usr/bin/msys-intl-8.dll - /usr/bin/msys-krb5-26.dll - 
/usr/bin/msys-lz4-1.dll - /usr/bin/msys-mpfr-6.dll - /usr/bin/msys-ncursesw6.dll - /usr/bin/msys-nghttp2-14.dll - /usr/bin/msys-pcre-1.dll - /usr/bin/msys-protobuf-32.dll - /usr/bin/msys-psl-5.dll - /usr/bin/msys-readline8.dll - /usr/bin/msys-roken-18.dll - /usr/bin/msys-sqlite3-0.dll - /usr/bin/msys-ssh2-1.dll - /usr/bin/msys-ssl-3.dll - /usr/bin/msys-stdc++-6.dll - /usr/bin/msys-unistring-5.dll - /usr/bin/msys-uuid-1.dll - /usr/bin/msys-uv-1.dll - /usr/bin/msys-wind-0.dll - /usr/bin/msys-z.dll - /usr/bin/msys-zstd-1.dll - DESTINATION "${BINDIR}") - # Make bash & netdata happy install(DIRECTORY DESTINATION tmp) diff --git a/README.md b/README.md index 8e510a4e826e26..d3d9ef4fc70b44 100644 --- a/README.md +++ b/README.md @@ -398,7 +398,7 @@ This is what you should expect: - For production systems, each Netdata Agent with default settings (everything enabled, ML, Health, DB) should consume about 5% CPU utilization of one core and about 150 MiB or RAM. - By using a Netdata parent and streaming all metrics to that parent, you can disable ML & health and use an ephemeral DB mode (like `alloc`) on the children, leading to utilization of about 1% CPU of a single core and 100 MiB of RAM. Of course, these depend on how many metrics are collected. + By using a Netdata parent and streaming all metrics to that parent, you can disable ML & health and use an ephemeral DB (like `alloc`) on the children, leading to utilization of about 1% CPU of a single core and 100 MiB of RAM. Of course, these depend on how many metrics are collected. - For Netdata Parents, for about 1 to 2 million metrics, all collected every second, we suggest a server with 16 cores and 32GB RAM. Less than half of it will be used for data collection and ML. The rest will be available for queries. 
diff --git a/docs/deployment-guides/deployment-strategies.md b/docs/deployment-guides/deployment-strategies.md index 1a3c67164792fa..017aaa0c28a6da 100644 --- a/docs/deployment-guides/deployment-strategies.md +++ b/docs/deployment-guides/deployment-strategies.md @@ -95,23 +95,18 @@ On the Parent, edit `netdata.conf` by using the [edit-config](/docs/netdata-agen ```yaml [db] mode = dbengine + dbengine tier backfill = new storage tiers = 3 - # To allow memory pressure to offload index from ram - dbengine page descriptors in file mapped memory = yes + dbengine page cache size = 1.4GiB # storage tier 0 update every = 1 - dbengine multihost disk space MB = 12000 - dbengine page cache size MB = 1400 + dbengine tier 0 retention space = 12GiB # storage tier 1 - dbengine tier 1 page cache size MB = 512 - dbengine tier 1 multihost disk space MB = 4096 dbengine tier 1 update every iterations = 60 - dbengine tier 1 backfill = new + dbengine tier 1 retention space = 4GiB # storage tier 2 - dbengine tier 2 page cache size MB = 128 - dbengine tier 2 multihost disk space MB = 2048 dbengine tier 2 update every iterations = 60 - dbengine tier 2 backfill = new + dbengine tier 2 retention space = 2GiB [ml] # Enabled by default # enabled = yes diff --git a/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md b/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md index 8a8659eff46c87..35eebbf920841a 100644 --- a/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md +++ b/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md @@ -7,9 +7,9 @@ space**. 
This provides greater control and helps you optimize storage usage for | Tier | Resolution | Time Limit | Size Limit (min 256 MB) | |:----:|:-------------------:|:----------:|:-----------------------:| -| 0 | high (per second) | 14 days | 1 GiB | -| 1 | middle (per minute) | 3 months | 1 GiB | -| 2 | low (per hour) | 2 years | 1 GiB | +| 0 | high (per second) | 14d | 1 GiB | +| 1 | middle (per minute) | 3mo | 1 GiB | +| 2 | low (per hour) | 2y | 1 GiB | > **Note**: If a user sets a disk space size less than 256 MB for a tier, Netdata will automatically adjust it to 256 MB. @@ -17,7 +17,7 @@ With these defaults, Netdata requires approximately 4 GiB of storage space (incl ## Retention Settings -> **In a parent-child setup**, these settings manage the shared storage space utilized by the Netdata parent agent for +> **In a parent-child setup**, these settings manage the shared storage space used by the Netdata parent agent for > storing metrics collected by both the parent and its child nodes. You can fine-tune retention for each tier by setting a time limit or size limit. Setting a limit to 0 disables it, @@ -38,16 +38,16 @@ You can change these limits in `netdata.conf`: storage tiers = 3 # Tier 0, per second data. Set to 0 for no limit. - dbengine tier 0 disk space MB = 1024 - dbengine tier 0 retention days = 14 + dbengine tier 0 retention size = 1GiB + dbengine tier 0 retention time = 14d # Tier 1, per minute data. Set to 0 for no limit. - dbengine tier 1 disk space MB = 1024 - dbengine tier 1 retention days = 90 + dbengine tier 1 retention size = 1GiB + dbengine tier 1 retention time = 3mo # Tier 2, per hour data. Set to 0 for no limit. 
- dbengine tier 2 disk space MB = 1024 - dbengine tier 2 retention days = 730 + dbengine tier 2 retention size = 1GiB + dbengine tier 2 retention time = 2y ``` ## Monitoring Retention Utilization @@ -58,6 +58,24 @@ your storage space (disk space limits) and time (time limits) are used for metri ## Legacy configuration +### v1.99.0 and prior + +Netdata prior to v2 supports the following configuration options in `netdata.conf`. +They have the same defaults as the latest v2, but the unit of each value is given in the option name, not at the value. + +``` +storage tiers = 3 +# Tier 0, per second data. Set to 0 for no limit. +dbengine tier 0 disk space MB = 1024 +dbengine tier 0 retention days = 14 +# Tier 1, per minute data. Set to 0 for no limit. +dbengine tier 1 disk space MB = 1024 +dbengine tier 1 retention days = 90 +# Tier 2, per hour data. Set to 0 for no limit. +dbengine tier 2 disk space MB = 1024 +dbengine tier 2 retention days = 730 +``` + ### v1.45.6 and prior Netdata versions prior to v1.46.0 relied on a disk space-based retention. @@ -76,13 +94,10 @@ You can change these limits in `netdata.conf`: [db] mode = dbengine storage tiers = 3 - # Tier 0, per second data dbengine multihost disk space MB = 256 - # Tier 1, per minute data dbengine tier 1 multihost disk space MB = 1024 - # Tier 2, per hour data dbengine tier 2 multihost disk space MB = 1024 ``` @@ -113,6 +128,7 @@ If `dbengine disk space MB`(**deprecated**) is set to the default `256`, each in which means the total disk space required to store all instances is, roughly, `256 MiB * 1 parent * 4 child nodes = 1280 MiB`. 
+ #### Backward compatibility All existing metrics belonging to child nodes are automatically converted to legacy dbengine instances and the localhost diff --git a/docs/netdata-agent/sizing-netdata-agents/ram-requirements.md b/docs/netdata-agent/sizing-netdata-agents/ram-requirements.md index 8d8522517847bc..73316575eb9529 100644 --- a/docs/netdata-agent/sizing-netdata-agents/ram-requirements.md +++ b/docs/netdata-agent/sizing-netdata-agents/ram-requirements.md @@ -34,8 +34,8 @@ about 16 GiB There are 2 cache sizes that can be configured in `netdata.conf`: -1. `[db].dbengine page cache size MB`: this is the main cache that keeps metrics data into memory. When data are not found in it, the extent cache is consulted, and if not found in that either, they are loaded from disk. -2. `[db].dbengine extent cache size MB`: this is the compressed extent cache. It keeps in memory compressed data blocks, as they appear on disk, to avoid reading them again. Data found in the extend cache but not in the main cache have to be uncompressed to be queried. +1. `[db].dbengine page cache size`: this is the main cache that keeps metrics data into memory. When data are not found in it, the extent cache is consulted, and if not found in that either, they are loaded from disk. +2. `[db].dbengine extent cache size`: this is the compressed extent cache. It keeps in memory compressed data blocks, as they appear on disk, to avoid reading them again. Data found in the extend cache but not in the main cache have to be uncompressed to be queried. Both of them are dynamically adjusted to use some of the total memory computed above. The configuration in `netdata.conf` allows providing additional memory to them, increasing their caching efficiency. 
diff --git a/docs/observability-centralization-points/metrics-centralization-points/clustering-and-high-availability-of-netdata-parents.md b/docs/observability-centralization-points/metrics-centralization-points/clustering-and-high-availability-of-netdata-parents.md index 17a10b02efa3fc..412263bebac4db 100644 --- a/docs/observability-centralization-points/metrics-centralization-points/clustering-and-high-availability-of-netdata-parents.md +++ b/docs/observability-centralization-points/metrics-centralization-points/clustering-and-high-availability-of-netdata-parents.md @@ -45,6 +45,6 @@ The easiest way is to `rsync` the directory `/var/cache/netdata` from the existi To configure retention at the new Netdata Parent, set in `netdata.conf` the following to at least the values the old Netdata Parent has: -- `[db].dbengine multihost disk space MB`, this is the max disk size for `tier0`. The default is 256MiB. -- `[db].dbengine tier 1 multihost disk space MB`, this is the max disk space for `tier1`. The default is 50% of `tier0`. -- `[db].dbengine tier 2 multihost disk space MB`, this is the max disk space for `tier2`. The default is 50% of `tier1`. +- `[db].dbengine tier 0 retention size`, this is the max disk size for `tier0`. The default is 1GiB. +- `[db].dbengine tier 1 retention size`, this is the max disk space for `tier1`. The default is 1GiB. +- `[db].dbengine tier 2 retention size`, this is the max disk space for `tier2`. The default is 1GiB. 
diff --git a/docs/observability-centralization-points/metrics-centralization-points/replication-of-past-samples.md b/docs/observability-centralization-points/metrics-centralization-points/replication-of-past-samples.md index 5c776b8606d4da..e0c60e89fe2f17 100644 --- a/docs/observability-centralization-points/metrics-centralization-points/replication-of-past-samples.md +++ b/docs/observability-centralization-points/metrics-centralization-points/replication-of-past-samples.md @@ -45,13 +45,13 @@ The following `netdata.conf` configuration parameters affect replication. On the receiving side (Netdata Parent): -- `[db].seconds to replicate` limits the maximum time to be replicated. The default is 1 day (86400 seconds). Keep in mind that replication is also limited by the `tier0` retention the sending side has. +- `[db].replication period` limits the maximum time to be replicated. The default is 1 day. Keep in mind that replication is also limited by the `tier0` retention the sending side has. On the sending side (Netdata Children, or Netdata Parent when parents are clustered): - `[db].replication threads` controls how many concurrent threads will be replicating metrics. The default is 1. Usually the performance is about 2 million samples per second per thread, so increasing this number may allow replication to progress faster between Netdata Parents. -- `[db].cleanup obsolete charts after secs` controls for how much time after metrics stop being collected will not be available for replication. The default is 1 hour (3600 seconds). If you plan to have scheduled maintenance on Netdata Parents of more than 1 hour, we recommend increasing this setting. Keep in mind however, that increasing this duration in highly ephemeral environments can have an impact on RAM utilization, since metrics will be considered as collected for longer durations. 
+- `[db].cleanup obsolete charts after` controls for how much time after metrics stop being collected will not be available for replication. The default is 1 hour (3600 seconds). If you plan to have scheduled maintenance on Netdata Parents of more than 1 hour, we recommend increasing this setting. Keep in mind however, that increasing this duration in highly ephemeral environments can have an impact on RAM utilization, since metrics will be considered as collected for longer durations. ## Monitoring Replication Progress diff --git a/integrations/integrations.js b/integrations/integrations.js index 8b93f26a35feae..b700569f21a26a 100644 --- a/integrations/integrations.js +++ b/integrations/integrations.js @@ -1308,7 +1308,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from `/proc/self/mountinfo` and `/proc/diskstats file`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per mount point `[plugin:proc:diskspace:mountpoint]`\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. 
| auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per mount point `[plugin:proc:diskspace:mountpoint]`\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15s | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. 
| msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", @@ -16482,7 +16482,7 @@ export const integrations = [ "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/sensors.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `sensors` binary. If left empty or if the binary is not found, [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) will be used to collect sensor statistics. | /usr/bin/sensors | yes |\n| timeout | Timeout for executing the binary, specified in seconds. 
| 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: sensors\n binary_path: /usr/local/sbin/sensors\n\n```\n{% /details %}\n##### Use sysfs instead of sensors\n\nSet `binary_path` to an empty string to use sysfs.\n\n{% details open=true summary=\"Config\" %}\n```yaml\njobs:\n - name: sensors\n binary_path: \"\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `sensors` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m sensors\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep sensors\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep sensors /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep sensors\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the sensor.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| chip | The hardware component responsible for the sensor monitoring. |\n| feature | The specific sensor or monitoring point provided by the chip. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.sensor_temperature | temperature | Celsius |\n| sensors.sensor_voltage | voltage | Volts |\n| sensors.sensor_current | current | Amperes |\n| sensors.sensor_power | power | Watts |\n| sensors.sensor_fan_speed | fan | RPM |\n| sensors.sensor_energy | energy | Joules |\n| sensors.sensor_humidity | humidity | percent |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the sensor.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| chip | The hardware component responsible for the sensor monitoring. |\n| feature | The specific sensor or monitoring point provided by the chip. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.sensor_temperature | temperature | Celsius |\n| sensors.sensor_voltage | voltage | Volts |\n| sensors.sensor_current | current | Amperes |\n| sensors.sensor_power | power | Watts |\n| sensors.sensor_fan_speed | fan | RPM |\n| sensors.sensor_energy | energy | Joules |\n| sensors.sensor_humidity | humidity | percent |\n| sensors.sensor_intrusion | alarm_clear, alarm_triggered | status |\n\n", "integration_type": "collector", "id": "go.d.plugin-sensors-Linux_Sensors_(lm-sensors)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/sensors/metadata.yaml", @@ -17125,6 +17125,43 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/vsphere/metadata.yaml", "related_resources": "" }, + { + "meta": { + "plugin_name": "go.d.plugin", + "module_name": "w1sensor", + "monitored_instance": { + "name": "1-Wire Sensors", + "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", + "categories": [ + "data-collection.hardware-devices-and-sensors" + ], + "icon_filename": "1-wire.png" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "temperature", + "sensor", + "1-wire" + ], + "most_popular": false + }, + "overview": "# 1-Wire Sensors\n\nPlugin: go.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. 
Enhance your environmental monitoring with real-time insights and alerts.\n\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/w1sensor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| sensors_path | Directory path containing sensor folders with w1_slave files. 
| /sys/bus/w1/devices | no |\n\n{% /details %}\n#### Examples\n\n##### Custom sensor device path\n\nMonitors a virtual sensor when the w1_slave file is located in a custom directory instead of the default location.\n\n```yaml\njobs:\n - name: custom_sensors_path\n sensors_path: /custom/path/devices\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `w1sensor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m w1sensor\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep w1sensor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep w1sensor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep w1sensor\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the 1-Wire Sensor.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temperature | temperature | Celsius |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-w1sensor-1-Wire_Sensors", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/w1sensor/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-web_log", @@ -17618,7 +17655,7 @@ export const integrations = [ "most_popular": false }, "overview": "# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep, is the idle jitter. 
This is done dozens of times per second to ensure we have a representative sample.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20ms | no |\n\n#### Examples\nThere are no configuration examples.\n\n",
    "troubleshooting": "",
    "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n",
    "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n", @@ -19299,43 +19336,6 @@ export const integrations = [ "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/metadata.yaml", "related_resources": "" }, - { - "meta": { - "plugin_name": "python.d.plugin", - "module_name": "w1sensor", - "monitored_instance": { - "name": "1-Wire Sensors", - "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", - "categories": [ - "data-collection.hardware-devices-and-sensors" - ], - "icon_filename": "1-wire.png" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "temperature", - "sensor", - "1-wire" - ], - "most_popular": false - }, - "overview": "# 1-Wire Sensors\n\nPlugin: python.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. 
Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/w1sensor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details open=true summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. 
| 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no |\n\n{% /details %}\n#### Examples\n\n##### Provide human readable names\n\nAssociate two 1-Wire identifiers with human readable names.\n\n```yaml\nsensors:\n name_00000022276e: 'Machine room'\n name_00000022298f: 'Rack 12'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin w1sensor debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep w1sensor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep w1sensor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep w1sensor\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per 1-Wire Sensors instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temp | a dimension per sensor | Celsius |\n\n", - "integration_type": "collector", - "id": "python.d.plugin-w1sensor-1-Wire_Sensors", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/metadata.yaml", - "related_resources": "" - }, { "meta": { "plugin_name": "python.d.plugin", @@ -21806,6 +21806,25 @@ export const integrations = [ "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml" }, + { + "id": "notify-ilert", + "meta": { + "name": "ilert", + "link": "https://www.ilert.com/", + "categories": [ + "notify.agent" + ], + "icon_filename": "ilert.svg" + }, + "keywords": [ + "ilert" + ], + "overview": "# ilert\n\nilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates.\nSending notification to ilert via Netdata's Agent alert notification feature includes links, images and resolving of corresponding alerts.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Netdata alert source in ilert. 
You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details open=true summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ILERT | Set `SEND_ILERT` to YES | YES | yes |\n| ILERT_ALERT_SOURCE_URL | Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source url in ilert. | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_ILERT=\"YES\"\nILERT_ALERT_SOURCE_URL=\"https://api.ilert.com/api/v1/events/netdata/{API-KEY}\"\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", + "integration_type": "notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ilert/metadata.yaml" + }, { "id": "notify-irc", "meta": { diff --git a/integrations/integrations.json b/integrations/integrations.json 
index 21b18b4b0ddd60..4791b4a6f0802d 100644 --- a/integrations/integrations.json +++ b/integrations/integrations.json @@ -1306,7 +1306,7 @@ "most_popular": false }, "overview": "# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from `/proc/self/mountinfo` and `/proc/diskstats file`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per mount point `[plugin:proc:diskspace:mountpoint]`\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. 
| 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per mount point `[plugin:proc:diskspace:mountpoint]`\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |\n| check for new mount points every | Parse proc files frequency. | 15s | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define if plugin will show metrics for space usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. | auto | no |\n| inodes usage for all disks | Define if plugin will show metrics for inode usage. When value is set to `auto` plugin will try to access information to display if filesystem or path was not discarded with previous option. 
| auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", @@ -16480,7 +16480,7 @@ "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/sensors.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| binary_path | Path to the `sensors` binary. 
If left empty or if the binary is not found, [sysfs](https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface) will be used to collect sensor statistics. | /usr/bin/sensors | yes |\n| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: sensors\n binary_path: /usr/local/sbin/sensors\n\n```\n##### Use sysfs instead of sensors\n\nSet `binary_path` to an empty string to use sysfs.\n\n```yaml\njobs:\n - name: sensors\n binary_path: \"\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `sensors` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m sensors\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep sensors\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep sensors /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep sensors\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the sensor.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| chip | The hardware component responsible for the sensor monitoring. |\n| feature | The specific sensor or monitoring point provided by the chip. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.sensor_temperature | temperature | Celsius |\n| sensors.sensor_voltage | voltage | Volts |\n| sensors.sensor_current | current | Amperes |\n| sensors.sensor_power | power | Watts |\n| sensors.sensor_fan_speed | fan | RPM |\n| sensors.sensor_energy | energy | Joules |\n| sensors.sensor_humidity | humidity | percent |\n\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the sensor.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| chip | The hardware component responsible for the sensor monitoring. |\n| feature | The specific sensor or monitoring point provided by the chip. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.sensor_temperature | temperature | Celsius |\n| sensors.sensor_voltage | voltage | Volts |\n| sensors.sensor_current | current | Amperes |\n| sensors.sensor_power | power | Watts |\n| sensors.sensor_fan_speed | fan | RPM |\n| sensors.sensor_energy | energy | Joules |\n| sensors.sensor_humidity | humidity | percent |\n| sensors.sensor_intrusion | alarm_clear, alarm_triggered | status |\n\n", "integration_type": "collector", "id": "go.d.plugin-sensors-Linux_Sensors_(lm-sensors)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/sensors/metadata.yaml", @@ -17123,6 +17123,43 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/vsphere/metadata.yaml", "related_resources": "" }, + { + "meta": { + "plugin_name": "go.d.plugin", + "module_name": "w1sensor", + "monitored_instance": { + "name": "1-Wire Sensors", + "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", + "categories": [ + "data-collection.hardware-devices-and-sensors" + ], + "icon_filename": "1-wire.png" + }, + "related_resources": { + "integrations": { + "list": [] + } + }, + "info_provided_to_referring_integrations": { + "description": "" + }, + "keywords": [ + "temperature", + "sensor", + "1-wire" + ], + "most_popular": false + }, + "overview": "# 1-Wire Sensors\n\nPlugin: go.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. 
Enhance your environmental monitoring with real-time insights and alerts.\n\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/w1sensor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| sensors_path | Directory path containing sensor folders with w1_slave files. 
| /sys/bus/w1/devices | no |\n\n#### Examples\n\n##### Custom sensor device path\n\nMonitors a virtual sensor when the w1_slave file is located in a custom directory instead of the default location.\n\n```yaml\njobs:\n - name: custom_sensors_path\n sensors_path: /custom/path/devices\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.\n\nTo troubleshoot issues with the `w1sensor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m w1sensor\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep w1sensor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep w1sensor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep w1sensor\n```\n\n", + "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", + "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor\n\nThese metrics refer to the 1-Wire Sensor.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temperature | temperature | Celsius |\n\n", + "integration_type": "collector", + "id": "go.d.plugin-w1sensor-1-Wire_Sensors", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/w1sensor/metadata.yaml", + "related_resources": "" + }, { "meta": { "id": "collector-go.d.plugin-web_log", @@ -17616,7 +17653,7 @@ "most_popular": false }, "overview": "# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep, is the idle jitter. This is done dozens of times per second to ensure we have a representative sample.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time | Specifies the target time for the data collection thread to sleep, measured in miliseconds. 
| 20ms | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n", @@ -19297,43 +19334,6 @@ "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/metadata.yaml", "related_resources": "" }, - { - "meta": { - "plugin_name": "python.d.plugin", - "module_name": "w1sensor", - "monitored_instance": { - "name": "1-Wire Sensors", - "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", - "categories": [ - "data-collection.hardware-devices-and-sensors" - ], - "icon_filename": "1-wire.png" - }, - "related_resources": { - "integrations": { - "list": [] - } - }, - "info_provided_to_referring_integrations": { - "description": "" - }, - "keywords": [ - "temperature", - "sensor", - "1-wire" - ], - "most_popular": false - }, - "overview": "# 1-Wire Sensors\n\nPlugin: python.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. 
Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", - "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/w1sensor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. 
| 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no |\n\n#### Examples\n\n##### Provide human readable names\n\nAssociate two 1-Wire identifiers with human readable names.\n\n```yaml\nsensors:\n name_00000022276e: 'Machine room'\n name_00000022298f: 'Rack 12'\n\n```\n", - "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\nTo troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin w1sensor debug trace\n ```\n\n### Getting Logs\n\nIf you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:\n\n- **Run the command** specific to your system (systemd, non-systemd, or Docker container).\n- **Examine the output** for any warnings or error messages that might indicate issues. 
These messages should provide clues about the root cause of the problem.\n\n#### System with systemd\n\nUse the following command to view logs generated since the last Netdata service restart:\n\n```bash\njournalctl _SYSTEMD_INVOCATION_ID=\"$(systemctl show --value --property=InvocationID netdata)\" --namespace=netdata --grep w1sensor\n```\n\n#### System without systemd\n\nLocate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:\n\n```bash\ngrep w1sensor /var/log/netdata/collector.log\n```\n\n**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.\n\n#### Docker Container\n\nIf your Netdata runs in a Docker container named \"netdata\" (replace if different), use this command:\n\n```bash\ndocker logs netdata 2>&1 | grep w1sensor\n```\n\n", - "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", - "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per 1-Wire Sensors instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temp | a dimension per sensor | Celsius |\n\n", - "integration_type": "collector", - "id": "python.d.plugin-w1sensor-1-Wire_Sensors", - "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/metadata.yaml", - "related_resources": "" - }, { "meta": { "plugin_name": "python.d.plugin", @@ -21804,6 +21804,25 @@ "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml" }, + { + "id": "notify-ilert", + "meta": { + "name": "ilert", + "link": "https://www.ilert.com/", + "categories": [ + "notify.agent" + ], + "icon_filename": "ilert.svg" + }, + "keywords": [ + "ilert" + ], + "overview": "# ilert\n\nilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates.\nSending notification to ilert via Netdata's Agent alert notification feature includes links, images and resolving of corresponding alerts.\n\n", + "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Netdata alert source in ilert. 
You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ILERT | Set `SEND_ILERT` to YES | YES | yes |\n| ILERT_ALERT_SOURCE_URL | Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source url in ilert. | | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_ILERT=\"YES\"\nILERT_ALERT_SOURCE_URL=\"https://api.ilert.com/api/v1/events/netdata/{API-KEY}\"\n\n```\n", + "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", + "integration_type": "notification", + "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ilert/metadata.yaml" + }, { "id": "notify-irc", "meta": { diff --git a/packaging/cmake/Modules/NetdataEBPFCORE.cmake b/packaging/cmake/Modules/NetdataEBPFCORE.cmake index f4c918bfe2e848..5152447a68c214 
100644 --- a/packaging/cmake/Modules/NetdataEBPFCORE.cmake +++ b/packaging/cmake/Modules/NetdataEBPFCORE.cmake @@ -11,8 +11,8 @@ set(ebpf-co-re_SOURCE_DIR "${CMAKE_BINARY_DIR}/ebpf-co-re") function(netdata_fetch_ebpf_co_re) ExternalProject_Add( ebpf-co-re - URL https://github.com/netdata/ebpf-co-re/releases/download/v1.4.5.1/netdata-ebpf-co-re-glibc-v1.4.5.1.tar.xz - URL_HASH SHA256=10d49602c873932a4e0a3717a4af2137434b480d0170c2fb000ec70ae02f6e30 + URL https://github.com/netdata/ebpf-co-re/releases/download/v1.4.6/netdata-ebpf-co-re-glibc-v1.4.6.tar.xz + URL_HASH SHA256=e5ec1992132e7c4bb2772601cf2e131a77a199d77a2ec44177e8282d1ed42e57 SOURCE_DIR "${ebpf-co-re_SOURCE_DIR}" CONFIGURE_COMMAND "" BUILD_COMMAND "" diff --git a/packaging/cmake/Modules/NetdataEBPFLegacy.cmake b/packaging/cmake/Modules/NetdataEBPFLegacy.cmake index 12dfce4865687a..19b6aa20fa721d 100644 --- a/packaging/cmake/Modules/NetdataEBPFLegacy.cmake +++ b/packaging/cmake/Modules/NetdataEBPFLegacy.cmake @@ -20,19 +20,19 @@ function(netdata_fetch_legacy_ebpf_code) endif() if(need_static) - set(_hash 1c0c8f1177514e9e21a23c28841406595e57b7cfacd93746ff2d6b25987b94a6) + set(_hash 944ed28a95443c8476a907cad8275af27be818bdf25713126650297baebcfe9b) set(_libc "static") elseif(_libc STREQUAL "glibc") - set(_hash e365a76a2bb25190f1d91e4dea2cfc5ff5db63b5238fbfbc89f72755cf85a12c) + set(_hash 204e817978cd6a7985161cbcec4af5731bcd67c860a1c20236f73c9703860a29) elseif(_libc STREQUAL "musl") - set(_hash ec14dcdfa29d4fba1cea6763740b9d37683515bde88a1a29b6e7c70ce01a604d) + set(_hash 4ce0f9ebe25b1921de79924a3684dd9d0beae87b01a0d1690a3a1e303682a409) else() message(FATAL_ERROR "Could not determine libc implementation, unable to install eBPF legacy code.") endif() ExternalProject_Add( ebpf-code-legacy - URL https://github.com/netdata/kernel-collector/releases/download/v1.4.5.1/netdata-kernel-collector-${_libc}-v1.4.5.1.tar.xz + URL 
https://github.com/netdata/kernel-collector/releases/download/v1.4.6/netdata-kernel-collector-${_libc}-v1.4.6.tar.xz URL_HASH SHA256=${_hash} SOURCE_DIR "${ebpf-legacy_SOURCE_DIR}" CONFIGURE_COMMAND "" diff --git a/packaging/cmake/Modules/NetdataLibBPF.cmake b/packaging/cmake/Modules/NetdataLibBPF.cmake index 9c3bf6d2f4cb51..8e60c7cf863874 100644 --- a/packaging/cmake/Modules/NetdataLibBPF.cmake +++ b/packaging/cmake/Modules/NetdataLibBPF.cmake @@ -31,7 +31,7 @@ function(netdata_bundle_libbpf) if(USE_LEGACY_LIBBPF) set(_libbpf_tag 673424c56127bb556e64095f41fd60c26f9083ec) # v0.0.9_netdata-1 else() - set(_libbpf_tag 6923eb970e22682eaedff79f5be4f9934b99cf50) # v1.4.5p_netdata + set(_libbpf_tag 057f85d00029b2d2b71676b68c4fab961dc6dc96) # v1.4.6p_netdata endif() if(DEFINED BUILD_SHARED_LIBS) diff --git a/packaging/dag/files/child_stream.conf b/packaging/dag/files/child_stream.conf index ed78bd3fbd55b7..4e37d0a91c1dfd 100644 --- a/packaging/dag/files/child_stream.conf +++ b/packaging/dag/files/child_stream.conf @@ -2,9 +2,9 @@ enabled = {{ enabled }} destination = {{ destination }} api key = {{ api_key }} - timeout seconds = {{ timeout_seconds }} + timeout = {{ timeout_seconds }} default port = {{ default_port }} send charts matching = {{ send_charts_matching }} buffer size bytes = {{ buffer_size_bytes }} - reconnect delay seconds = {{ reconnect_delay_seconds }} + reconnect delay = {{ reconnect_delay_seconds }} initial clock resync iterations = {{ initial_clock_resync_iterations }} diff --git a/packaging/dag/files/parent_stream.conf b/packaging/dag/files/parent_stream.conf index 15f303f97ba6e8..4c190a7589cf52 100644 --- a/packaging/dag/files/parent_stream.conf +++ b/packaging/dag/files/parent_stream.conf @@ -1,7 +1,7 @@ [{{ api_key }}] enabled = {{ enabled }} allow from = {{ allow_from }} - default history = {{ default_history }} + retention = {{ default_history }} health enabled by default = {{ health_enabled_by_default }} - default postpone alarms on connect seconds = 
{{ default_postpone_alarms_on_connect_seconds }} + postpone alerts on connect = {{ default_postpone_alarms_on_connect_seconds }} multiple connections = {{ multiple_connections }} diff --git a/packaging/installer/methods/kubernetes.md b/packaging/installer/methods/kubernetes.md index 6a0dee98ad3f8a..0666cef0d70b2f 100644 --- a/packaging/installer/methods/kubernetes.md +++ b/packaging/installer/methods/kubernetes.md @@ -87,9 +87,9 @@ On an existing installation, in order to connect it to Netdata Cloud you will ne configs: netdata: data: | - [global] - memory mode = ram - history = 3600 + [db] + db = ram + retention = 3600 [health] enabled = no ``` @@ -103,7 +103,7 @@ On an existing installation, in order to connect it to Netdata Cloud you will ne > :bookmark_tabs: Info > - > These override settings, along with the Helm chart's defaults, will retain an hour's worth of metrics (`history = 3600`, or `3600 seconds`) on each child node. Based on your metrics retention needs, and the resources available on your cluster, you may want to increase the `history` setting. + > These override settings, along with the Helm chart's defaults, will retain an hour's worth of metrics (`retention = 3600`, or `3600 seconds`) on each child node. Based on your metrics retention needs, and the resources available on your cluster, you may want to increase the `history` setting. 3. 
To apply these new settings, run: diff --git a/packaging/version b/packaging/version index 928b4707aaf6c2..f91e84c6e0bb4e 100644 --- a/packaging/version +++ b/packaging/version @@ -1 +1 @@ -v1.99.0-53-nightly +v1.99.0-91-nightly diff --git a/packaging/windows/installer.nsi b/packaging/windows/installer.nsi index 3268d8a786a8de..93a0444bdf9c61 100644 --- a/packaging/windows/installer.nsi +++ b/packaging/windows/installer.nsi @@ -117,7 +117,7 @@ Function NetdataConfigLeave ${If} $0 == 135 ${AndIf} $1 >= 36 - nsExec::ExecToLog '$INSTDIR\usr\bin\netdata_claim.exe /T $cloudToken /R $cloudRooms /P $proxy /I $insecure' + nsExec::ExecToLog '$INSTDIR\usr\bin\NetdataClaim.exe /T $cloudToken /R $cloudRooms /P $proxy /I $insecure' pop $0 ${Else} MessageBox MB_OK "The Cloud information does not have the expected length." diff --git a/packaging/windows/package-windows.sh b/packaging/windows/package-windows.sh index 03f72a692fb70a..3a2a3068f3a305 100755 --- a/packaging/windows/package-windows.sh +++ b/packaging/windows/package-windows.sh @@ -18,6 +18,16 @@ fi set -exu -o pipefail +# Regenerate keys everytime there is an update +if [ -d /opt/netdata/etc/pki/ ]; then + rm -rf /opt/netdata/etc/pki/ +fi + +# Remove previous installation of msys2 script +if [ -f /opt/netdata/usr/bin/bashbug ]; then + rm -rf /opt/netdata/usr/bin/bashbug +fi + ${GITHUB_ACTIONS+echo "::group::Installing"} cmake --install "${build}" ${GITHUB_ACTIONS+echo "::endgroup::"} diff --git a/packaging/windows/resources/netdata.manifest.in b/packaging/windows/resources/netdata.manifest.in new file mode 100644 index 00000000000000..da6a2f2ea0ffe6 --- /dev/null +++ b/packaging/windows/resources/netdata.manifest.in @@ -0,0 +1,16 @@ + + + + Netdata is a high-performance, cloud-native, and on-premises observability platform designed to monitor metrics and logs with unparalleled efficiency. 
+ + + + + + + + + diff --git a/packaging/windows/resources/netdata.rc b/packaging/windows/resources/netdata.rc new file mode 100644 index 00000000000000..0bc75d99b356c5 --- /dev/null +++ b/packaging/windows/resources/netdata.rc @@ -0,0 +1,3 @@ +#include "winuser.h" +1 RT_MANIFEST "netdata.manifest" +11 ICON "../NetdataWhite.ico" diff --git a/packaging/windows/resources/netdata_claim.manifest b/packaging/windows/resources/netdata_claim.manifest.in similarity index 91% rename from packaging/windows/resources/netdata_claim.manifest rename to packaging/windows/resources/netdata_claim.manifest.in index f0092df426403f..ae4e040cb29f3e 100644 --- a/packaging/windows/resources/netdata_claim.manifest +++ b/packaging/windows/resources/netdata_claim.manifest.in @@ -1,6 +1,6 @@ - diff --git a/packaging/windows/resources/netdatacli.manifest.in b/packaging/windows/resources/netdatacli.manifest.in new file mode 100644 index 00000000000000..ff2f48d31b0cef --- /dev/null +++ b/packaging/windows/resources/netdatacli.manifest.in @@ -0,0 +1,16 @@ + + + + The netdatacli executable provides a simple way to control the Netdata agent's operation. + + + + + + + + + diff --git a/packaging/windows/resources/netdatacli.rc b/packaging/windows/resources/netdatacli.rc new file mode 100644 index 00000000000000..baa9e8d620b38c --- /dev/null +++ b/packaging/windows/resources/netdatacli.rc @@ -0,0 +1,3 @@ +#include "winuser.h" +1 RT_MANIFEST "netdatacli.manifest" +11 ICON "../NetdataWhite.ico" diff --git a/src/aclk/aclk_capas.c b/src/aclk/aclk_capas.c index f09eb686414c53..734e6907b76904 100644 --- a/src/aclk/aclk_capas.c +++ b/src/aclk/aclk_capas.c @@ -28,8 +28,8 @@ const struct capability *aclk_get_agent_capas() agent_capabilities[2].version = ml_capable() ? 1 : 0; agent_capabilities[2].enabled = ml_enabled(localhost); - agent_capabilities[3].version = enable_metric_correlations ? 
metric_correlations_version : 0; - agent_capabilities[3].enabled = enable_metric_correlations; + agent_capabilities[3].version = metric_correlations_version; + agent_capabilities[3].enabled = 1; agent_capabilities[7].enabled = localhost->health.health_enabled; @@ -44,9 +44,7 @@ struct capability *aclk_get_node_instance_capas(RRDHOST *host) struct capability ni_caps[] = { { .name = "proto", .version = 1, .enabled = 1 }, { .name = "ml", .version = ml_capable(), .enabled = ml_enabled(host) }, - { .name = "mc", - .version = enable_metric_correlations ? metric_correlations_version : 0, - .enabled = enable_metric_correlations }, + { .name = "mc", .version = metric_correlations_version, .enabled = 1 }, { .name = "ctx", .version = 1, .enabled = 1 }, { .name = "funcs", .version = functions ? 1 : 0, .enabled = functions ? 1 : 0 }, { .name = "http_api_v2", .version = HTTP_API_V2_VERSION, .enabled = 1 }, diff --git a/src/claim/claim-with-api.c b/src/claim/claim-with-api.c index 6889053ecd993e..6d6976d1098d71 100644 --- a/src/claim/claim-with-api.c +++ b/src/claim/claim-with-api.c @@ -401,18 +401,7 @@ bool claim_agent_from_environment(void) { } bool claim_agent_from_claim_conf(void) { - static struct config claim_config = { - .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { - .avl_tree = { - .root = NULL, - .compar = appconfig_section_compare - }, - .rwlock = AVL_LOCK_INITIALIZER - } - }; + static struct config claim_config = APPCONFIG_INITIALIZER; static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER; bool ret = false; diff --git a/src/claim/cloud-conf.c b/src/claim/cloud-conf.c index 9ee617130f6018..bfa971b991fe37 100644 --- a/src/claim/cloud-conf.c +++ b/src/claim/cloud-conf.c @@ -2,18 +2,7 @@ #include "claim.h" -struct config cloud_config = { - .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { - .avl_tree = { - .root = NULL, - .compar = appconfig_section_compare - }, - .rwlock 
= AVL_LOCK_INITIALIZER - } -}; +struct config cloud_config = APPCONFIG_INITIALIZER; const char *cloud_config_url_get(void) { return appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "url", DEFAULT_CLOUD_BASE_URL); diff --git a/src/claim/netdata_claim.c b/src/claim/main.c similarity index 99% rename from src/claim/netdata_claim.c rename to src/claim/main.c index 6091ebe91023ad..901f8d81eacab2 100644 --- a/src/claim/netdata_claim.c +++ b/src/claim/main.c @@ -9,7 +9,7 @@ #include #include -#include "netdata_claim.h" +#include "main.h" LPWSTR token = NULL; LPWSTR room = NULL; diff --git a/src/claim/netdata_claim.h b/src/claim/main.h similarity index 92% rename from src/claim/netdata_claim.h rename to src/claim/main.h index f05e05a5a7375c..b5d9e3f89b16aa 100644 --- a/src/claim/netdata_claim.h +++ b/src/claim/main.h @@ -4,7 +4,7 @@ # define NETDATA_CLAIM_H_ 1 #include -#include "netdata_claim_window.h" +#include "ui.h" extern LPWSTR token; extern LPWSTR room; diff --git a/src/claim/netdata-claim.sh.in b/src/claim/netdata-claim.sh.in index 75809292e7bc20..15c166e3f3c529 100755 --- a/src/claim/netdata-claim.sh.in +++ b/src/claim/netdata-claim.sh.in @@ -73,7 +73,7 @@ parse_args() { --claim-rooms) NETDATA_CLAIM_ROOMS="${2}"; shift 1 ;; -rooms=*) NETDATA_CLAIM_ROOMS="$(echo "${1}" | sed 's/^-rooms=//')" ;; --claim-url) NETDATA_CLAIM_URL="${2}"; shift 1 ;; - -url=*) NETDATA_CLAIM_URL="$(echo "${1}" | sed 's/^-url=/')" ;; + -url=*) NETDATA_CLAIM_URL="$(echo "${1}" | sed 's/^-url=//')" ;; --claim-proxy) NETDATA_CLAIM_PROXY="${2}"; shift 1 ;; -proxy=*) NETDATA_CLAIM_PROXY="$(echo "${1}" | sed 's/-proxy=//')" ;; -noproxy|--noproxy) NETDATA_CLAIM_PROXY="none" ;; diff --git a/src/claim/netdata_claim_window.c b/src/claim/ui.c similarity index 99% rename from src/claim/netdata_claim_window.c rename to src/claim/ui.c index 5b8e81335c080d..851e336a472022 100644 --- a/src/claim/netdata_claim_window.c +++ b/src/claim/ui.c @@ -5,7 +5,7 @@ #include #include "richedit.h" #include "tchar.h" 
-#include "netdata_claim.h" +#include "main.h" static LPCTSTR szWindowClass = _T("DesktopApp"); diff --git a/src/claim/netdata_claim_window.h b/src/claim/ui.h similarity index 100% rename from src/claim/netdata_claim_window.h rename to src/claim/ui.h diff --git a/src/collectors/COLLECTORS.md b/src/collectors/COLLECTORS.md index a1dbc2defaf80c..6f2971f5c97e6b 100644 --- a/src/collectors/COLLECTORS.md +++ b/src/collectors/COLLECTORS.md @@ -459,7 +459,7 @@ If you don't see the app/service you'd like to monitor in this list: ### Hardware Devices and Sensors -- [1-Wire Sensors](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md) +- [1-Wire Sensors](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/w1sensor/integrations/1-wire_sensors.md) - [AM2320](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/am2320/integrations/am2320.md) diff --git a/src/collectors/cgroups.plugin/cgroup-discovery.c b/src/collectors/cgroups.plugin/cgroup-discovery.c index 315b0c042394ef..5d3027a47fdf98 100644 --- a/src/collectors/cgroups.plugin/cgroup-discovery.c +++ b/src/collectors/cgroups.plugin/cgroup-discovery.c @@ -23,7 +23,7 @@ struct cgroup *discovered_cgroup_root = NULL; char cgroup_chart_id_prefix[] = "cgroup_"; char services_chart_id_prefix[] = "systemd_"; -char *cgroups_rename_script = NULL; +const char *cgroups_rename_script = NULL; // Shared memory with information from detected cgroups netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf = {NULL, NULL}; diff --git a/src/collectors/cgroups.plugin/cgroup-internals.h b/src/collectors/cgroups.plugin/cgroup-internals.h index d0b6641e247399..cdb5837bd1ae55 100644 --- a/src/collectors/cgroups.plugin/cgroup-internals.h +++ b/src/collectors/cgroups.plugin/cgroup-internals.h @@ -273,7 +273,7 @@ struct discovery_thread { extern struct discovery_thread discovery_thread; -extern char *cgroups_rename_script; +extern const char 
*cgroups_rename_script; extern char cgroup_chart_id_prefix[]; extern char services_chart_id_prefix[]; extern uv_mutex_t cgroup_root_mutex; @@ -313,7 +313,7 @@ extern SIMPLE_PATTERN *enabled_cgroup_renames; extern SIMPLE_PATTERN *systemd_services_cgroups; extern SIMPLE_PATTERN *entrypoint_parent_process_comm; -extern char *cgroups_network_interface_script; +extern const char *cgroups_network_interface_script; extern int cgroups_check; diff --git a/src/collectors/cgroups.plugin/sys_fs_cgroup.c b/src/collectors/cgroups.plugin/sys_fs_cgroup.c index c970119eaf4239..1ef559ad287087 100644 --- a/src/collectors/cgroups.plugin/sys_fs_cgroup.c +++ b/src/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -39,7 +39,7 @@ SIMPLE_PATTERN *search_cgroup_paths = NULL; SIMPLE_PATTERN *enabled_cgroup_renames = NULL; SIMPLE_PATTERN *systemd_services_cgroups = NULL; SIMPLE_PATTERN *entrypoint_parent_process_comm = NULL; -char *cgroups_network_interface_script = NULL; +const char *cgroups_network_interface_script = NULL; int cgroups_check = 0; uint32_t Read_hash = 0; uint32_t Write_hash = 0; @@ -229,13 +229,17 @@ void read_cgroup_plugin_configuration() { throttled_time_hash = simple_hash("throttled_time"); throttled_usec_hash = simple_hash("throttled_usec"); - cgroup_update_every = (int)config_get_number("plugin:cgroups", "update every", localhost->rrd_update_every); - if(cgroup_update_every < localhost->rrd_update_every) + cgroup_update_every = (int)config_get_duration_seconds("plugin:cgroups", "update every", localhost->rrd_update_every); + if(cgroup_update_every < localhost->rrd_update_every) { cgroup_update_every = localhost->rrd_update_every; + config_set_duration_seconds("plugin:cgroups", "update every", localhost->rrd_update_every); + } - cgroup_check_for_new_every = (int)config_get_number("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every); - if(cgroup_check_for_new_every < cgroup_update_every) + cgroup_check_for_new_every = 
(int)config_get_duration_seconds("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every); + if(cgroup_check_for_new_every < cgroup_update_every) { cgroup_check_for_new_every = cgroup_update_every; + config_set_duration_seconds("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every); + } cgroup_use_unified_cgroups = config_get_boolean_ondemand("plugin:cgroups", "use unified cgroups", CONFIG_BOOLEAN_AUTO); if (cgroup_use_unified_cgroups == CONFIG_BOOLEAN_AUTO) diff --git a/src/collectors/diskspace.plugin/integrations/disk_space.md b/src/collectors/diskspace.plugin/integrations/disk_space.md index 61015120daf74d..7a677721afdd5c 100644 --- a/src/collectors/diskspace.plugin/integrations/disk_space.md +++ b/src/collectors/diskspace.plugin/integrations/disk_space.md @@ -125,7 +125,7 @@ You can also specify per mount point `[plugin:proc:diskspace:mountpoint]` |:----|:-----------|:-------|:--------:| | update every | Data collection frequency. | 1 | no | | remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no | -| check for new mount points every | Parse proc files frequency. | 15 | no | +| check for new mount points every | Parse proc files frequency. | 15s | no | | exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no | | exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no | | exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. 
| msdosfs msdos vfat overlayfs aufs* *unionfs | no | diff --git a/src/collectors/diskspace.plugin/metadata.yaml b/src/collectors/diskspace.plugin/metadata.yaml index 578f56bd0c52be..a00a9e91d4e5da 100644 --- a/src/collectors/diskspace.plugin/metadata.yaml +++ b/src/collectors/diskspace.plugin/metadata.yaml @@ -63,7 +63,7 @@ modules: required: false - name: check for new mount points every description: Parse proc files frequency. - default_value: 15 + default_value: 15s required: false - name: exclude space metrics on paths description: Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. diff --git a/src/collectors/diskspace.plugin/plugin_diskspace.c b/src/collectors/diskspace.plugin/plugin_diskspace.c index 920db1861bfcab..12935ad3be9960 100644 --- a/src/collectors/diskspace.plugin/plugin_diskspace.c +++ b/src/collectors/diskspace.plugin/plugin_diskspace.c @@ -855,11 +855,13 @@ void *diskspace_main(void *ptr) { cleanup_mount_points = config_get_boolean(CONFIG_SECTION_DISKSPACE, "remove charts of unmounted disks" , cleanup_mount_points); - int update_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every); - if(update_every < localhost->rrd_update_every) + int update_every = (int)config_get_duration_seconds(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every); + if(update_every < localhost->rrd_update_every) { update_every = localhost->rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_DISKSPACE, "update every", update_every); + } - check_for_new_mountpoints_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every); + check_for_new_mountpoints_every = (int)config_get_duration_seconds(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every); if(check_for_new_mountpoints_every < update_every) check_for_new_mountpoints_every = update_every; diff --git 
a/src/collectors/ebpf.plugin/ebpf.c b/src/collectors/ebpf.plugin/ebpf.c index 5424ea8f0a3ac7..c3ab357978770e 100644 --- a/src/collectors/ebpf.plugin/ebpf.c +++ b/src/collectors/ebpf.plugin/ebpf.c @@ -19,11 +19,7 @@ char *ebpf_plugin_dir = PLUGINS_DIR; static char *ebpf_configured_log_dir = LOG_DIR; char *ebpf_algorithms[] = { EBPF_CHART_ALGORITHM_ABSOLUTE, EBPF_CHART_ALGORITHM_INCREMENTAL}; -struct config collector_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config collector_config = APPCONFIG_INITIALIZER; int running_on_kernel = 0; int ebpf_nprocs; @@ -661,7 +657,7 @@ struct vfs_bpf *vfs_bpf_obj = NULL; #else void *default_btf = NULL; #endif -char *btf_path = NULL; +const char *btf_path = NULL; /***************************************************************** * @@ -1608,7 +1604,7 @@ static void get_ipv6_last_addr(union netdata_ip_t *out, union netdata_ip_t *in, * * @return it returns 0 on success and -1 otherwise. 
*/ -static inline int ebpf_ip2nl(uint8_t *dst, char *ip, int domain, char *source) +static inline int ebpf_ip2nl(uint8_t *dst, const char *ip, int domain, char *source) { if (inet_pton(domain, ip, dst) <= 0) { netdata_log_error("The address specified (%s) is invalid ", source); @@ -1666,14 +1662,14 @@ void ebpf_clean_ip_structure(ebpf_network_viewer_ip_list_t **clean) * @param out a pointer to store the link list * @param ip the value given as parameter */ -static void ebpf_parse_ip_list_unsafe(void **out, char *ip) +static void ebpf_parse_ip_list_unsafe(void **out, const char *ip) { ebpf_network_viewer_ip_list_t **list = (ebpf_network_viewer_ip_list_t **)out; char *ipdup = strdupz(ip); union netdata_ip_t first = { }; union netdata_ip_t last = { }; - char *is_ipv6; + const char *is_ipv6; if (*ip == '*' && *(ip+1) == '\0') { memset(first.addr8, 0, sizeof(first.addr8)); memset(last.addr8, 0xFF, sizeof(last.addr8)); @@ -1684,7 +1680,8 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip) goto storethisip; } - char *end = ip; + char *enddup = strdupz(ip); + char *end = enddup; // Move while I cannot find a separator while (*end && *end != '/' && *end != '-') end++; @@ -1814,7 +1811,7 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip) ebpf_network_viewer_ip_list_t *store; - storethisip: +storethisip: store = callocz(1, sizeof(ebpf_network_viewer_ip_list_t)); store->value = ipdup; store->hash = simple_hash(ipdup); @@ -1825,8 +1822,9 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip) ebpf_fill_ip_list_unsafe(list, store, "socket"); return; - cleanipdup: +cleanipdup: freez(ipdup); + freez(enddup); } /** @@ -1836,7 +1834,7 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip) * * @param ptr is a pointer with the text to parse. 
*/ -void ebpf_parse_ips_unsafe(char *ptr) +void ebpf_parse_ips_unsafe(const char *ptr) { // No value if (unlikely(!ptr)) @@ -1927,7 +1925,7 @@ static inline void fill_port_list(ebpf_network_viewer_port_list_t **out, ebpf_ne * @param out a pointer to store the link list * @param service the service used to create the structure that will be linked. */ -static void ebpf_parse_service_list(void **out, char *service) +static void ebpf_parse_service_list(void **out, const char *service) { ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out; struct servent *serv = getservbyname((const char *)service, "tcp"); @@ -1956,8 +1954,10 @@ static void ebpf_parse_service_list(void **out, char *service) * @param out a pointer to store the link list * @param range the informed range for the user. */ -static void ebpf_parse_port_list(void **out, char *range) -{ +static void ebpf_parse_port_list(void **out, const char *range_param) { + char range[strlen(range_param) + 1]; + strncpyz(range, range_param, strlen(range_param)); + int first, last; ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out; @@ -2029,7 +2029,7 @@ static void ebpf_parse_port_list(void **out, char *range) * * @param ptr is a pointer with the text to parse. */ -void ebpf_parse_ports(char *ptr) +void ebpf_parse_ports(const char *ptr) { // No value if (unlikely(!ptr)) @@ -2480,7 +2480,7 @@ static void ebpf_link_hostname(ebpf_network_viewer_hostname_list_t **out, ebpf_n * @param out is the output link list * @param parse is a pointer with the text to parser. 
*/ -static void ebpf_link_hostnames(char *parse) +static void ebpf_link_hostnames(const char *parse) { // No value if (unlikely(!parse)) @@ -2536,7 +2536,7 @@ void parse_network_viewer_section(struct config *cfg) EBPF_CONFIG_RESOLVE_SERVICE, CONFIG_BOOLEAN_YES); - char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL); + const char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL); ebpf_parse_ports(value); if (network_viewer_opt.hostname_resolution_enabled) { @@ -2684,7 +2684,7 @@ static void ebpf_allocate_common_vectors() * * @param ptr the option given by users */ -static inline void ebpf_how_to_load(char *ptr) +static inline void ebpf_how_to_load(const char *ptr) { if (!strcasecmp(ptr, EBPF_CFG_LOAD_MODE_RETURN)) ebpf_set_thread_mode(MODE_RETURN); @@ -2775,7 +2775,7 @@ static inline void ebpf_set_load_mode(netdata_ebpf_load_mode_t load, netdata_ebp * @param str value read from configuration file. * @param origin specify the configuration file loaded */ -static inline void epbf_update_load_mode(char *str, netdata_ebpf_load_mode_t origin) +static inline void epbf_update_load_mode(const char *str, netdata_ebpf_load_mode_t origin) { netdata_ebpf_load_mode_t load = epbf_convert_string_to_load_mode(str); @@ -2808,7 +2808,7 @@ static void read_collector_values(int *disable_cgroups, int update_every, netdata_ebpf_load_mode_t origin) { // Read global section - char *value; + const char *value; if (appconfig_exists(&collector_config, EBPF_GLOBAL_SECTION, "load")) // Backward compatibility value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, "load", EBPF_CFG_LOAD_MODE_DEFAULT); @@ -4076,7 +4076,6 @@ int main(int argc, char **argv) heartbeat_t hb; heartbeat_init(&hb); int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT; - uint32_t max_period = EBPF_CLEANUP_FACTOR; int update_apps_list = update_apps_every - 1; int process_maps_per_core = ebpf_modules[EBPF_MODULE_PROCESS_IDX].maps_per_core; 
//Plugin will be killed when it receives a signal @@ -4099,7 +4098,7 @@ int main(int argc, char **argv) pthread_mutex_lock(&collect_data_mutex); ebpf_parse_proc_files(); if (collect_pids & (1<pid); + snprintfz(filename, FILENAME_MAX, "%s/proc/%u/cmdline", netdata_configured_host_prefix, p->pid); int ret = 0; @@ -490,7 +490,7 @@ static inline int read_proc_pid_stat(ebpf_pid_data_t *p) char *comm = procfile_lineword(ff, 0, 1); int32_t ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3)); - if (p->ppid == ppid && p->target) + if (p->ppid == (uint32_t)ppid && p->target) goto without_cmdline_target; p->ppid = ppid; @@ -546,7 +546,7 @@ static inline int ebpf_collect_data_for_pid(pid_t pid) read_proc_pid_stat(p); // check its parent pid - if (unlikely( p->ppid > pid_max)) { + if (unlikely( p->ppid > (uint32_t)pid_max)) { netdata_log_error("Pid %d (command '%s') states invalid parent pid %u. Using 0.", pid, p->comm, p->ppid); p->ppid = 0; } @@ -906,9 +906,8 @@ void ebpf_process_sum_values_for_pids(ebpf_process_stat_t *process, struct ebpf_ * * @param tbl_pid_stats_fd The mapped file descriptor for the hash table. * @param maps_per_core do I have hash maps per core? - * @param max_period max period to wait before remove from hash table. 
*/ -void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core, uint32_t max_period) +void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core) { if (tbl_pid_stats_fd == -1) return; diff --git a/src/collectors/ebpf.plugin/ebpf_apps.h b/src/collectors/ebpf.plugin/ebpf_apps.h index 98c9995da9c812..5bf8953adb0636 100644 --- a/src/collectors/ebpf.plugin/ebpf_apps.h +++ b/src/collectors/ebpf.plugin/ebpf_apps.h @@ -495,7 +495,7 @@ int ebpf_read_hash_table(void *ep, int fd, uint32_t pid); int get_pid_comm(pid_t pid, size_t n, char *dest); -void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core, uint32_t max_period); +void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core); void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core); // The default value is at least 32 times smaller than maximum number of PIDs allowed on system, diff --git a/src/collectors/ebpf.plugin/ebpf_cachestat.c b/src/collectors/ebpf.plugin/ebpf_cachestat.c index 8c0260d51776c7..4d845b145036da 100644 --- a/src/collectors/ebpf.plugin/ebpf_cachestat.c +++ b/src/collectors/ebpf.plugin/ebpf_cachestat.c @@ -43,11 +43,7 @@ ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = #endif }}; -struct config cachestat_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config cachestat_config = APPCONFIG_INITIALIZER; netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "mark_page_accessed", .mode = EBPF_LOAD_TRAMPOLINE}, @@ -716,9 +712,8 @@ static inline void cachestat_save_pid_values(netdata_publish_cachestat_t *out, n * Read the apps table and store data inside the structure. * * @param maps_per_core do I need to read all cores? 
- * @param max_period limit of iterations without updates before remove data from hash table */ -static void ebpf_read_cachestat_apps_table(int maps_per_core, uint32_t max_period) +static void ebpf_read_cachestat_apps_table(int maps_per_core) { netdata_cachestat_pid_t *cv = cachestat_vector; int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd; @@ -849,7 +844,6 @@ void *ebpf_read_cachestat_thread(void *ptr) int maps_per_core = em->maps_per_core; int update_every = em->update_every; - uint32_t max_period = EBPF_CLEANUP_FACTOR; int counter = update_every - 1; @@ -863,7 +857,7 @@ void *ebpf_read_cachestat_thread(void *ptr) continue; pthread_mutex_lock(&collect_data_mutex); - ebpf_read_cachestat_apps_table(maps_per_core, max_period); + ebpf_read_cachestat_apps_table(maps_per_core); ebpf_resume_apps_data(); pthread_mutex_unlock(&collect_data_mutex); diff --git a/src/collectors/ebpf.plugin/ebpf_dcstat.c b/src/collectors/ebpf.plugin/ebpf_dcstat.c index e6053cb4ac955f..97b949ea160ecb 100644 --- a/src/collectors/ebpf.plugin/ebpf_dcstat.c +++ b/src/collectors/ebpf.plugin/ebpf_dcstat.c @@ -12,11 +12,7 @@ netdata_dcstat_pid_t *dcstat_vector = NULL; static netdata_idx_t dcstat_hash_values[NETDATA_DCSTAT_IDX_END]; static netdata_idx_t *dcstat_values = NULL; -struct config dcstat_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config dcstat_config = APPCONFIG_INITIALIZER; ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END, .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, @@ -542,9 +538,8 @@ static void ebpf_dcstat_apps_accumulator(netdata_dcstat_pid_t *out, int maps_per * Read the apps table and store data inside the structure. * * @param maps_per_core do I need to read all cores? 
- * @param max_period limit of iterations without updates before remove data from hash table */ -static void ebpf_read_dc_apps_table(int maps_per_core, uint32_t max_period) +static void ebpf_read_dc_apps_table(int maps_per_core) { netdata_dcstat_pid_t *cv = dcstat_vector; int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd; @@ -660,7 +655,6 @@ void *ebpf_read_dcstat_thread(void *ptr) uint32_t lifetime = em->lifetime; uint32_t running_time = 0; usec_t period = update_every * USEC_PER_SEC; - uint32_t max_period = EBPF_CLEANUP_FACTOR; pids_fd[EBPF_PIDS_DCSTAT_IDX] = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd; while (!ebpf_plugin_stop() && running_time < lifetime) { (void)heartbeat_next(&hb, period); @@ -668,7 +662,7 @@ void *ebpf_read_dcstat_thread(void *ptr) continue; pthread_mutex_lock(&collect_data_mutex); - ebpf_read_dc_apps_table(maps_per_core, max_period); + ebpf_read_dc_apps_table(maps_per_core); ebpf_dc_resume_apps_data(); pthread_mutex_unlock(&collect_data_mutex); diff --git a/src/collectors/ebpf.plugin/ebpf_disk.c b/src/collectors/ebpf.plugin/ebpf_disk.c index 246f98702dd3a3..c59fabe2184b94 100644 --- a/src/collectors/ebpf.plugin/ebpf_disk.c +++ b/src/collectors/ebpf.plugin/ebpf_disk.c @@ -6,11 +6,7 @@ #include "ebpf.h" #include "ebpf_disk.h" -struct config disk_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config disk_config = APPCONFIG_INITIALIZER; static ebpf_local_maps_t disk_maps[] = {{.name = "tbl_disk_iocall", .internal_input = NETDATA_DISK_HISTOGRAM_LENGTH, .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, diff --git a/src/collectors/ebpf.plugin/ebpf_fd.c b/src/collectors/ebpf.plugin/ebpf_fd.c index 61a9595ccc54b4..45ab5a8ad9e2f7 100644 --- a/src/collectors/ebpf.plugin/ebpf_fd.c +++ b/src/collectors/ebpf.plugin/ebpf_fd.c @@ -46,9 +46,7 @@ static ebpf_local_maps_t fd_maps[] 
= {{.name = "tbl_fd_pid", .internal_input = N }}; -struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, - .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config fd_config = APPCONFIG_INITIALIZER; static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER]; static netdata_idx_t *fd_values = NULL; @@ -683,9 +681,8 @@ static void fd_apps_accumulator(netdata_fd_stat_t *out, int maps_per_core) * Read the apps table and store data inside the structure. * * @param maps_per_core do I need to read all cores? - * @param max_period limit of iterations without updates before remove data from hash table */ -static void ebpf_read_fd_apps_table(int maps_per_core, uint32_t max_period) +static void ebpf_read_fd_apps_table(int maps_per_core) { netdata_fd_stat_t *fv = fd_vector; int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd; @@ -799,7 +796,6 @@ void *ebpf_read_fd_thread(void *ptr) uint32_t lifetime = em->lifetime; uint32_t running_time = 0; int period = USEC_PER_SEC; - uint32_t max_period = EBPF_CLEANUP_FACTOR; pids_fd[EBPF_PIDS_FD_IDX] = fd_maps[NETDATA_FD_PID_STATS].map_fd; while (!ebpf_plugin_stop() && running_time < lifetime) { (void)heartbeat_next(&hb, period); @@ -807,7 +803,7 @@ void *ebpf_read_fd_thread(void *ptr) continue; pthread_mutex_lock(&collect_data_mutex); - ebpf_read_fd_apps_table(maps_per_core, max_period); + ebpf_read_fd_apps_table(maps_per_core); ebpf_fd_resume_apps_data(); pthread_mutex_unlock(&collect_data_mutex); diff --git a/src/collectors/ebpf.plugin/ebpf_filesystem.c b/src/collectors/ebpf.plugin/ebpf_filesystem.c index 1187b03e930a80..61317e41a6c2b9 100644 --- a/src/collectors/ebpf.plugin/ebpf_filesystem.c +++ b/src/collectors/ebpf.plugin/ebpf_filesystem.c @@ -2,11 +2,7 @@ #include "ebpf_filesystem.h" -struct config fs_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { 
.avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config fs_config = APPCONFIG_INITIALIZER; ebpf_local_maps_t ext4_maps[] = {{.name = "tbl_ext4", .internal_input = NETDATA_KEY_CALLS_SYNC, .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, diff --git a/src/collectors/ebpf.plugin/ebpf_hardirq.c b/src/collectors/ebpf.plugin/ebpf_hardirq.c index 911425e5433993..3b769f97efbffb 100644 --- a/src/collectors/ebpf.plugin/ebpf_hardirq.c +++ b/src/collectors/ebpf.plugin/ebpf_hardirq.c @@ -3,11 +3,7 @@ #include "ebpf.h" #include "ebpf_hardirq.h" -struct config hardirq_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config hardirq_config = APPCONFIG_INITIALIZER; static ebpf_local_maps_t hardirq_maps[] = { { diff --git a/src/collectors/ebpf.plugin/ebpf_mdflush.c b/src/collectors/ebpf.plugin/ebpf_mdflush.c index 77c109bff8f808..cfe7b9989795ee 100644 --- a/src/collectors/ebpf.plugin/ebpf_mdflush.c +++ b/src/collectors/ebpf.plugin/ebpf_mdflush.c @@ -3,11 +3,7 @@ #include "ebpf.h" #include "ebpf_mdflush.h" -struct config mdflush_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config mdflush_config = APPCONFIG_INITIALIZER; #define MDFLUSH_MAP_COUNT 0 static ebpf_local_maps_t mdflush_maps[] = { diff --git a/src/collectors/ebpf.plugin/ebpf_mount.c b/src/collectors/ebpf.plugin/ebpf_mount.c index 7441cc6e2f1903..e379e7427c611c 100644 --- a/src/collectors/ebpf.plugin/ebpf_mount.c +++ b/src/collectors/ebpf.plugin/ebpf_mount.c @@ -22,9 +22,7 @@ static char *mount_dimension_name[NETDATA_EBPF_MOUNT_SYSCALL] = { "mount", "umou static netdata_syscall_stat_t 
mount_aggregated_data[NETDATA_EBPF_MOUNT_SYSCALL]; static netdata_publish_syscall_t mount_publish_aggregated[NETDATA_EBPF_MOUNT_SYSCALL]; -struct config mount_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, - .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config mount_config = APPCONFIG_INITIALIZER; static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END]; diff --git a/src/collectors/ebpf.plugin/ebpf_oomkill.c b/src/collectors/ebpf.plugin/ebpf_oomkill.c index 34361550b11e7a..b3045be4d95574 100644 --- a/src/collectors/ebpf.plugin/ebpf_oomkill.c +++ b/src/collectors/ebpf.plugin/ebpf_oomkill.c @@ -3,11 +3,7 @@ #include "ebpf.h" #include "ebpf_oomkill.h" -struct config oomkill_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config oomkill_config = APPCONFIG_INITIALIZER; #define OOMKILL_MAP_KILLCNT 0 static ebpf_local_maps_t oomkill_maps[] = { diff --git a/src/collectors/ebpf.plugin/ebpf_process.c b/src/collectors/ebpf.plugin/ebpf_process.c index d2810f899aa0aa..aec546d30ce1d6 100644 --- a/src/collectors/ebpf.plugin/ebpf_process.c +++ b/src/collectors/ebpf.plugin/ebpf_process.c @@ -57,11 +57,7 @@ ebpf_process_stat_t *process_stat_vector = NULL; static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END]; static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END]; -struct config process_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config process_config = APPCONFIG_INITIALIZER; /***************************************************************** * 
diff --git a/src/collectors/ebpf.plugin/ebpf_shm.c b/src/collectors/ebpf.plugin/ebpf_shm.c index ac44549b2ceb1c..fdcb5257201f9a 100644 --- a/src/collectors/ebpf.plugin/ebpf_shm.c +++ b/src/collectors/ebpf.plugin/ebpf_shm.c @@ -12,11 +12,7 @@ netdata_ebpf_shm_t *shm_vector = NULL; static netdata_idx_t shm_hash_values[NETDATA_SHM_END]; static netdata_idx_t *shm_values = NULL; -struct config shm_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config shm_config = APPCONFIG_INITIALIZER; static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, .user_input = 0, @@ -569,9 +565,8 @@ static void ebpf_update_shm_cgroup() * Read the apps table and store data inside the structure. * * @param maps_per_core do I need to read all cores? - * @param max_period limit of iterations without updates before remove data from hash table */ -static void ebpf_read_shm_apps_table(int maps_per_core, uint32_t max_period) +static void ebpf_read_shm_apps_table(int maps_per_core) { netdata_ebpf_shm_t *cv = shm_vector; int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd; @@ -1079,7 +1074,6 @@ void *ebpf_read_shm_thread(void *ptr) uint32_t lifetime = em->lifetime; uint32_t running_time = 0; usec_t period = update_every * USEC_PER_SEC; - uint32_t max_period = EBPF_CLEANUP_FACTOR; pids_fd[EBPF_PIDS_SHM_IDX] = shm_maps[NETDATA_PID_SHM_TABLE].map_fd; while (!ebpf_plugin_stop() && running_time < lifetime) { (void)heartbeat_next(&hb, period); @@ -1087,7 +1081,7 @@ void *ebpf_read_shm_thread(void *ptr) continue; pthread_mutex_lock(&collect_data_mutex); - ebpf_read_shm_apps_table(maps_per_core, max_period); + ebpf_read_shm_apps_table(maps_per_core); ebpf_shm_resume_apps_data(); pthread_mutex_unlock(&collect_data_mutex); diff --git a/src/collectors/ebpf.plugin/ebpf_socket.c 
b/src/collectors/ebpf.plugin/ebpf_socket.c index 5b87a32560bd3f..9047e50cc95146 100644 --- a/src/collectors/ebpf.plugin/ebpf_socket.c +++ b/src/collectors/ebpf.plugin/ebpf_socket.c @@ -77,11 +77,7 @@ netdata_socket_t *socket_values; ebpf_network_viewer_port_list_t *listen_ports = NULL; ebpf_addresses_t tcp_v6_connect_address = {.function = "tcp_v6_connect", .hash = 0, .addr = 0, .type = 0}; -struct config socket_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config socket_config = APPCONFIG_INITIALIZER; netdata_ebpf_targets_t socket_targets[] = { {.name = "inet_csk_accept", .mode = EBPF_LOAD_PROBE}, {.name = "tcp_retransmit_skb", .mode = EBPF_LOAD_PROBE}, @@ -2708,7 +2704,7 @@ static void ebpf_socket_initialize_global_vectors() * @param hash the calculated hash for the dimension name. * @param name the dimension name. 
*/ -static void ebpf_link_dimension_name(char *port, uint32_t hash, char *value) +static void ebpf_link_dimension_name(const char *port, uint32_t hash, const char *value) { int test = str2i(port); if (test < NETDATA_MINIMUM_PORT_VALUE || test > NETDATA_MAXIMUM_PORT_VALUE){ @@ -2753,15 +2749,15 @@ static void ebpf_link_dimension_name(char *port, uint32_t hash, char *value) * * @param cfg the configuration structure */ + +static bool config_service_value_cb(void *data __maybe_unused, const char *name, const char *value) { + ebpf_link_dimension_name(name, simple_hash(name), value); + return true; +} + void ebpf_parse_service_name_section(struct config *cfg) { - struct section *co = appconfig_get_section(cfg, EBPF_SERVICE_NAME_SECTION); - if (co) { - struct config_option *cv; - for (cv = co->values; cv ; cv = cv->next) { - ebpf_link_dimension_name(cv->name, cv->hash, cv->value); - } - } + appconfig_foreach_value_in_section(cfg, EBPF_SERVICE_NAME_SECTION, config_service_value_cb, NULL); // Always associated the default port to Netdata ebpf_network_viewer_dim_name_t *names = network_viewer_opt.names; diff --git a/src/collectors/ebpf.plugin/ebpf_socket.h b/src/collectors/ebpf.plugin/ebpf_socket.h index e01126035f1510..a236985eb8245b 100644 --- a/src/collectors/ebpf.plugin/ebpf_socket.h +++ b/src/collectors/ebpf.plugin/ebpf_socket.h @@ -339,8 +339,8 @@ extern ebpf_network_viewer_port_list_t *listen_ports; void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *values); void ebpf_fill_ip_list_unsafe(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table); void ebpf_parse_service_name_section(struct config *cfg); -void ebpf_parse_ips_unsafe(char *ptr); -void ebpf_parse_ports(char *ptr); +void ebpf_parse_ips_unsafe(const char *ptr); +void ebpf_parse_ports(const char *ptr); void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em); void ebpf_socket_fill_publish_apps(ebpf_socket_publish_apps_t *curr, 
netdata_socket_t *ns); diff --git a/src/collectors/ebpf.plugin/ebpf_softirq.c b/src/collectors/ebpf.plugin/ebpf_softirq.c index 21bd83a3e222e8..f277027bde7c45 100644 --- a/src/collectors/ebpf.plugin/ebpf_softirq.c +++ b/src/collectors/ebpf.plugin/ebpf_softirq.c @@ -3,11 +3,7 @@ #include "ebpf.h" #include "ebpf_softirq.h" -struct config softirq_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config softirq_config = APPCONFIG_INITIALIZER; #define SOFTIRQ_MAP_LATENCY 0 static ebpf_local_maps_t softirq_maps[] = { diff --git a/src/collectors/ebpf.plugin/ebpf_swap.c b/src/collectors/ebpf.plugin/ebpf_swap.c index 9333531784efb6..fe1d5671c6f5ea 100644 --- a/src/collectors/ebpf.plugin/ebpf_swap.c +++ b/src/collectors/ebpf.plugin/ebpf_swap.c @@ -12,11 +12,7 @@ static netdata_idx_t *swap_values = NULL; netdata_ebpf_swap_t *swap_vector = NULL; -struct config swap_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config swap_config = APPCONFIG_INITIALIZER; static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, .user_input = 0, @@ -543,9 +539,8 @@ void ebpf_swap_resume_apps_data() { * Read the apps table and store data inside the structure. * * @param maps_per_core do I need to read all cores? 
- * @param max_period limit of iterations without updates before remove data from hash table */ -static void ebpf_read_swap_apps_table(int maps_per_core, uint32_t max_period) +static void ebpf_read_swap_apps_table(int maps_per_core) { netdata_ebpf_swap_t *cv = swap_vector; int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd; @@ -613,7 +608,6 @@ void *ebpf_read_swap_thread(void *ptr) uint32_t lifetime = em->lifetime; uint32_t running_time = 0; usec_t period = update_every * USEC_PER_SEC; - uint32_t max_period = EBPF_CLEANUP_FACTOR; pids_fd[EBPF_PIDS_SWAP_IDX] = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd; while (!ebpf_plugin_stop() && running_time < lifetime) { @@ -622,7 +616,7 @@ void *ebpf_read_swap_thread(void *ptr) continue; pthread_mutex_lock(&collect_data_mutex); - ebpf_read_swap_apps_table(maps_per_core, max_period); + ebpf_read_swap_apps_table(maps_per_core); ebpf_swap_resume_apps_data(); pthread_mutex_unlock(&collect_data_mutex); diff --git a/src/collectors/ebpf.plugin/ebpf_sync.c b/src/collectors/ebpf.plugin/ebpf_sync.c index 2be9192c50a3e5..0802d769fc0aba 100644 --- a/src/collectors/ebpf.plugin/ebpf_sync.c +++ b/src/collectors/ebpf.plugin/ebpf_sync.c @@ -100,11 +100,7 @@ ebpf_local_maps_t sync_file_range_maps[] = {{.name = "tbl_syncfr", .internal_inp #endif }}; -struct config sync_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config sync_config = APPCONFIG_INITIALIZER; netdata_ebpf_targets_t sync_targets[] = { {.name = NETDATA_SYSCALLS_SYNC, .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NETDATA_SYSCALLS_SYNCFS, .mode = EBPF_LOAD_TRAMPOLINE}, diff --git a/src/collectors/ebpf.plugin/ebpf_vfs.c b/src/collectors/ebpf.plugin/ebpf_vfs.c index cf1f50e99679e8..5d922affaa7358 100644 --- a/src/collectors/ebpf.plugin/ebpf_vfs.c +++ b/src/collectors/ebpf.plugin/ebpf_vfs.c @@ -52,11 +52,7 @@ struct 
netdata_static_thread ebpf_read_vfs = { .start_routine = NULL }; -struct config vfs_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config vfs_config = APPCONFIG_INITIALIZER; netdata_ebpf_targets_t vfs_targets[] = { {.name = "vfs_write", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "vfs_writev", .mode = EBPF_LOAD_TRAMPOLINE}, diff --git a/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md b/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md index d5baa094a02ad2..01642bc24be94c 100644 --- a/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md +++ b/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md @@ -110,7 +110,7 @@ This integration only supports a single configuration option, and most users wil | Name | Description | Default | Required | |:----|:-----------|:-------|:--------:| -| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no | +| loop time | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20ms | no | #### Examples There are no configuration examples. diff --git a/src/collectors/idlejitter.plugin/metadata.yaml b/src/collectors/idlejitter.plugin/metadata.yaml index 0ad9469941771d..7c49a6ec7c2e24 100644 --- a/src/collectors/idlejitter.plugin/metadata.yaml +++ b/src/collectors/idlejitter.plugin/metadata.yaml @@ -55,10 +55,10 @@ modules: title: '' enabled: false list: - - name: loop time in ms + - name: loop time description: > Specifies the target time for the data collection thread to sleep, measured in miliseconds. 
- default_value: 20 + default_value: 20ms required: false examples: folding: diff --git a/src/collectors/idlejitter.plugin/plugin_idlejitter.c b/src/collectors/idlejitter.plugin/plugin_idlejitter.c index 99645b1d201e4b..2a212a669974c7 100644 --- a/src/collectors/idlejitter.plugin/plugin_idlejitter.c +++ b/src/collectors/idlejitter.plugin/plugin_idlejitter.c @@ -22,9 +22,9 @@ void *cpuidlejitter_main(void *ptr) { worker_register("IDLEJITTER"); worker_register_job_name(0, "measurements"); - usec_t sleep_ut = config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS; + usec_t sleep_ut = config_get_duration_ms("plugin:idlejitter", "loop time", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS; if(sleep_ut <= 0) { - config_set_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS); + config_set_duration_ms("plugin:idlejitter", "loop time", CPU_IDLEJITTER_SLEEP_TIME_MS); sleep_ut = CPU_IDLEJITTER_SLEEP_TIME_MS * USEC_PER_MS; } diff --git a/src/collectors/plugins.d/plugins_d.c b/src/collectors/plugins.d/plugins_d.c index 6ae71c76951f84..d9f2caec2e7153 100644 --- a/src/collectors/plugins.d/plugins_d.c +++ b/src/collectors/plugins.d/plugins_d.c @@ -321,7 +321,7 @@ void *pluginsd_main(void *ptr) { cd->unsafe.enabled = enabled; cd->unsafe.running = false; - cd->update_every = (int)config_get_number(cd->id, "update every", localhost->rrd_update_every); + cd->update_every = (int)config_get_duration_seconds(cd->id, "update every", localhost->rrd_update_every); cd->started_t = now_realtime_sec(); char *def = ""; diff --git a/src/collectors/proc.plugin/README.md b/src/collectors/proc.plugin/README.md index bb09226107ff7f..07cec3aafa0e98 100644 --- a/src/collectors/proc.plugin/README.md +++ b/src/collectors/proc.plugin/README.md @@ -133,7 +133,7 @@ Then edit `netdata.conf` and find the following section. 
This is the basic plugi # extended operations for all disks = auto # backlog for all disks = auto # bcache for all disks = auto - # bcache priority stats update every = 0 + # bcache priority stats update every = off # remove charts of removed disks = yes # path to get block device = /sys/block/%s # path to get block device bcache = /sys/block/%s/bcache @@ -578,7 +578,7 @@ Default configuration will monitor only enabled infiniband ports, and refresh ne # hardware errors counters = auto # monitor only ports being active = auto # disable by default interfaces matching = - # refresh ports state every seconds = 30 + # refresh ports state every = 30s ``` ## AMD GPUs diff --git a/src/collectors/proc.plugin/ipc.c b/src/collectors/proc.plugin/ipc.c index 5b47116b9a718b..c280254ac4fdc6 100644 --- a/src/collectors/proc.plugin/ipc.c +++ b/src/collectors/proc.plugin/ipc.c @@ -182,7 +182,7 @@ static inline int ipc_sem_get_status(struct ipc_status *st) { return 0; } -int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_root) { +static int ipc_msq_get_info(const char *msg_filename, struct message_queue **message_queue_root) { static procfile *ff; struct message_queue *msq; @@ -238,7 +238,7 @@ int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_ro return 0; } -int ipc_shm_get_info(char *shm_filename, struct shm_stats *shm) { +static int ipc_shm_get_info(const char *shm_filename, struct shm_stats *shm) { static procfile *ff; if(unlikely(!ff)) { @@ -287,10 +287,10 @@ int do_ipc(int update_every, usec_t dt) { static const RRDVAR_ACQUIRED *arrays_max = NULL, *semaphores_max = NULL; static RRDSET *st_arrays = NULL; static RRDDIM *rd_arrays = NULL; - static char *msg_filename = NULL; + static const char *msg_filename = NULL; static struct message_queue *message_queue_root = NULL; static long long dimensions_limit; - static char *shm_filename = NULL; + static const char *shm_filename = NULL; if(unlikely(do_sem == -1)) { do_msg = 
config_get_boolean("plugin:proc:ipc", "message queues", CONFIG_BOOLEAN_YES); diff --git a/src/collectors/proc.plugin/plugin_proc.c b/src/collectors/proc.plugin/plugin_proc.c index b4a856467261c5..4c57f07c09e980 100644 --- a/src/collectors/proc.plugin/plugin_proc.c +++ b/src/collectors/proc.plugin/plugin_proc.c @@ -279,7 +279,7 @@ int get_numa_node_count(void) char name[FILENAME_MAX + 1]; snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node"); - char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); + const char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); DIR *dir = opendir(dirname); if (dir) { diff --git a/src/collectors/proc.plugin/proc_diskstats.c b/src/collectors/proc.plugin/proc_diskstats.c index 4cbd618f666149..ef48ed430dae2d 100644 --- a/src/collectors/proc.plugin/proc_diskstats.c +++ b/src/collectors/proc.plugin/proc_diskstats.c @@ -180,16 +180,16 @@ static struct disk { #define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st) -static char *path_to_sys_dev_block_major_minor_string = NULL; -static char *path_to_sys_block_device = NULL; -static char *path_to_sys_block_device_bcache = NULL; -static char *path_to_sys_devices_virtual_block_device = NULL; -static char *path_to_device_mapper = NULL; -static char *path_to_dev_disk = NULL; -static char *path_to_sys_block = NULL; -static char *path_to_device_label = NULL; -static char *path_to_device_id = NULL; -static char *path_to_veritas_volume_groups = NULL; +static const char *path_to_sys_dev_block_major_minor_string = NULL; +static const char *path_to_sys_block_device = NULL; +static const char *path_to_sys_block_device_bcache = NULL; +static const char *path_to_sys_devices_virtual_block_device = NULL; +static const char *path_to_device_mapper = NULL; +static const char *path_to_dev_disk = NULL; 
+static const char *path_to_sys_block = NULL; +static const char *path_to_device_label = NULL; +static const char *path_to_device_id = NULL; +static const char *path_to_veritas_volume_groups = NULL; static int name_disks_by_id = CONFIG_BOOLEAN_NO; static int global_bcache_priority_stats_update_every = 0; // disabled by default @@ -1374,7 +1374,7 @@ int do_proc_diskstats(int update_every, usec_t dt) { global_do_ext = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "extended operations for all disks", global_do_ext); global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "backlog for all disks", global_do_backlog); global_do_bcache = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache for all disks", global_do_bcache); - global_bcache_priority_stats_update_every = (int)config_get_number(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every); + global_bcache_priority_stats_update_every = (int)config_get_duration_seconds(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every); global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks); diff --git a/src/collectors/proc.plugin/proc_mdstat.c b/src/collectors/proc.plugin/proc_mdstat.c index 3857d9ec4f5102..47c4f0d2b01f90 100644 --- a/src/collectors/proc.plugin/proc_mdstat.c +++ b/src/collectors/proc.plugin/proc_mdstat.c @@ -89,7 +89,7 @@ int do_proc_mdstat(int update_every, usec_t dt) static int do_health = -1, do_nonredundant = -1, do_disks = -1, do_operations = -1, do_mismatch = -1, do_mismatch_config = -1; static int make_charts_obsolete = -1; - static char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL; + static const char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL; static struct raid *raids = NULL; static size_t 
raids_allocated = 0; size_t raids_num = 0, raid_idx = 0, redundant_num = 0; diff --git a/src/collectors/proc.plugin/proc_net_sockstat.c b/src/collectors/proc.plugin/proc_net_sockstat.c index da8682b51728a4..185eb4e5a43a68 100644 --- a/src/collectors/proc.plugin/proc_net_sockstat.c +++ b/src/collectors/proc.plugin/proc_net_sockstat.c @@ -128,7 +128,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) { do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG sockets", CONFIG_BOOLEAN_AUTO); do_frag_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG memory", CONFIG_BOOLEAN_AUTO); - update_constants_every = config_get_number("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every); + update_constants_every = config_get_duration_seconds("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every); update_constants_count = update_constants_every; arl_sockets = arl_create("sockstat/sockets", arl_callback_str2kernel_uint_t, 60); diff --git a/src/collectors/proc.plugin/proc_net_stat_conntrack.c b/src/collectors/proc.plugin/proc_net_stat_conntrack.c index 6951cba792da72..152a16402b4c76 100644 --- a/src/collectors/proc.plugin/proc_net_stat_conntrack.c +++ b/src/collectors/proc.plugin/proc_net_stat_conntrack.c @@ -11,7 +11,7 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) { static int do_sockets = -1, do_new = -1, do_changes = -1, do_expect = -1, do_search = -1, do_errors = -1; static usec_t get_max_every = 10 * USEC_PER_SEC, usec_since_last_max = 0; static int read_full = 1; - static char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename; + static const char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename; static const RRDVAR_ACQUIRED *rrdvar_max = NULL; unsigned long long aentries = 0, asearched = 0, afound = 0, anew = 0, ainvalid = 0, aignore = 0, adelete = 0, adelete_list = 0, 
diff --git a/src/collectors/proc.plugin/proc_net_wireless.c b/src/collectors/proc.plugin/proc_net_wireless.c index c7efa33350a4cb..1b137e6b2d9ebc 100644 --- a/src/collectors/proc.plugin/proc_net_wireless.c +++ b/src/collectors/proc.plugin/proc_net_wireless.c @@ -208,7 +208,7 @@ int do_proc_net_wireless(int update_every, usec_t dt) UNUSED(dt); static procfile *ff = NULL; static int do_status, do_quality = -1, do_discarded_packets, do_beacon; - static char *proc_net_wireless_filename = NULL; + static const char *proc_net_wireless_filename = NULL; if (unlikely(do_quality == -1)) { char filename[FILENAME_MAX + 1]; diff --git a/src/collectors/proc.plugin/proc_pressure.c b/src/collectors/proc.plugin/proc_pressure.c index 4037e60acedffb..c4d4bc2b1e67e0 100644 --- a/src/collectors/proc.plugin/proc_pressure.c +++ b/src/collectors/proc.plugin/proc_pressure.c @@ -158,7 +158,7 @@ int do_proc_pressure(int update_every, usec_t dt) { int i; static usec_t next_pressure_dt = 0; - static char *base_path = NULL; + static const char *base_path = NULL; update_every = (update_every < MIN_PRESSURE_UPDATE_EVERY) ? 
MIN_PRESSURE_UPDATE_EVERY : update_every; pressure_update_every = update_every; @@ -170,9 +170,8 @@ int do_proc_pressure(int update_every, usec_t dt) { return 0; } - if (unlikely(!base_path)) { + if (unlikely(!base_path)) base_path = config_get(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, "base path of pressure metrics", "/proc/pressure"); - } for (i = 0; i < PRESSURE_NUM_RESOURCES; i++) { procfile *ff = resource_info[i].pf; diff --git a/src/collectors/proc.plugin/proc_spl_kstat_zfs.c b/src/collectors/proc.plugin/proc_spl_kstat_zfs.c index be96f444931b65..a73a840e943c47 100644 --- a/src/collectors/proc.plugin/proc_spl_kstat_zfs.c +++ b/src/collectors/proc.plugin/proc_spl_kstat_zfs.c @@ -18,7 +18,7 @@ int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) { static int do_zfs_stats = 0; static procfile *ff = NULL; - static char *dirname = NULL; + static const char *dirname = NULL; static ARL_BASE *arl_base = NULL; arcstats.l2exist = -1; diff --git a/src/collectors/proc.plugin/proc_stat.c b/src/collectors/proc.plugin/proc_stat.c index c211ceee56642f..c7ded00c78ad6c 100644 --- a/src/collectors/proc.plugin/proc_stat.c +++ b/src/collectors/proc.plugin/proc_stat.c @@ -293,7 +293,7 @@ static void* wake_cpu_thread(void* core) { return 0; } -static int read_schedstat(char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) { +static int read_schedstat(const char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) { static size_t cpuidle_charts_len = 0; static procfile *ff = NULL; struct per_core_cpuidle_chart *cpuidle_charts = *cpuidle_charts_address; @@ -373,7 +373,7 @@ static int read_one_state(char *buf, const char *filename, int *fd) { return 1; } -static int read_cpuidle_states(char *cpuidle_name_filename , char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) { +static int read_cpuidle_states(const char 
*cpuidle_name_filename, const char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) { char filename[FILENAME_MAX + 1]; static char next_state_filename[FILENAME_MAX + 1]; struct stat stbuf; @@ -484,7 +484,7 @@ int do_proc_stat(int update_every, usec_t dt) { static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1, do_core_throttle_count = -1, do_package_throttle_count = -1, do_cpu_freq = -1, do_cpuidle = -1; static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked; - static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL, + static const char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL, *time_in_state_filename = NULL, *schedstat_filename = NULL, *cpuidle_name_filename = NULL, *cpuidle_time_filename = NULL; static const RRDVAR_ACQUIRED *cpus_var = NULL; static int accurate_freq_avail = 0, accurate_freq_is_used = 0; diff --git a/src/collectors/proc.plugin/proc_uptime.c b/src/collectors/proc.plugin/proc_uptime.c index ddab7269beba39..7471171eda544e 100644 --- a/src/collectors/proc.plugin/proc_uptime.c +++ b/src/collectors/proc.plugin/proc_uptime.c @@ -5,7 +5,7 @@ int do_proc_uptime(int update_every, usec_t dt) { (void)dt; - static char *uptime_filename = NULL; + static const char *uptime_filename = NULL; if(!uptime_filename) { char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime"); diff --git a/src/collectors/proc.plugin/sys_class_drm.c b/src/collectors/proc.plugin/sys_class_drm.c index ab4d98a72095c9..0622274a0e49e1 100644 --- a/src/collectors/proc.plugin/sys_class_drm.c +++ b/src/collectors/proc.plugin/sys_class_drm.c @@ -837,7 +837,7 @@ int do_sys_class_drm(int update_every, usec_t dt) { if(unlikely(!drm_dir)) { char filename[FILENAME_MAX 
+ 1]; snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/drm"); - char *drm_dir_name = config_get(CONFIG_SECTION_PLUGIN_PROC_DRM, "directory to monitor", filename); + const char *drm_dir_name = config_get(CONFIG_SECTION_PLUGIN_PROC_DRM, "directory to monitor", filename); if(unlikely(NULL == (drm_dir = opendir(drm_dir_name)))){ collector_error("Cannot read directory '%s'", drm_dir_name); return 1; diff --git a/src/collectors/proc.plugin/sys_class_infiniband.c b/src/collectors/proc.plugin/sys_class_infiniband.c index ff1652ddf789d2..6e32a344b1a050 100644 --- a/src/collectors/proc.plugin/sys_class_infiniband.c +++ b/src/collectors/proc.plugin/sys_class_infiniband.c @@ -302,7 +302,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt) static int initialized = 0; static int enable_new_ports = -1, enable_only_active = CONFIG_BOOLEAN_YES; static int do_bytes = -1, do_packets = -1, do_errors = -1, do_hwpackets = -1, do_hwerrors = -1; - static char *sys_class_infiniband_dirname = NULL; + static const char *sys_class_infiniband_dirname = NULL; static long long int dt_to_refresh_ports = 0, last_refresh_ports_usec = 0; @@ -332,7 +332,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt) SIMPLE_PATTERN_EXACT, true); dt_to_refresh_ports = - config_get_number(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every seconds", 30) * + config_get_duration_seconds(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every", 30) * USEC_PER_SEC; if (dt_to_refresh_ports < 0) dt_to_refresh_ports = 0; diff --git a/src/collectors/proc.plugin/sys_class_power_supply.c b/src/collectors/proc.plugin/sys_class_power_supply.c index c6be72679f3e71..7e4dda777da1b0 100644 --- a/src/collectors/proc.plugin/sys_class_power_supply.c +++ b/src/collectors/proc.plugin/sys_class_power_supply.c @@ -199,7 +199,7 @@ int do_sys_class_power_supply(int update_every, usec_t dt) { (void)dt; static int do_capacity = -1, do_power = -1, 
do_property[3] = {-1}; static int keep_fds_open = CONFIG_BOOLEAN_NO, keep_fds_open_config = -1; - static char *dirname = NULL; + static const char *dirname = NULL; if(unlikely(do_capacity == -1)) { do_capacity = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery capacity", CONFIG_BOOLEAN_YES); diff --git a/src/collectors/proc.plugin/sys_devices_pci_aer.c b/src/collectors/proc.plugin/sys_devices_pci_aer.c index 563ebf0515bed5..c39795ea1b0159 100644 --- a/src/collectors/proc.plugin/sys_devices_pci_aer.c +++ b/src/collectors/proc.plugin/sys_devices_pci_aer.c @@ -2,7 +2,7 @@ #include "plugin_proc.h" -static char *pci_aer_dirname = NULL; +static const char *pci_aer_dirname = NULL; typedef enum __attribute__((packed)) { AER_DEV_NONFATAL = (1 << 0), diff --git a/src/collectors/proc.plugin/sys_devices_system_edac_mc.c b/src/collectors/proc.plugin/sys_devices_system_edac_mc.c index d3db8c04420c62..93ee235cf02085 100644 --- a/src/collectors/proc.plugin/sys_devices_system_edac_mc.c +++ b/src/collectors/proc.plugin/sys_devices_system_edac_mc.c @@ -37,7 +37,7 @@ struct mc { }; static struct mc *mc_root = NULL; -static char *mc_dirname = NULL; +static const char *mc_dirname = NULL; static void find_all_mc() { char name[FILENAME_MAX + 1]; diff --git a/src/collectors/proc.plugin/sys_devices_system_node.c b/src/collectors/proc.plugin/sys_devices_system_node.c index 12f31a04ee3467..bf2059fcd9c877 100644 --- a/src/collectors/proc.plugin/sys_devices_system_node.c +++ b/src/collectors/proc.plugin/sys_devices_system_node.c @@ -15,7 +15,7 @@ static int find_all_nodes() { int numa_node_count = 0; char name[FILENAME_MAX + 1]; snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node"); - char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); + const char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name); DIR *dir = opendir(dirname); if(!dir) { diff 
--git a/src/collectors/proc.plugin/sys_fs_btrfs.c b/src/collectors/proc.plugin/sys_fs_btrfs.c index bf9b002bcbc742..22eb990c8cde48 100644 --- a/src/collectors/proc.plugin/sys_fs_btrfs.c +++ b/src/collectors/proc.plugin/sys_fs_btrfs.c @@ -678,7 +678,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { , do_error_stats = CONFIG_BOOLEAN_AUTO; static usec_t refresh_delta = 0, refresh_every = 60 * USEC_PER_SEC; - static char *btrfs_path = NULL; + static const char *btrfs_path = NULL; (void)dt; @@ -689,7 +689,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) { snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/fs/btrfs"); btrfs_path = config_get("plugin:proc:/sys/fs/btrfs", "path to monitor", filename); - refresh_every = config_get_number("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC; + refresh_every = config_get_duration_seconds("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC; refresh_delta = refresh_every; do_allocation_disks = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "physical disks allocation", do_allocation_disks); diff --git a/src/collectors/profile.plugin/plugin_profile.cc b/src/collectors/profile.plugin/plugin_profile.cc index 390bca29ead2ae..8725d16d8e737b 100644 --- a/src/collectors/profile.plugin/plugin_profile.cc +++ b/src/collectors/profile.plugin/plugin_profile.cc @@ -194,9 +194,11 @@ static void profile_main_cleanup(void *pptr) { extern "C" void *profile_main(void *ptr) { CLEANUP_FUNCTION_REGISTER(profile_main_cleanup) cleanup_ptr = ptr; - int UpdateEvery = (int) config_get_number(CONFIG_SECTION_PROFILE, "update every", 1); - if (UpdateEvery < localhost->rrd_update_every) + int UpdateEvery = (int) config_get_duration_seconds(CONFIG_SECTION_PROFILE, "update every", 1); + if (UpdateEvery < localhost->rrd_update_every) { UpdateEvery = localhost->rrd_update_every; + 
config_set_duration_seconds(CONFIG_SECTION_PROFILE, "update every", UpdateEvery); + } // pick low-default values, in case this plugin is ever enabled accidentaly. size_t NumThreads = config_get_number(CONFIG_SECTION_PROFILE, "number of threads", 2); diff --git a/src/collectors/python.d.plugin/python.d.conf b/src/collectors/python.d.plugin/python.d.conf index 482123f2a6bdc9..0fa8b07e192ac3 100644 --- a/src/collectors/python.d.plugin/python.d.conf +++ b/src/collectors/python.d.plugin/python.d.conf @@ -38,7 +38,6 @@ go_expvar: no # spigotmc: yes # traefik: yes # varnish: yes -# w1sensor: yes # zscores: no @@ -79,3 +78,4 @@ tomcat: no # Removed (replaced with go.d/tomcat) tor: no # Removed (replaced with go.d/tor). puppet: no # Removed (replaced with go.d/puppet). uwsgi: no # Removed (replaced with go.d/uwsgi). +w1sensor: no # Removed (replaced with go.d/w1sensor) diff --git a/src/collectors/python.d.plugin/w1sensor/metadata.yaml b/src/collectors/python.d.plugin/w1sensor/metadata.yaml deleted file mode 100644 index 7b0768237184bf..00000000000000 --- a/src/collectors/python.d.plugin/w1sensor/metadata.yaml +++ /dev/null @@ -1,119 +0,0 @@ -plugin_name: python.d.plugin -modules: - - meta: - plugin_name: python.d.plugin - module_name: w1sensor - monitored_instance: - name: 1-Wire Sensors - link: "https://www.analog.com/en/product-category/1wire-temperature-sensors.html" - categories: - - data-collection.hardware-devices-and-sensors - icon_filename: "1-wire.png" - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - keywords: - - temperature - - sensor - - 1-wire - most_popular: false - overview: - data_collection: - metrics_description: "Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts." - method_description: "The collector uses the wire, w1_gpio, and w1_therm kernel modules. 
Currently temperature sensors are supported and automatically detected." - supported_platforms: - include: - - Linux - exclude: [] - multi_instance: true - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: "The collector will try to auto detect available 1-Wire devices." - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: "Required Linux kernel modules" - description: "Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded." - configuration: - file: - name: python.d/w1sensor.conf - options: - description: | - There are 2 sections: - - * Global variables - * One or more JOBS that can define multiple different instances to monitor. - - The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - - Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - - Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. - folding: - title: "Config options" - enabled: true - list: - - name: update_every - description: Sets the default data collection frequency. - default_value: 5 - required: false - - name: priority - description: Controls the order of charts at the netdata dashboard. - default_value: 60000 - required: false - - name: autodetection_retry - description: Sets the job re-check interval in seconds. - default_value: 0 - required: false - - name: penalty - description: Indicates whether to apply penalty to update_every in case of failures. - default_value: yes - required: false - - name: name - description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. 
This allows autodetection to try several alternatives and pick the one that works. - default_value: "" - required: false - - name: name_<1-Wire id> - description: This allows associating a human readable name with a sensor's 1-Wire identifier. - default_value: "" - required: false - examples: - folding: - enabled: false - title: "Config" - list: - - name: Provide human readable names - description: Associate two 1-Wire identifiers with human readable names. - config: | - sensors: - name_00000022276e: 'Machine room' - name_00000022298f: 'Rack 12' - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: "These metrics refer to the entire monitored application." - labels: [] - metrics: - - name: w1sensor.temp - description: 1-Wire Temperature Sensor - unit: "Celsius" - chart_type: line - dimensions: - - name: a dimension per sensor diff --git a/src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py deleted file mode 100644 index 66797ced3abba0..00000000000000 --- a/src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -# Description: 1-wire temperature monitor netdata python.d module -# Author: Diomidis Spinellis -# SPDX-License-Identifier: GPL-3.0-or-later - -import os -import re - -from bases.FrameworkServices.SimpleService import SimpleService - -# default module values (can be overridden per job in `config`) -update_every = 5 - -# Location where 1-Wire devices can be found -W1_DIR = '/sys/bus/w1/devices/' - -# Lines matching the following regular expression contain a temperature value -RE_TEMP = re.compile(r' t=(-?\d+)') - -ORDER = [ - 'temp', -] - -CHARTS = { - 'temp': { - 'options': [None, '1-Wire Temperature Sensor', 'Celsius', 'Temperature', 'w1sensor.temp', 'line'], - 'lines': [] - } -} - -# Known and 
supported family members -# Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c -THERM_FAMILY = { - '10': 'W1_THERM_DS18S20', - '22': 'W1_THERM_DS1822', - '28': 'W1_THERM_DS18B20', - '3b': 'W1_THERM_DS1825', - '42': 'W1_THERM_DS28EA00', -} - - -class Service(SimpleService): - """Provide netdata service for 1-Wire sensors""" - - def __init__(self, configuration=None, name=None): - SimpleService.__init__(self, configuration=configuration, name=name) - self.order = ORDER - self.definitions = CHARTS - self.probes = [] - - def check(self): - """Auto-detect available 1-Wire sensors, setting line definitions - and probes to be monitored.""" - try: - file_names = os.listdir(W1_DIR) - except OSError as err: - self.error(err) - return False - - lines = [] - for file_name in file_names: - if file_name[2] != '-': - continue - if not file_name[0:2] in THERM_FAMILY: - continue - - self.probes.append(file_name) - identifier = file_name[3:] - name = identifier - config_name = self.configuration.get('name_' + identifier) - if config_name: - name = config_name - lines.append(['w1sensor_temp_' + identifier, name, 'absolute', - 1, 10]) - self.definitions['temp']['lines'] = lines - return len(self.probes) > 0 - - def get_data(self): - """Return data read from sensors.""" - data = dict() - - for file_name in self.probes: - file_path = W1_DIR + file_name + '/w1_slave' - identifier = file_name[3:] - try: - with open(file_path, 'r') as device_file: - for line in device_file: - matched = RE_TEMP.search(line) - if matched: - # Round to one decimal digit to filter-out noise - value = round(int(matched.group(1)) / 1000., 1) - value = int(value * 10) - data['w1sensor_temp_' + identifier] = value - except (OSError, IOError) as err: - self.error(err) - continue - return data or None diff --git a/src/collectors/python.d.plugin/w1sensor/w1sensor.conf b/src/collectors/python.d.plugin/w1sensor/w1sensor.conf deleted file mode 100644 index b60d286503ab74..00000000000000 --- 
a/src/collectors/python.d.plugin/w1sensor/w1sensor.conf +++ /dev/null @@ -1,72 +0,0 @@ -# netdata python.d.plugin configuration for w1sensor -# -# This file is in YaML format. Generally the format is: -# -# name: value -# -# There are 2 sections: -# - global variables -# - one or more JOBS -# -# JOBS allow you to collect values from multiple sources. -# Each source will have its own set of charts. -# -# JOB parameters have to be indented (using spaces only, example below). - -# ---------------------------------------------------------------------- -# Global Variables -# These variables set the defaults for all JOBs, however each JOB -# may define its own, overriding the defaults. - -# update_every sets the default data collection frequency. -# If unset, the python.d.plugin default is used. -# update_every: 5 - -# priority controls the order of charts at the netdata dashboard. -# Lower numbers move the charts towards the top of the page. -# If unset, the default for python.d.plugin is used. -# priority: 60000 - -# penalty indicates whether to apply penalty to update_every in case of failures. -# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes. -# penalty: yes - -# autodetection_retry sets the job re-check interval in seconds. -# The job is not deleted if check fails. -# Attempts to start the job are made once every autodetection_retry. -# This feature is disabled by default. -# autodetection_retry: 0 - -# ---------------------------------------------------------------------- -# JOBS (data collection sources) -# -# The default JOBS share the same *name*. JOBS with the same name -# are mutually exclusive. Only one of them will be allowed running at -# any time. This allows autodetection to try several alternatives and -# pick the one that works. -# -# Any number of jobs is supported. -# -# All python.d.plugin JOBS (for all its modules) support a set of -# predefined parameters. 
These are: -# -# job_name: -# name: myname # the JOB's name as it will appear at the -# # dashboard (by default is the job_name) -# # JOBs sharing a name are mutually exclusive -# update_every: 5 # the JOB's data collection frequency -# priority: 60000 # the JOB's order on the dashboard -# penalty: yes # the JOB's penalty -# autodetection_retry: 0 # the JOB's re-check interval in seconds -# -# Additionally to the above, w1sensor also supports the following: -# -# name_<1-Wire id>: '' -# This allows associating a human readable name with a sensor's 1-Wire -# identifier. Example: -# name_00000022276e: 'Machine room' -# name_00000022298f: 'Rack 12' -# -# ---------------------------------------------------------------------- -# AUTO-DETECTION JOBS -# only one of them will run (they have the same name) diff --git a/src/collectors/statsd.plugin/README.md b/src/collectors/statsd.plugin/README.md index 3028292423a0ea..4162a096a84f3a 100644 --- a/src/collectors/statsd.plugin/README.md +++ b/src/collectors/statsd.plugin/README.md @@ -170,11 +170,11 @@ You can find the configuration at `/etc/netdata/netdata.conf`: [statsd] # enabled = yes # decimal detail = 1000 - # update every (flushInterval) = 1 + # update every (flushInterval) = 1s # udp messages to process at once = 10 # create private charts for metrics matching = * # max private charts hard limit = 1000 - # cleanup obsolete charts after secs = 0 + # cleanup obsolete charts after = 0 # private charts memory mode = save # private charts history = 3996 # histograms and timers percentile (percentThreshold) = 95.00000 @@ -204,7 +204,7 @@ You can find the configuration at `/etc/netdata/netdata.conf`: is a space separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be `*` (to listen on all IPs) or even a hostname. 
-- `update every (flushInterval) = 1` seconds, controls the frequency StatsD will push the collected metrics to Netdata charts. +- `update every (flushInterval) = 1s` controls the frequency StatsD will push the collected metrics to Netdata charts. - `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. Netdata collects metrics using signed 64-bit integers and their fractional detail is controlled using multipliers and divisors. This setting is used to multiply all collected values to convert them to integers and is also set as the divisors, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc). @@ -238,7 +238,7 @@ The default behavior is to use the same settings as the rest of the Netdata Agen For optimization reasons, Netdata imposes a hard limit on private metric charts. The limit is set via the `max private charts hard limit` setting (which defaults to 1000 charts). Metrics above this hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to chart, it will be sent to backend servers too). -If you have many ephemeral metrics collected (i.e. that you collect values for a certain amount of time), you can set the configuration option `set charts as obsolete after secs`. Setting a value in seconds here, means that Netdata will mark those metrics (and their private charts) as obsolete after the specified time has passed since the last sent metric value. Those charts will later be deleted according to the setting in `cleanup obsolete charts after secs`. Setting `set charts as obsolete after secs` to 0 (which is also the default value) will disable this functionality. +If you have many ephemeral metrics collected (i.e. that you collect values for a certain amount of time), you can set the configuration option `set charts as obsolete after`. 
Setting a value in seconds here, means that Netdata will mark those metrics (and their private charts) as obsolete after the specified time has passed since the last sent metric value. Those charts will later be deleted according to the setting in `cleanup obsolete charts after`. Setting `set charts as obsolete after` to 0 (which is also the default value) will disable this functionality. Example private charts (automatically generated without any configuration): diff --git a/src/collectors/statsd.plugin/statsd.c b/src/collectors/statsd.plugin/statsd.c index 42c5ae0c720ac4..af962b0f6fe62f 100644 --- a/src/collectors/statsd.plugin/statsd.c +++ b/src/collectors/statsd.plugin/statsd.c @@ -2491,10 +2491,11 @@ void *statsd_main(void *ptr) { statsd.enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, "statsd", statsd.enabled); statsd.update_every = default_rrd_update_every; - statsd.update_every = (int)config_get_number(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every); + statsd.update_every = (int)config_get_duration_seconds(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every); if(statsd.update_every < default_rrd_update_every) { collector_error("STATSD: minimum flush interval %d given, but the minimum is the update every of netdata. 
Using %d", statsd.update_every, default_rrd_update_every); statsd.update_every = default_rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every); } #ifdef HAVE_RECVMMSG @@ -2504,13 +2505,26 @@ void *statsd_main(void *ptr) { statsd.charts_for = simple_pattern_create( config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), NULL, SIMPLE_PATTERN_EXACT, true); - statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts_hard); - statsd.set_obsolete_after = (size_t)config_get_number(CONFIG_SECTION_STATSD, "set charts as obsolete after secs", (long long)statsd.set_obsolete_after); - statsd.decimal_detail = (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail); - statsd.tcp_idle_timeout = (size_t) config_get_number(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", (long long int)statsd.tcp_idle_timeout); - statsd.private_charts_hidden = (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden); - statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile); + statsd.max_private_charts_hard = + (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts_hard); + + statsd.set_obsolete_after = + (size_t)config_get_duration_seconds(CONFIG_SECTION_STATSD, "set charts as obsolete after", (long long)statsd.set_obsolete_after); + + statsd.decimal_detail = + (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail); + + statsd.tcp_idle_timeout = + (size_t) config_get_duration_seconds(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after", (long 
long int)statsd.tcp_idle_timeout); + + statsd.private_charts_hidden = + (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden); + + statsd.histogram_percentile = + (double)config_get_double( + CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile); + if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) { collector_error("STATSD: invalid histograms and timers percentile %0.5f given", statsd.histogram_percentile); statsd.histogram_percentile = 95.0; @@ -2521,7 +2535,8 @@ void *statsd_main(void *ptr) { statsd.histogram_percentile_str = strdupz(buffer); } - statsd.dictionary_max_unique = config_get_number(CONFIG_SECTION_STATSD, "dictionaries max unique dimensions", statsd.dictionary_max_unique); + statsd.dictionary_max_unique = + config_get_number(CONFIG_SECTION_STATSD, "dictionaries max unique dimensions", statsd.dictionary_max_unique); if(config_get_boolean(CONFIG_SECTION_STATSD, "add dimension for number of events received", 0)) { statsd.gauges.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT; diff --git a/src/collectors/systemd-journal.plugin/systemd-journal.c b/src/collectors/systemd-journal.plugin/systemd-journal.c index 197bfd5ce1d770..68890732fd9ace 100644 --- a/src/collectors/systemd-journal.plugin/systemd-journal.c +++ b/src/collectors/systemd-journal.plugin/systemd-journal.c @@ -2224,6 +2224,7 @@ void function_systemd_journal(const char *transaction, char *function, usec_t *s int response; if(q.info) { + buffer_json_member_add_uint64(wb, "v", 3); facets_accepted_parameters_to_json_array(facets, wb, false); buffer_json_member_add_array(wb, "required_params"); { diff --git a/src/collectors/tc.plugin/plugin_tc.c b/src/collectors/tc.plugin/plugin_tc.c index 95b0e3b1c3f329..7102e216d55229 100644 --- a/src/collectors/tc.plugin/plugin_tc.c +++ b/src/collectors/tc.plugin/plugin_tc.c @@ -912,7 +912,7 @@ void 
*tc_main(void *ptr) { uint32_t first_hash; snprintfz(command, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_primary_plugins_dir); - char *tc_script = config_get("plugin:tc", "script to run to get tc values", command); + const char *tc_script = config_get("plugin:tc", "script to run to get tc values", command); while(service_running(SERVICE_COLLECTORS)) { struct tc_device *device = NULL; diff --git a/src/collectors/timex.plugin/plugin_timex.c b/src/collectors/timex.plugin/plugin_timex.c index 6e200c425190d7..284a69fd14751e 100644 --- a/src/collectors/timex.plugin/plugin_timex.c +++ b/src/collectors/timex.plugin/plugin_timex.c @@ -50,9 +50,11 @@ void *timex_main(void *ptr) worker_register("TIMEX"); worker_register_job_name(0, "clock check"); - int update_every = (int)config_get_number(CONFIG_SECTION_TIMEX, "update every", 10); - if (update_every < localhost->rrd_update_every) + int update_every = (int)config_get_duration_seconds(CONFIG_SECTION_TIMEX, "update every", 10); + if (update_every < localhost->rrd_update_every) { update_every = localhost->rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_TIMEX, "update every", update_every); + } int do_sync = config_get_boolean(CONFIG_SECTION_TIMEX, "clock synchronization state", CONFIG_BOOLEAN_YES); int do_offset = config_get_boolean(CONFIG_SECTION_TIMEX, "time offset", CONFIG_BOOLEAN_YES); diff --git a/src/daemon/commands.c b/src/daemon/commands.c index df083191f5891a..a2b208abeeadd9 100644 --- a/src/daemon/commands.c +++ b/src/daemon/commands.c @@ -261,9 +261,8 @@ static cmd_status_t cmd_read_config_execute(char *args, char **message) const char *conf_file = temp; /* "cloud" is cloud.conf, otherwise netdata.conf */ struct config *tmp_config = strcmp(conf_file, "cloud") ? 
&netdata_config : &cloud_config; - char *value = appconfig_get(tmp_config, temp + offset + 1, temp + offset2 + 1, NULL); - if (value == NULL) - { + const char *value = appconfig_get(tmp_config, temp + offset + 1, temp + offset2 + 1, NULL); + if (value == NULL) { netdata_log_error("Cannot execute read-config conf_file=%s section=%s / key=%s because no value set", conf_file, temp + offset + 1, @@ -271,13 +270,11 @@ static cmd_status_t cmd_read_config_execute(char *args, char **message) freez(temp); return CMD_STATUS_FAILURE; } - else - { + else { (*message) = strdupz(value); freez(temp); return CMD_STATUS_SUCCESS; } - } static cmd_status_t cmd_write_config_execute(char *args, char **message) diff --git a/src/daemon/common.c b/src/daemon/common.c index 6cc9c067cf1dea..aa7a3c581191be 100644 --- a/src/daemon/common.c +++ b/src/daemon/common.c @@ -2,21 +2,21 @@ #include "common.h" -char *netdata_configured_hostname = NULL; -char *netdata_configured_user_config_dir = CONFIG_DIR; -char *netdata_configured_stock_config_dir = LIBCONFIG_DIR; -char *netdata_configured_log_dir = LOG_DIR; -char *netdata_configured_primary_plugins_dir = PLUGINS_DIR; -char *netdata_configured_web_dir = WEB_DIR; -char *netdata_configured_cache_dir = CACHE_DIR; -char *netdata_configured_varlib_dir = VARLIB_DIR; -char *netdata_configured_lock_dir = VARLIB_DIR "/lock"; -char *netdata_configured_cloud_dir = VARLIB_DIR "/cloud.d"; -char *netdata_configured_home_dir = VARLIB_DIR; -char *netdata_configured_host_prefix = NULL; -char *netdata_configured_timezone = NULL; -char *netdata_configured_abbrev_timezone = NULL; -int32_t netdata_configured_utc_offset = 0; +const char *netdata_configured_hostname = NULL; +const char *netdata_configured_user_config_dir = CONFIG_DIR; +const char *netdata_configured_stock_config_dir = LIBCONFIG_DIR; +const char *netdata_configured_log_dir = LOG_DIR; +const char *netdata_configured_primary_plugins_dir = PLUGINS_DIR; +const char *netdata_configured_web_dir = WEB_DIR; 
+const char *netdata_configured_cache_dir = CACHE_DIR; +const char *netdata_configured_varlib_dir = VARLIB_DIR; +const char *netdata_configured_lock_dir = VARLIB_DIR "/lock"; +const char *netdata_configured_cloud_dir = VARLIB_DIR "/cloud.d"; +const char *netdata_configured_home_dir = VARLIB_DIR; +const char *netdata_configured_host_prefix = NULL; +const char *netdata_configured_timezone = NULL; +const char *netdata_configured_abbrev_timezone = NULL; +int32_t netdata_configured_utc_offset = 0; bool netdata_ready = false; diff --git a/src/daemon/common.h b/src/daemon/common.h index 732f55536f8f8a..cc2ea289e4d936 100644 --- a/src/daemon/common.h +++ b/src/daemon/common.h @@ -6,31 +6,6 @@ #include "libnetdata/libnetdata.h" #include "libuv_workers.h" -// ---------------------------------------------------------------------------- -// shortcuts for the default netdata configuration - -#define config_load(filename, overwrite_used, section) appconfig_load(&netdata_config, filename, overwrite_used, section) -#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value) -#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value) -#define config_get_float(section, name, value) appconfig_get_float(&netdata_config, section, name, value) -#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value) -#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value) -#define config_get_duration(section, name, value) appconfig_get_duration(&netdata_config, section, name, value) - -#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value) -#define config_set_default(section, name, value) appconfig_set_default(&netdata_config, section, name, value) -#define config_set_number(section, name, value) 
appconfig_set_number(&netdata_config, section, name, value) -#define config_set_float(section, name, value) appconfig_set_float(&netdata_config, section, name, value) -#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value) - -#define config_exists(section, name) appconfig_exists(&netdata_config, section, name) -#define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new) - -#define netdata_conf_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed, true) - -#define config_section_destroy(section) appconfig_section_destroy_non_loaded(&netdata_config, section) -#define config_section_option_destroy(section, name) appconfig_section_option_destroy_non_loaded(&netdata_config, section, name) - // ---------------------------------------------------------------------------- // netdata include files @@ -96,20 +71,20 @@ #include "analytics.h" // global netdata daemon variables -extern char *netdata_configured_hostname; -extern char *netdata_configured_user_config_dir; -extern char *netdata_configured_stock_config_dir; -extern char *netdata_configured_log_dir; -extern char *netdata_configured_primary_plugins_dir; -extern char *netdata_configured_web_dir; -extern char *netdata_configured_cache_dir; -extern char *netdata_configured_varlib_dir; -extern char *netdata_configured_lock_dir; -extern char *netdata_configured_cloud_dir; -extern char *netdata_configured_home_dir; -extern char *netdata_configured_host_prefix; -extern char *netdata_configured_timezone; -extern char *netdata_configured_abbrev_timezone; +extern const char *netdata_configured_hostname; +extern const char *netdata_configured_user_config_dir; +extern const char *netdata_configured_stock_config_dir; +extern const char *netdata_configured_log_dir; +extern const char *netdata_configured_primary_plugins_dir; +extern const char 
*netdata_configured_web_dir; +extern const char *netdata_configured_cache_dir; +extern const char *netdata_configured_varlib_dir; +extern const char *netdata_configured_lock_dir; +extern const char *netdata_configured_cloud_dir; +extern const char *netdata_configured_home_dir; +extern const char *netdata_configured_host_prefix; +extern const char *netdata_configured_timezone; +extern const char *netdata_configured_abbrev_timezone; extern int32_t netdata_configured_utc_offset; extern int netdata_anonymous_statistics_enabled; diff --git a/src/daemon/config/README.md b/src/daemon/config/README.md index 3c0912fba5808e..d225d01fe01ae3 100644 --- a/src/daemon/config/README.md +++ b/src/daemon/config/README.md @@ -86,24 +86,22 @@ Please note that your data history will be lost if you have modified `history` p ### [db] section options -| setting | default | info | -|:---------------------------------------------:|:----------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `dbengine page cache size MB` and `dbengine disk space MB`.
`ram`: The round-robin database will be temporary and it will be lost when Netdata exits.
`alloc`: Similar to `ram`, but can significantly reduce memory usage, when combined with a low retention and does not support KSM.
`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. Not to be used together with streaming. | -| retention | `3600` | Used with `mode = ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](/src/database/README.md) for more information. | -| storage tiers | `3` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. | -| dbengine page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated to caching for _Tier 0_ Netdata metric values. | -| dbengine tier **`N`** page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated for caching Netdata metric values of the **`N`** tier.
`N belongs to [1..4]` | -| dbengine disk space MB | `256` | Determines the amount of disk space in MiB that is dedicated to storing _Tier 0_ Netdata metric values and all related metadata describing them. This option is available **only for legacy configuration** (`Agent v1.23.2 and prior`). | -| dbengine multihost disk space MB | `256` | Same functionality as `dbengine disk space MB`, but includes support for storing metrics streamed to a parent node by its children. Can be used in single-node environments as well. This setting is only for _Tier 0_ metrics. | -| dbengine tier **`N`** multihost disk space MB | `256` | Same functionality as `dbengine multihost disk space MB`, but stores metrics of the **`N`** tier (both parent node and its children). Can be used in single-node environments as well.
`N belongs to [1..4]` | -| update every | `1` | The frequency in seconds, for data collection. For more information see the [performance guide](/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md). These metrics stored as _Tier 0_ data. Explore the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). | -| dbengine tier **`N`** update every iterations | `60` | The down sampling value of each tier from the previous one. For each Tier, the greater by one Tier has N (equal to 60 by default) less data points of any metric it collects. This setting can take values from `2` up to `255`.
`N belongs to [1..4]` | -| dbengine tier **`N`** back fill | `New` | Specifies the strategy of recreating missing data on each Tier from the exact lower Tier.
`New`: Sees the latest point on each Tier and save new points to it only if the exact lower Tier has available points for it's observation window (`dbengine tier N update every iterations` window).
`none`: No back filling is applied.
`N belongs to [1..4]` | -| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database and the dbengine page cache to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](/src/database/README.md#ksm) | -| cleanup obsolete charts after secs | `3600` | See [monitoring ephemeral containers](/src/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions | -| gap when lost iterations above | `1` | | -| cleanup orphan hosts after secs | `3600` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. | -| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. | +| setting | default | info | +|:---------------------------------------------:|:-------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `dbengine page cache size` and `dbengine tier X retention size`.
`ram`: The round-robin database will be temporary and it will be lost when Netdata exits.
`alloc`: Similar to `ram`, but can significantly reduce memory usage when combined with a low retention. Does not support KSM.
`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. Not to be used together with streaming. | +| retention | `3600` | Used with `mode = ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](/src/database/README.md) for more information. | +| storage tiers | `3` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. | +| dbengine page cache size | `32MiB` | Determines the amount of RAM in MiB that is dedicated to caching for _Tier 0_ Netdata metric values. | +| dbengine tier **`N`** retention size | `1GiB` | The disk space dedicated to metrics storage, per tier. Can be used in single-node environments as well.
`N belongs to [1..4]` | +| dbengine tier **`N`** retention time | `14d`, `3mo`, `1y`, `1y`, `1y` | The database retention, expressed in time. Can be used in single-node environments as well.
`N belongs to [1..4]` |
+| update every | `1` | The frequency in seconds, for data collection. For more information see the [performance guide](/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md). These metrics are stored as _Tier 0_ data. Explore the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). |
+| dbengine tier **`N`** update every iterations | `60` | The down sampling value of each tier from the previous one. For each Tier, the next higher Tier has N (equal to 60 by default) times fewer data points of any metric it collects. This setting can take values from `2` up to `255`.
`N belongs to [1..4]` | +| dbengine tier back fill | `new` | Specifies the strategy of recreating missing data on higher database Tiers.
`new`: Sees the latest point on each Tier and saves new points to it only if the exact lower Tier has available points for its observation window (`dbengine tier N update every iterations` window).
`none`: No back filling is applied.
`N belongs to [1..4]` | +| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database and the dbengine page cache to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](/src/database/README.md#ksm) | +| cleanup obsolete charts after | `1h` | See [monitoring ephemeral containers](/src/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions | +| gap when lost iterations above | `1` | | +| cleanup orphan hosts after | `1h` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. | +| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. | > ### Info > @@ -140,7 +138,7 @@ There are additional configuration options for the logs. For more info, see [Net | health | `journal` | The filename to save the log of Netdata health collectors. You can also set it to `syslog` to send the access log to syslog, or `off` to disable this log. Defaults to `Journal` if using systemd. | | daemon | `journal` | The filename to save the log of Netdata daemon. You can also set it to `syslog` to send the access log to syslog, or `off` to disable this log. Defaults to `Journal` if using systemd. | | facility | `daemon` | A facility keyword is used to specify the type of system that is logging the message. | -| logs flood protection period | `60` | Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`. | +| logs flood protection period | `1m` | Length of period during which the number of errors should not exceed the `errors to trigger flood protection`. | | logs to trigger flood protection | `1000` | Number of errors written to the log in `errors flood protection period` sec before flood protection is activated. 
| | level | `info` | Controls which log messages are logged, with error being the most important. Supported values: `info` and `error`. | @@ -172,15 +170,15 @@ monitoring](/src/health/README.md). [Alert notifications](/src/health/notifications/README.md) are configured in `health_alarm_notify.conf`. -| setting | default | info | -|:----------------------------------------------:|:------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| enabled | `yes` | Set to `no` to disable all alerts and notifications | -| in memory max health log entries | 1000 | Size of the alert history held in RAM | -| script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alert notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). | -| run at least every seconds | `10` | Controls how often all alert conditions should be evaluated. | -| postpone alarms during hibernation for seconds | `60` | Prevents false alerts. May need to be increased if you get alerts during hibernation. | -| health log history | `432000` | Specifies the history of alert events (in seconds) kept in the agent's sqlite database. | -| enabled alarms | * | Defines which alerts to load from both user and stock directories. This is a [simple pattern](/src/libnetdata/simple_pattern/README.md) list of alert or template names. Can be used to disable specific alerts. For example, `enabled alarms = !oom_kill *` will load all alerts except `oom_kill`. 
| +| setting | default | info | +|:--------------------------------------:|:------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| enabled | `yes` | Set to `no` to disable all alerts and notifications | +| in memory max health log entries | 1000 | Size of the alert history held in RAM | +| script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alert notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). | +| run at least every | `10s` | Controls how often all alert conditions should be evaluated. | +| postpone alarms during hibernation for | `1m` | Prevents false alerts. May need to be increased if you get alerts during hibernation. | +| health log retention | `5d` | Specifies the history of alert events (in seconds) kept in the agent's sqlite database. | +| enabled alarms | * | Defines which alerts to load from both user and stock directories. This is a [simple pattern](/src/libnetdata/simple_pattern/README.md) list of alert or template names. Can be used to disable specific alerts. For example, `enabled alarms = !oom_kill *` will load all alerts except `oom_kill`. 
| ### [web] section options diff --git a/src/daemon/daemon.c b/src/daemon/daemon.c index b97f687fcbdea7..da9b01d7230b78 100644 --- a/src/daemon/daemon.c +++ b/src/daemon/daemon.c @@ -58,7 +58,7 @@ static void change_dir_ownership(const char *dir, uid_t uid, gid_t gid, bool rec fix_directory_file_permissions(dir, uid, gid, recursive); } -static void clean_directory(char *dirname) +static void clean_directory(const char *dirname) { DIR *dir = opendir(dirname); if(!dir) return; @@ -188,7 +188,7 @@ static void oom_score_adj(void) { } // check the environment - char *s = getenv("OOMScoreAdjust"); + const char *s = getenv("OOMScoreAdjust"); if(!s || !*s) { snprintfz(buf, sizeof(buf) - 1, "%d", (int)wanted_score); s = buf; diff --git a/src/daemon/environment.c b/src/daemon/environment.c index d5f82a0c3c4a7a..2822278d3d22b2 100644 --- a/src/daemon/environment.c +++ b/src/daemon/environment.c @@ -69,7 +69,7 @@ void set_environment_for_plugins_and_scripts(void) { buffer_free(user_plugins_dirs); } - char *default_port = appconfig_get(&netdata_config, CONFIG_SECTION_WEB, "default port", NULL); + const char *default_port = appconfig_get(&netdata_config, CONFIG_SECTION_WEB, "default port", NULL); int clean = 0; if (!default_port) { default_port = strdupz("19999"); @@ -78,7 +78,7 @@ void set_environment_for_plugins_and_scripts(void) { nd_setenv("NETDATA_LISTEN_PORT", default_port, 1); if (clean) - freez(default_port); + freez((char *)default_port); // set the path we need char path[4096], *p = getenv("PATH"); diff --git a/src/daemon/global_statistics.c b/src/daemon/global_statistics.c index eaf1927236d41f..e860232edb9649 100644 --- a/src/daemon/global_statistics.c +++ b/src/daemon/global_statistics.c @@ -4221,9 +4221,11 @@ void *global_statistics_main(void *ptr) global_statistics_register_workers(); int update_every = - (int)config_get_number(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every); - if (update_every < localhost->rrd_update_every) + 
(int)config_get_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every); + if (update_every < localhost->rrd_update_every) { update_every = localhost->rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", update_every); + } usec_t step = update_every * USEC_PER_SEC; heartbeat_t hb; @@ -4277,9 +4279,11 @@ void *global_statistics_extended_main(void *ptr) global_statistics_register_workers(); int update_every = - (int)config_get_number(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every); - if (update_every < localhost->rrd_update_every) + (int)config_get_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", localhost->rrd_update_every); + if (update_every < localhost->rrd_update_every) { update_every = localhost->rrd_update_every; + config_set_duration_seconds(CONFIG_SECTION_GLOBAL_STATISTICS, "update every", update_every); + } usec_t step = update_every * USEC_PER_SEC; heartbeat_t hb; diff --git a/src/daemon/main.c b/src/daemon/main.c index 7af3533afa8fdf..9b3758ca466962 100644 --- a/src/daemon/main.c +++ b/src/daemon/main.c @@ -28,18 +28,7 @@ bool ieee754_doubles = false; time_t netdata_start_time = 0; struct netdata_static_thread *static_threads; -struct config netdata_config = { - .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { - .avl_tree = { - .root = NULL, - .compar = appconfig_section_compare - }, - .rwlock = AVL_LOCK_INITIALIZER - } -}; +struct config netdata_config = APPCONFIG_INITIALIZER; typedef struct service_thread { pid_t tid; @@ -527,12 +516,12 @@ void web_server_threading_selection(void) { int make_dns_decision(const char *section_name, const char *config_name, const char *default_value, SIMPLE_PATTERN *p) { - char *value = config_get(section_name,config_name,default_value); + const char *value = config_get(section_name,config_name,default_value); if(!strcmp("yes",value)) return 
1; if(!strcmp("no",value)) return 0; - if(strcmp("heuristic",value)) + if(strcmp("heuristic",value) != 0) netdata_log_error("Invalid configuration option '%s' for '%s'/'%s'. Valid options are 'yes', 'no' and 'heuristic'. Proceeding with 'heuristic'", value, section_name, config_name); @@ -542,11 +531,13 @@ int make_dns_decision(const char *section_name, const char *config_name, const c void web_server_config_options(void) { web_client_timeout = - (int)config_get_number(CONFIG_SECTION_WEB, "disconnect idle clients after seconds", web_client_timeout); + (int)config_get_duration_seconds(CONFIG_SECTION_WEB, "disconnect idle clients after", web_client_timeout); + web_client_first_request_timeout = - (int)config_get_number(CONFIG_SECTION_WEB, "timeout for first request", web_client_first_request_timeout); + (int)config_get_duration_seconds(CONFIG_SECTION_WEB, "timeout for first request", web_client_first_request_timeout); + web_client_streaming_rate_t = - config_get_number(CONFIG_SECTION_WEB, "accept a streaming request every seconds", web_client_streaming_rate_t); + config_get_duration_seconds(CONFIG_SECTION_WEB, "accept a streaming request every", web_client_streaming_rate_t); respect_web_browser_do_not_track_policy = config_get_boolean(CONFIG_SECTION_WEB, "respect do not track policy", respect_web_browser_do_not_track_policy); @@ -595,7 +586,7 @@ void web_server_config_options(void) web_enable_gzip = config_get_boolean(CONFIG_SECTION_WEB, "enable gzip compression", web_enable_gzip); - char *s = config_get(CONFIG_SECTION_WEB, "gzip compression strategy", "default"); + const char *s = config_get(CONFIG_SECTION_WEB, "gzip compression strategy", "default"); if(!strcmp(s, "default")) web_gzip_strategy = Z_DEFAULT_STRATEGY; else if(!strcmp(s, "filtered")) @@ -842,7 +833,7 @@ static void log_init(void) { time_t period = ND_LOG_DEFAULT_THROTTLE_PERIOD; size_t logs = ND_LOG_DEFAULT_THROTTLE_LOGS; - period = config_get_number(CONFIG_SECTION_LOGS, "logs flood protection period", 
period); + period = config_get_duration_seconds(CONFIG_SECTION_LOGS, "logs flood protection period", period); logs = (unsigned long)config_get_number(CONFIG_SECTION_LOGS, "logs to trigger flood protection", (long long int)logs); nd_log_set_flood_protection(logs, period); @@ -886,7 +877,7 @@ static void log_init(void) { aclk_config_get_query_scope(); } -static char *get_varlib_subdir_from_config(const char *prefix, const char *dir) { +static const char *get_varlib_subdir_from_config(const char *prefix, const char *dir) { char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s/%s", prefix, dir); return config_get(CONFIG_SECTION_DIRECTORIES, dir, filename); @@ -894,6 +885,7 @@ static char *get_varlib_subdir_from_config(const char *prefix, const char *dir) static void backwards_compatible_config() { // move [global] options to the [web] section + config_move(CONFIG_SECTION_GLOBAL, "http port listen backlog", CONFIG_SECTION_WEB, "listen backlog"); @@ -997,7 +989,10 @@ static void backwards_compatible_config() { CONFIG_SECTION_PLUGINS, "statsd"); config_move(CONFIG_SECTION_GLOBAL, "memory mode", - CONFIG_SECTION_DB, "mode"); + CONFIG_SECTION_DB, "db"); + + config_move(CONFIG_SECTION_DB, "mode", + CONFIG_SECTION_DB, "db"); config_move(CONFIG_SECTION_GLOBAL, "history", CONFIG_SECTION_DB, "retention"); @@ -1006,7 +1001,13 @@ static void backwards_compatible_config() { CONFIG_SECTION_DB, "update every"); config_move(CONFIG_SECTION_GLOBAL, "page cache size", - CONFIG_SECTION_DB, "dbengine page cache size MB"); + CONFIG_SECTION_DB, "dbengine page cache size"); + + config_move(CONFIG_SECTION_DB, "dbengine page cache size MB", + CONFIG_SECTION_DB, "dbengine page cache size"); + + config_move(CONFIG_SECTION_DB, "dbengine extent cache size MB", + CONFIG_SECTION_DB, "dbengine extent cache size"); config_move(CONFIG_SECTION_DB, "page cache size", CONFIG_SECTION_DB, "dbengine page cache size MB"); @@ -1017,30 +1018,6 @@ static void backwards_compatible_config() { 
config_move(CONFIG_SECTION_DB, "page cache with malloc", CONFIG_SECTION_DB, "dbengine page cache with malloc"); - config_move(CONFIG_SECTION_GLOBAL, "dbengine disk space", - CONFIG_SECTION_DB, "dbengine disk space MB"); - - config_move(CONFIG_SECTION_GLOBAL, "dbengine multihost disk space", - CONFIG_SECTION_DB, "dbengine multihost disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine disk space MB", - CONFIG_SECTION_DB, "dbengine multihost disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine multihost disk space MB", - CONFIG_SECTION_DB, "dbengine tier 0 disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine tier 1 multihost disk space MB", - CONFIG_SECTION_DB, "dbengine tier 1 disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine tier 2 multihost disk space MB", - CONFIG_SECTION_DB, "dbengine tier 2 disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine tier 3 multihost disk space MB", - CONFIG_SECTION_DB, "dbengine tier 3 disk space MB"); - - config_move(CONFIG_SECTION_DB, "dbengine tier 4 multihost disk space MB", - CONFIG_SECTION_DB, "dbengine tier 4 disk space MB"); - config_move(CONFIG_SECTION_GLOBAL, "memory deduplication (ksm)", CONFIG_SECTION_DB, "memory deduplication (ksm)"); @@ -1054,17 +1031,67 @@ static void backwards_compatible_config() { CONFIG_SECTION_DB, "dbengine pages per extent"); config_move(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds", - CONFIG_SECTION_DB, "cleanup obsolete charts after secs"); + CONFIG_SECTION_DB, "cleanup obsolete charts after"); + + config_move(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", + CONFIG_SECTION_DB, "cleanup obsolete charts after"); config_move(CONFIG_SECTION_GLOBAL, "gap when lost iterations above", CONFIG_SECTION_DB, "gap when lost iterations above"); config_move(CONFIG_SECTION_GLOBAL, "cleanup orphan hosts after seconds", - CONFIG_SECTION_DB, "cleanup orphan hosts after secs"); + CONFIG_SECTION_DB, "cleanup orphan hosts after"); + + 
config_move(CONFIG_SECTION_DB, "cleanup orphan hosts after secs", + CONFIG_SECTION_DB, "cleanup orphan hosts after"); + + config_move(CONFIG_SECTION_DB, "cleanup ephemeral hosts after secs", + CONFIG_SECTION_DB, "cleanup ephemeral hosts after"); + + config_move(CONFIG_SECTION_DB, "seconds to replicate", + CONFIG_SECTION_DB, "replication period"); + + config_move(CONFIG_SECTION_DB, "seconds per replication step", + CONFIG_SECTION_DB, "replication step"); config_move(CONFIG_SECTION_GLOBAL, "enable zero metrics", CONFIG_SECTION_DB, "enable zero metrics"); + // ---------------------------------------------------------------------------------------------------------------- + + config_move(CONFIG_SECTION_GLOBAL, "dbengine disk space", + CONFIG_SECTION_DB, "dbengine tier 0 retention size"); + + config_move(CONFIG_SECTION_GLOBAL, "dbengine multihost disk space", + CONFIG_SECTION_DB, "dbengine tier 0 retention size"); + + config_move(CONFIG_SECTION_DB, "dbengine disk space MB", + CONFIG_SECTION_DB, "dbengine tier 0 retention size"); + + for(size_t tier = 0; tier < RRD_STORAGE_TIERS ;tier++) { + char old_config[128], new_config[128]; + + snprintfz(old_config, sizeof(old_config), "dbengine tier %zu retention days", tier); + snprintfz(new_config, sizeof(new_config), "dbengine tier %zu retention time", tier); + config_move(CONFIG_SECTION_DB, old_config, + CONFIG_SECTION_DB, new_config); + + if(tier == 0) + snprintfz(old_config, sizeof(old_config), "dbengine multihost disk space MB"); + else + snprintfz(old_config, sizeof(old_config), "dbengine tier %zu multihost disk space MB", tier); + snprintfz(new_config, sizeof(new_config), "dbengine tier %zu retention size", tier); + config_move(CONFIG_SECTION_DB, old_config, + CONFIG_SECTION_DB, new_config); + + snprintfz(old_config, sizeof(old_config), "dbengine tier %zu disk space MB", tier); + snprintfz(new_config, sizeof(new_config), "dbengine tier %zu retention size", tier); + config_move(CONFIG_SECTION_DB, old_config, + 
CONFIG_SECTION_DB, new_config); + } + + // ---------------------------------------------------------------------------------------------------------------- + config_move(CONFIG_SECTION_LOGS, "error", CONFIG_SECTION_LOGS, "daemon"); @@ -1076,11 +1103,42 @@ static void backwards_compatible_config() { config_move(CONFIG_SECTION_LOGS, "errors flood protection period", CONFIG_SECTION_LOGS, "logs flood protection period"); + config_move(CONFIG_SECTION_HEALTH, "is ephemeral", CONFIG_SECTION_GLOBAL, "is ephemeral node"); config_move(CONFIG_SECTION_HEALTH, "has unstable connection", CONFIG_SECTION_GLOBAL, "has unstable connection"); + + config_move(CONFIG_SECTION_HEALTH, "run at least every seconds", + CONFIG_SECTION_HEALTH, "run at least every"); + + config_move(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds", + CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for"); + + config_move(CONFIG_SECTION_HEALTH, "health log history", + CONFIG_SECTION_HEALTH, "health log retention"); + + config_move(CONFIG_SECTION_REGISTRY, "registry expire idle persons days", + CONFIG_SECTION_REGISTRY, "registry expire idle persons"); + + config_move(CONFIG_SECTION_WEB, "disconnect idle clients after seconds", + CONFIG_SECTION_WEB, "disconnect idle clients after"); + + config_move(CONFIG_SECTION_WEB, "accept a streaming request every seconds", + CONFIG_SECTION_WEB, "accept a streaming request every"); + + config_move(CONFIG_SECTION_STATSD, "set charts as obsolete after secs", + CONFIG_SECTION_STATSD, "set charts as obsolete after"); + + config_move(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", + CONFIG_SECTION_STATSD, "disconnect idle tcp clients after"); + + config_move("plugin:idlejitter", "loop time in ms", + "plugin:idlejitter", "loop time"); + + config_move("plugin:proc:/sys/class/infiniband", "refresh ports state every seconds", + "plugin:proc:/sys/class/infiniband", "refresh ports state every"); } static int get_hostname(char *buf, 
size_t buf_size) { @@ -1125,22 +1183,22 @@ static void get_netdata_configured_variables() // ------------------------------------------------------------------------ // get default database update frequency - default_rrd_update_every = (int) config_get_number(CONFIG_SECTION_DB, "update every", UPDATE_EVERY); + default_rrd_update_every = (int) config_get_duration_seconds(CONFIG_SECTION_DB, "update every", UPDATE_EVERY); if(default_rrd_update_every < 1 || default_rrd_update_every > 600) { netdata_log_error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY); default_rrd_update_every = UPDATE_EVERY; - config_set_number(CONFIG_SECTION_DB, "update every", default_rrd_update_every); + config_set_duration_seconds(CONFIG_SECTION_DB, "update every", default_rrd_update_every); } // ------------------------------------------------------------------------ - // get default memory mode for the database + // get the database selection { - const char *mode = config_get(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode)); + const char *mode = config_get(CONFIG_SECTION_DB, "db", rrd_memory_mode_name(default_rrd_memory_mode)); default_rrd_memory_mode = rrd_memory_mode_id(mode); if(strcmp(mode, rrd_memory_mode_name(default_rrd_memory_mode)) != 0) { netdata_log_error("Invalid memory mode '%s' given. 
Using '%s'", mode, rrd_memory_mode_name(default_rrd_memory_mode)); - config_set(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode)); + config_set(CONFIG_SECTION_DB, "db", rrd_memory_mode_name(default_rrd_memory_mode)); } } @@ -1194,17 +1252,19 @@ static void get_netdata_configured_variables() // ------------------------------------------------------------------------ // get default Database Engine page cache size in MiB - default_rrdeng_page_cache_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb); - default_rrdeng_extent_cache_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine extent cache size MB", default_rrdeng_extent_cache_mb); + default_rrdeng_page_cache_mb = (int) config_get_size_mb(CONFIG_SECTION_DB, "dbengine page cache size", default_rrdeng_page_cache_mb); + default_rrdeng_extent_cache_mb = (int) config_get_size_mb(CONFIG_SECTION_DB, "dbengine extent cache size", default_rrdeng_extent_cache_mb); db_engine_journal_check = config_get_boolean(CONFIG_SECTION_DB, "dbengine enable journal integrity check", CONFIG_BOOLEAN_NO); - if(default_rrdeng_extent_cache_mb < 0) + if(default_rrdeng_extent_cache_mb < 0) { default_rrdeng_extent_cache_mb = 0; + config_set_size_mb(CONFIG_SECTION_DB, "dbengine extent cache size", default_rrdeng_extent_cache_mb); + } if(default_rrdeng_page_cache_mb < RRDENG_MIN_PAGE_CACHE_SIZE_MB) { netdata_log_error("Invalid page cache size %d given. 
Defaulting to %d.", default_rrdeng_page_cache_mb, RRDENG_MIN_PAGE_CACHE_SIZE_MB); default_rrdeng_page_cache_mb = RRDENG_MIN_PAGE_CACHE_SIZE_MB; - config_set_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb); + config_set_size_mb(CONFIG_SECTION_DB, "dbengine page cache size", default_rrdeng_page_cache_mb); } // ------------------------------------------------------------------------ @@ -1237,28 +1297,24 @@ static void get_netdata_configured_variables() // get KSM settings #ifdef MADV_MERGEABLE - enable_ksm = config_get_boolean(CONFIG_SECTION_DB, "memory deduplication (ksm)", enable_ksm); + enable_ksm = config_get_boolean_ondemand(CONFIG_SECTION_DB, "memory deduplication (ksm)", enable_ksm); #endif // -------------------------------------------------------------------- - // metric correlations - enable_metric_correlations = config_get_boolean(CONFIG_SECTION_GLOBAL, "enable metric correlations", enable_metric_correlations); - default_metric_correlations_method = weights_string_to_method(config_get( - CONFIG_SECTION_GLOBAL, "metric correlations method", - weights_method_to_string(default_metric_correlations_method))); + rrdhost_free_ephemeral_time_s = + config_get_duration_seconds(CONFIG_SECTION_DB, "cleanup ephemeral hosts after", rrdhost_free_ephemeral_time_s); - // -------------------------------------------------------------------- + rrdset_free_obsolete_time_s = + config_get_duration_seconds(CONFIG_SECTION_DB, "cleanup obsolete charts after", rrdset_free_obsolete_time_s); - rrdset_free_obsolete_time_s = config_get_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time_s); - rrdhost_free_ephemeral_time_s = config_get_number(CONFIG_SECTION_DB, "cleanup ephemeral hosts after secs", rrdhost_free_ephemeral_time_s); // Current chart locking and invalidation scheme doesn't prevent Netdata from segmentation faults if a short // cleanup delay is set. 
Extensive stress tests showed that 10 seconds is quite a safe delay. Look at // https://github.com/netdata/netdata/pull/11222#issuecomment-868367920 for more information. if (rrdset_free_obsolete_time_s < 10) { rrdset_free_obsolete_time_s = 10; - netdata_log_info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds."); - config_set_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time_s); + netdata_log_info("The \"cleanup obsolete charts after\" option was set to 10 seconds."); + config_set_duration_seconds(CONFIG_SECTION_DB, "cleanup obsolete charts after", rrdset_free_obsolete_time_s); } gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_DB, "gap when lost iterations above", gap_when_lost_iterations_above); @@ -1278,7 +1334,7 @@ static void get_netdata_configured_variables() } -static void post_conf_load(char **user) +static void post_conf_load(const char **user) { // -------------------------------------------------------------------- // get the user we should run @@ -1293,7 +1349,7 @@ static void post_conf_load(char **user) } } -static bool load_netdata_conf(char *filename, char overwrite_used, char **user) { +static bool load_netdata_conf(char *filename, char overwrite_used, const char **user) { errno_clear(); int ret = 0; @@ -1406,7 +1462,7 @@ bool netdata_random_session_id_generate(void); int windows_perflib_dump(const char *key); #endif -int unittest_prepare_rrd(char **user) { +int unittest_prepare_rrd(const char **user) { post_conf_load(user); get_netdata_configured_variables(); default_rrd_update_every = 1; @@ -1437,7 +1493,7 @@ int netdata_main(int argc, char **argv) { int config_loaded = 0; bool close_open_fds = true; size_t default_stacksize; - char *user = NULL; + const char *user = NULL; #ifdef OS_WINDOWS int dont_fork = 1; @@ -1787,7 +1843,7 @@ int netdata_main(int argc, char **argv) { // so the caller can use -c netdata.conf before or // after this parameter to prevent 
or allow overwriting // variables at netdata.conf - config_set_default(section, key, value); + config_set_default_raw_value(section, key, value); // fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value); } @@ -1820,7 +1876,7 @@ int netdata_main(int argc, char **argv) { // so the caller can use -c netdata.conf before or // after this parameter to prevent or allow overwriting // variables at netdata.conf - appconfig_set_default(tmp_config, section, key, value); + appconfig_set_default_raw_value(tmp_config, section, key, value); // fprintf(stderr, "SET section '%s', key '%s', value '%s'\n", section, key, value); } @@ -1922,7 +1978,7 @@ int netdata_main(int argc, char **argv) { // ------------------------------------------------------------------------ // initialize netdata { - char *pmax = config_get(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for plugins", "1"); + const char *pmax = config_get(CONFIG_SECTION_GLOBAL, "glibc malloc arena max for plugins", "1"); if(pmax && *pmax) setenv("MALLOC_ARENA_MAX", pmax, 1); @@ -1979,7 +2035,7 @@ int netdata_main(int argc, char **argv) { // -------------------------------------------------------------------- // get the debugging flags from the configuration file - char *flags = config_get(CONFIG_SECTION_LOGS, "debug flags", "0x0000000000000000"); + const char *flags = config_get(CONFIG_SECTION_LOGS, "debug flags", "0x0000000000000000"); nd_setenv("NETDATA_DEBUG_FLAGS", flags, 1); debug_flags = strtoull(flags, NULL, 0); @@ -2008,8 +2064,6 @@ int netdata_main(int argc, char **argv) { check_local_streaming_capabilities(); - aral_judy_init(); - get_system_timezone(); replication_initialize(); @@ -2159,7 +2213,7 @@ int netdata_main(int argc, char **argv) { delta_startup_time("initialize threads after fork"); - netdata_threads_init_after_fork((size_t)config_get_number(CONFIG_SECTION_GLOBAL, "pthread stack size", (long)default_stacksize)); + 
netdata_threads_init_after_fork((size_t)config_get_size_bytes(CONFIG_SECTION_GLOBAL, "pthread stack size", default_stacksize)); // initialize internal registry delta_startup_time("initialize registry"); diff --git a/src/daemon/win_system-info.c b/src/daemon/win_system-info.c index 2d67862fb41de2..c394a52909e374 100644 --- a/src/daemon/win_system-info.c +++ b/src/daemon/win_system-info.c @@ -108,10 +108,11 @@ static void netdata_windows_get_mem(struct rrdhost_system_info *systemInfo) { ULONGLONG size; char memSize[256]; + // The amount of physically installed RAM, in kilobytes. if (!GetPhysicallyInstalledSystemMemory(&size)) size = 0; else - (void)snprintf(memSize, 255, "%llu", size); + (void)snprintf(memSize, 255, "%llu", size * 1024); // to bytes (void)rrdhost_set_system_info_variable(systemInfo, "NETDATA_SYSTEM_TOTAL_RAM", diff --git a/src/database/contexts/api_v2_contexts_agents.c b/src/database/contexts/api_v2_contexts_agents.c index a8d4fbfda75843..5e75440df689eb 100644 --- a/src/database/contexts/api_v2_contexts_agents.c +++ b/src/database/contexts/api_v2_contexts_agents.c @@ -5,30 +5,6 @@ void build_info_to_json_object(BUFFER *b); -static void convert_seconds_to_dhms(time_t seconds, char *result, int result_size) { - int days, hours, minutes; - - days = (int) (seconds / (24 * 3600)); - seconds = (int) (seconds % (24 * 3600)); - hours = (int) (seconds / 3600); - seconds %= 3600; - minutes = (int) (seconds / 60); - seconds %= 60; - - // Format the result into the provided string buffer - BUFFER *buf = buffer_create(128, NULL); - if (days) - buffer_sprintf(buf,"%d day%s%s", days, days==1 ? "" : "s", hours || minutes ? ", " : ""); - if (hours) - buffer_sprintf(buf,"%d hour%s%s", hours, hours==1 ? "" : "s", minutes ? ", " : ""); - if (minutes) - buffer_sprintf(buf,"%d minute%s%s", minutes, minutes==1 ? "" : "s", seconds ? ", " : ""); - if (seconds) - buffer_sprintf(buf,"%d second%s", (int) seconds, seconds==1 ? 
"" : "s"); - strncpyz(result, buffer_tostring(buf), result_size); - buffer_free(buf); -} - void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now_s, bool info, bool array) { if(!now_s) now_s = now_realtime_sec(); @@ -117,8 +93,8 @@ void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now buffer_json_add_array_item_object(wb); buffer_json_member_add_uint64(wb, "tier", tier); char human_retention[128]; - convert_seconds_to_dhms((time_t) group_seconds, human_retention, sizeof(human_retention) - 1); - buffer_json_member_add_string(wb, "point_every", human_retention); + duration_snprintf_time_t(human_retention, sizeof(human_retention), (stime_t)group_seconds); + buffer_json_member_add_string(wb, "granularity", human_retention); buffer_json_member_add_uint64(wb, "metrics", storage_engine_metrics(eng->seb, localhost->db[tier].si)); buffer_json_member_add_uint64(wb, "samples", storage_engine_samples(eng->seb, localhost->db[tier].si)); @@ -136,7 +112,9 @@ void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now buffer_json_member_add_time_t(wb, "to", now_s); buffer_json_member_add_time_t(wb, "retention", retention); - convert_seconds_to_dhms(retention, human_retention, sizeof(human_retention) - 1); + duration_snprintf_hours(human_retention, sizeof(human_retention), + (int)duration_round_to_resolution(retention, 3600)); + buffer_json_member_add_string(wb, "retention_human", human_retention); if(used || max) { // we have disk space information @@ -148,12 +126,16 @@ void buffer_json_agents_v2(BUFFER *wb, struct query_timings *timings, time_t now time_t actual_retention = MIN(space_retention, time_retention ? 
time_retention : space_retention); if (time_retention) { - convert_seconds_to_dhms(time_retention, human_retention, sizeof(human_retention) - 1); + duration_snprintf_hours(human_retention, sizeof(human_retention), + (int)duration_round_to_resolution(time_retention, 3600)); + buffer_json_member_add_time_t(wb, "requested_retention", time_retention); buffer_json_member_add_string(wb, "requested_retention_human", human_retention); } - convert_seconds_to_dhms(actual_retention, human_retention, sizeof(human_retention) - 1); + duration_snprintf_hours(human_retention, sizeof(human_retention), + (int)duration_round_to_resolution(actual_retention, 3600)); + buffer_json_member_add_time_t(wb, "expected_retention", actual_retention); buffer_json_member_add_string(wb, "expected_retention_human", human_retention); } diff --git a/src/database/contexts/api_v2_contexts_alerts.c b/src/database/contexts/api_v2_contexts_alerts.c index b73dfa1f2f6779..ea7f977bb78bd6 100644 --- a/src/database/contexts/api_v2_contexts_alerts.c +++ b/src/database/contexts/api_v2_contexts_alerts.c @@ -527,7 +527,7 @@ static void rrdcontext_v2_set_transition_filter(const char *machine_guid, const bool rrdcontexts_v2_init_alert_dictionaries(struct rrdcontext_to_json_v2_data *ctl, struct api_v2_contexts_request *req) { if(req->alerts.transition) { ctl->options |= CONTEXTS_OPTION_ALERTS_WITH_INSTANCES | CONTEXTS_OPTION_ALERTS_WITH_VALUES; - if(!sql_find_alert_transition(req->alerts.transition, rrdcontext_v2_set_transition_filter, &ctl)) + if(!sql_find_alert_transition(req->alerts.transition, rrdcontext_v2_set_transition_filter, ctl)) return false; } @@ -536,9 +536,9 @@ bool rrdcontexts_v2_init_alert_dictionaries(struct rrdcontext_to_json_v2_data *c NULL, sizeof(struct alert_v2_entry)); - dictionary_register_insert_callback(ctl->alerts.summary, alerts_v2_insert_callback, &ctl); - dictionary_register_conflict_callback(ctl->alerts.summary, alerts_v2_conflict_callback, &ctl); - 
dictionary_register_delete_callback(ctl->alerts.summary, alerts_v2_delete_callback, &ctl); + dictionary_register_insert_callback(ctl->alerts.summary, alerts_v2_insert_callback, ctl); + dictionary_register_conflict_callback(ctl->alerts.summary, alerts_v2_conflict_callback, ctl); + dictionary_register_delete_callback(ctl->alerts.summary, alerts_v2_delete_callback, ctl); ctl->alerts.by_type = dictionary_create_advanced( DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, @@ -585,9 +585,9 @@ bool rrdcontexts_v2_init_alert_dictionaries(struct rrdcontext_to_json_v2_data *c DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct sql_alert_instance_v2_entry)); - dictionary_register_insert_callback(ctl->alerts.alert_instances, alert_instances_v2_insert_callback, &ctl); - dictionary_register_conflict_callback(ctl->alerts.alert_instances, alert_instances_v2_conflict_callback, &ctl); - dictionary_register_delete_callback(ctl->alerts.alert_instances, alert_instances_delete_callback, &ctl); + dictionary_register_insert_callback(ctl->alerts.alert_instances, alert_instances_v2_insert_callback, ctl); + dictionary_register_conflict_callback(ctl->alerts.alert_instances, alert_instances_v2_conflict_callback, ctl); + dictionary_register_delete_callback(ctl->alerts.alert_instances, alert_instances_delete_callback, ctl); } return true; diff --git a/src/database/rrd.h b/src/database/rrd.h index 608a88ef6ee557..f930fcfc4a16b1 100644 --- a/src/database/rrd.h +++ b/src/database/rrd.h @@ -1096,7 +1096,7 @@ typedef struct alarm_log { uint32_t next_alarm_id; unsigned int count; unsigned int max; - uint32_t health_log_history; // the health log history in seconds to be kept in db + uint32_t health_log_retention_s; // the health log retention in seconds to be kept in db ALARM_ENTRY *alarms; RW_SPINLOCK spinlock; } ALARM_LOG; @@ -1384,7 +1384,7 @@ void rrddim_index_destroy(RRDSET *st); extern time_t 
rrdhost_free_orphan_time_s; extern time_t rrdhost_free_ephemeral_time_s; -int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unittest); +int rrd_init(const char *hostname, struct rrdhost_system_info *system_info, bool unittest); RRDHOST *rrdhost_find_by_hostname(const char *hostname); RRDHOST *rrdhost_find_by_guid(const char *guid); @@ -1405,9 +1405,9 @@ RRDHOST *rrdhost_find_or_create( RRD_MEMORY_MODE mode, unsigned int health_enabled, unsigned int rrdpush_enabled, - char *rrdpush_destination, - char *rrdpush_api_key, - char *rrdpush_send_charts_matching, + const char *rrdpush_destination, + const char *rrdpush_api_key, + const char *rrdpush_send_charts_matching, bool rrdpush_enable_replication, time_t rrdpush_seconds_to_replicate, time_t rrdpush_replication_step, diff --git a/src/database/rrdhost.c b/src/database/rrdhost.c index 376db65bfef286..00eed82c0d9c70 100644 --- a/src/database/rrdhost.c +++ b/src/database/rrdhost.c @@ -232,10 +232,10 @@ void set_host_properties(RRDHOST *host, int update_every, RRD_MEMORY_MODE memory // RRDHOST - add a host static void rrdhost_initialize_rrdpush_sender(RRDHOST *host, - unsigned int rrdpush_enabled, - char *rrdpush_destination, - char *rrdpush_api_key, - char *rrdpush_send_charts_matching + unsigned int rrdpush_enabled, + const char *rrdpush_destination, + const char *rrdpush_api_key, + const char *rrdpush_send_charts_matching ) { if(rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_INITIALIZED)) return; @@ -341,9 +341,9 @@ static RRDHOST *rrdhost_create( RRD_MEMORY_MODE memory_mode, unsigned int health_enabled, unsigned int rrdpush_enabled, - char *rrdpush_destination, - char *rrdpush_api_key, - char *rrdpush_send_charts_matching, + const char *rrdpush_destination, + const char *rrdpush_api_key, + const char *rrdpush_send_charts_matching, bool rrdpush_enable_replication, time_t rrdpush_seconds_to_replicate, time_t rrdpush_replication_step, @@ -474,7 +474,7 @@ static RRDHOST *rrdhost_create( if 
(is_localhost && host->system_info) { host->system_info->ml_capable = ml_capable(); host->system_info->ml_enabled = ml_enabled(host); - host->system_info->mc_version = enable_metric_correlations ? metric_correlations_version : 0; + host->system_info->mc_version = metric_correlations_version; } // ------------------------------------------------------------------------ @@ -566,9 +566,9 @@ static void rrdhost_update(RRDHOST *host , RRD_MEMORY_MODE mode , unsigned int health_enabled , unsigned int rrdpush_enabled - , char *rrdpush_destination - , char *rrdpush_api_key - , char *rrdpush_send_charts_matching + , const char *rrdpush_destination + , const char *rrdpush_api_key + , const char *rrdpush_send_charts_matching , bool rrdpush_enable_replication , time_t rrdpush_seconds_to_replicate , time_t rrdpush_replication_step @@ -706,9 +706,9 @@ RRDHOST *rrdhost_find_or_create( , RRD_MEMORY_MODE mode , unsigned int health_enabled , unsigned int rrdpush_enabled - , char *rrdpush_destination - , char *rrdpush_api_key - , char *rrdpush_send_charts_matching + , const char *rrdpush_destination + , const char *rrdpush_api_key + , const char *rrdpush_send_charts_matching , bool rrdpush_enable_replication , time_t rrdpush_seconds_to_replicate , time_t rrdpush_replication_step @@ -862,7 +862,7 @@ RRD_BACKFILL get_dbengine_backfill(RRD_BACKFILL backfill) #endif -void dbengine_init(char *hostname) { +static void dbengine_init(const char *hostname) { #ifdef ENABLE_DBENGINE use_direct_io = config_get_boolean(CONFIG_SECTION_DB, "dbengine use direct io", use_direct_io); @@ -900,10 +900,10 @@ void dbengine_init(char *hostname) { !config_exists(CONFIG_SECTION_DB, "dbengine tier 2 update every iterations") && !config_exists(CONFIG_SECTION_DB, "dbengine tier 3 update every iterations") && !config_exists(CONFIG_SECTION_DB, "dbengine tier 4 update every iterations") && - !config_exists(CONFIG_SECTION_DB, "dbengine tier 1 disk space MB") && - !config_exists(CONFIG_SECTION_DB, "dbengine tier 2 
disk space MB") && - !config_exists(CONFIG_SECTION_DB, "dbengine tier 3 disk space MB") && - !config_exists(CONFIG_SECTION_DB, "dbengine tier 4 disk space MB")); + !config_exists(CONFIG_SECTION_DB, "dbengine tier 1 retention size") && + !config_exists(CONFIG_SECTION_DB, "dbengine tier 2 retention size") && + !config_exists(CONFIG_SECTION_DB, "dbengine tier 3 retention size") && + !config_exists(CONFIG_SECTION_DB, "dbengine tier 4 retention size")); default_backfill = get_dbengine_backfill(RRD_BACKFILL_NEW); char dbengineconfig[200 + 1]; @@ -925,11 +925,11 @@ void dbengine_init(char *hostname) { storage_tiers_grouping_iterations[tier] = grouping_iterations; } - default_multidb_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine tier 0 disk space MB", RRDENG_DEFAULT_TIER_DISK_SPACE_MB); + default_multidb_disk_quota_mb = (int) config_get_size_mb(CONFIG_SECTION_DB, "dbengine tier 0 retention size", RRDENG_DEFAULT_TIER_DISK_SPACE_MB); if(default_multidb_disk_quota_mb && default_multidb_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) { netdata_log_error("Invalid disk space %d for tier 0 given. Defaulting to %d.", default_multidb_disk_quota_mb, RRDENG_MIN_DISK_SPACE_MB); default_multidb_disk_quota_mb = RRDENG_MIN_DISK_SPACE_MB; - config_set_number(CONFIG_SECTION_DB, "dbengine tier 0 disk space MB", default_multidb_disk_quota_mb); + config_set_size_mb(CONFIG_SECTION_DB, "dbengine tier 0 retention size", default_multidb_disk_quota_mb); } #ifdef OS_WINDOWS @@ -959,11 +959,11 @@ void dbengine_init(char *hostname) { } int disk_space_mb = tier ? 
RRDENG_DEFAULT_TIER_DISK_SPACE_MB : default_multidb_disk_quota_mb; - snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu disk space MB", tier); - disk_space_mb = config_get_number(CONFIG_SECTION_DB, dbengineconfig, disk_space_mb); + snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu retention size", tier); + disk_space_mb = config_get_size_mb(CONFIG_SECTION_DB, dbengineconfig, disk_space_mb); - snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu retention days", tier); - storage_tiers_retention_days[tier] = config_get_number( + snprintfz(dbengineconfig, sizeof(dbengineconfig) - 1, "dbengine tier %zu retention time", tier); + storage_tiers_retention_days[tier] = config_get_duration_days( CONFIG_SECTION_DB, dbengineconfig, new_dbengine_defaults ? storage_tiers_retention_days[tier] : 0); tiers_init[tier].disk_space_mb = (int) disk_space_mb; @@ -1026,7 +1026,7 @@ void dbengine_init(char *hostname) { void api_v1_management_init(void); -int rrd_init(char *hostname, struct rrdhost_system_info *system_info, bool unittest) { +int rrd_init(const char *hostname, struct rrdhost_system_info *system_info, bool unittest) { rrdhost_init(); if (unlikely(sql_init_meta_database(DB_CHECK_NONE, system_info ? 
0 : 1))) { @@ -1445,6 +1445,11 @@ void rrdhost_set_is_parent_label(void) { } } +static bool config_label_cb(void *data __maybe_unused, const char *name, const char *value) { + rrdlabels_add(localhost->rrdlabels, name, value, RRDLABEL_SRC_CONFIG); + return true; +} + static void rrdhost_load_config_labels(void) { int status = config_load(NULL, 1, CONFIG_SECTION_HOST_LABEL); if(!status) { @@ -1454,16 +1459,7 @@ static void rrdhost_load_config_labels(void) { filename); } - struct section *co = appconfig_get_section(&netdata_config, CONFIG_SECTION_HOST_LABEL); - if(co) { - config_section_wrlock(co); - struct config_option *cv; - for(cv = co->values; cv ; cv = cv->next) { - rrdlabels_add(localhost->rrdlabels, cv->name, cv->value, RRDLABEL_SRC_CONFIG); - cv->flags |= CONFIG_VALUE_USED; - } - config_section_unlock(co); - } + appconfig_foreach_value_in_section(&netdata_config, CONFIG_SECTION_HOST_LABEL, config_label_cb, NULL); } static void rrdhost_load_kubernetes_labels(void) { diff --git a/src/database/sqlite/sqlite_aclk_alert.c b/src/database/sqlite/sqlite_aclk_alert.c index 605b411937bd77..803f6396c1013d 100644 --- a/src/database/sqlite/sqlite_aclk_alert.c +++ b/src/database/sqlite/sqlite_aclk_alert.c @@ -483,10 +483,9 @@ static void aclk_push_alert_event(RRDHOST *host __maybe_unused) SQLITE_FINALIZE(res); } -#define SQL_DELETE_PROCESSED_ROWS \ - "DELETE FROM alert_queue WHERE host_id = @host_id AND rowid between @row1 AND @row2" +#define SQL_DELETE_PROCESSED_ROWS "DELETE FROM alert_queue WHERE host_id = @host_id AND rowid = @row" -static void delete_alert_from_pending_queue(RRDHOST *host, int64_t row1, int64_t row2) +static void delete_alert_from_pending_queue(RRDHOST *host, int64_t row) { static __thread sqlite3_stmt *res = NULL; @@ -495,8 +494,7 @@ static void delete_alert_from_pending_queue(RRDHOST *host, int64_t row1, int64_t int param = 0; SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); - 
SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, row1)); - SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, row2)); + SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, row)); param = 0; int rc = sqlite3_step_monitored(res); @@ -566,8 +564,6 @@ bool process_alert_pending_queue(RRDHOST *host) SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); param = 0; - int64_t start_row = 0; - int64_t end_row = 0; while (sqlite3_step_monitored(res) == SQLITE_ROW) { int64_t health_log_id = sqlite3_column_int64(res, 0); @@ -581,16 +577,11 @@ bool process_alert_pending_queue(RRDHOST *host) added++; } - if (!start_row) - start_row = row; - end_row = row; + delete_alert_from_pending_queue(host, row); count++; } - if (start_row) - delete_alert_from_pending_queue(host, start_row, end_row); - if(count) nd_log(NDLS_ACCESS, NDLP_NOTICE, "ACLK STA [%s (N/A)]: Processed %d entries, queued %d", rrdhost_hostname(host), count, added); done: diff --git a/src/database/sqlite/sqlite_health.c b/src/database/sqlite/sqlite_health.c index bde8ba1d52c356..3a800d384253b1 100644 --- a/src/database/sqlite/sqlite_health.c +++ b/src/database/sqlite/sqlite_health.c @@ -311,7 +311,7 @@ void sql_health_alarm_log_cleanup(RRDHOST *host) int param = 0; SQLITE_BIND_FAIL(done, sqlite3_bind_blob(res, ++param, &host->host_id.uuid, sizeof(host->host_id.uuid), SQLITE_STATIC)); - SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, (sqlite3_int64)host->health_log.health_log_history)); + SQLITE_BIND_FAIL(done, sqlite3_bind_int64(res, ++param, (sqlite3_int64)host->health_log.health_log_retention_s)); param = 0; rc = sqlite3_step_monitored(res); @@ -430,10 +430,8 @@ static void sql_inject_removed_status( int64_t health_log_id = sqlite3_column_int64(res, 0); RRDCALC_STATUS old_status = (RRDCALC_STATUS)sqlite3_column_double(res, 1); insert_alert_queue( - host, health_log_id, (int64_t)unique_id, (int64_t)alarm_id, old_status, 
RRDCALC_STATUS_REMOVED); + host, health_log_id, (int64_t)max_unique_id, (int64_t)alarm_id, old_status, RRDCALC_STATUS_REMOVED); } - //else - // error_report("HEALTH [N/A]: Failed to execute SQL_INJECT_REMOVED, rc = %d", rc); done: REPORT_BIND_FAIL(res, param); diff --git a/src/exporting/read_config.c b/src/exporting/read_config.c index 875e62cf4840c9..13fe1088367189 100644 --- a/src/exporting/read_config.c +++ b/src/exporting/read_config.c @@ -5,11 +5,7 @@ EXPORTING_OPTIONS global_exporting_options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES; const char *global_exporting_prefix = "netdata"; -struct config exporting_config = { .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, - .rwlock = AVL_LOCK_INITIALIZER } }; +struct config exporting_config = APPCONFIG_INITIALIZER; struct instance *prometheus_exporter_instance = NULL; @@ -32,7 +28,7 @@ static _CONNECTOR_INSTANCE *find_instance(const char *section) return local_ci; } -char *expconfig_get(struct config *root, const char *section, const char *name, const char *default_value) +static const char *expconfig_get(struct config *root, const char *section, const char *name, const char *default_value) { _CONNECTOR_INSTANCE *local_ci; @@ -243,7 +239,7 @@ struct engine *read_exporting_config() prometheus_exporter_instance->config.options |= global_exporting_options & EXPORTING_OPTIONS_SOURCE_BITS; - char *data_source = prometheus_config_get("data source", "average"); + const char *data_source = prometheus_config_get("data source", "average"); prometheus_exporter_instance->config.options = exporting_parse_data_source(data_source, prometheus_exporter_instance->config.options); @@ -378,7 +374,7 @@ struct engine *read_exporting_config() tmp_instance->config.hosts_pattern = simple_pattern_create( exporter_get(instance_name, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT, true); - 
char *data_source = exporter_get(instance_name, "data source", "average"); + const char *data_source = exporter_get(instance_name, "data source", "average"); tmp_instance->config.options = exporting_parse_data_source(data_source, tmp_instance->config.options); if (EXPORTING_OPTIONS_DATA_SOURCE(tmp_instance->config.options) != EXPORTING_SOURCE_DATA_AS_COLLECTED && diff --git a/src/go/go.mod b/src/go/go.mod index 25153fc610e141..517bd9081a8ed6 100644 --- a/src/go/go.mod +++ b/src/go/go.mod @@ -6,7 +6,7 @@ replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0. require ( github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/Masterminds/sprig/v3 v3.2.3 + github.com/Masterminds/sprig/v3 v3.3.0 github.com/Wing924/ltsv v0.3.1 github.com/apparentlymart/go-cidr v1.1.0 github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de @@ -16,7 +16,7 @@ require ( github.com/clbanning/rfile/v2 v2.0.0-20231024120205-ac3fca974b0e github.com/cloudflare/cfssl v1.6.5 github.com/coreos/go-systemd/v22 v22.5.0 - github.com/docker/docker v27.1.2+incompatible + github.com/docker/docker v27.2.0+incompatible github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc github.com/fsnotify/fsnotify v1.7.0 github.com/go-redis/redis/v8 v8.11.5 @@ -30,15 +30,15 @@ require ( github.com/jackc/pgx/v5 v5.6.0 github.com/jessevdk/go-flags v1.6.1 github.com/kanocz/fcgi_client v0.0.0-20210113082628-fff85c8adfb7 - github.com/likexian/whois v1.15.4 - github.com/likexian/whois-parser v1.24.19 + github.com/likexian/whois v1.15.5 + github.com/likexian/whois-parser v1.24.20 github.com/lmittmann/tint v1.0.5 github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-xmlrpc v0.0.3 github.com/miekg/dns v1.1.62 github.com/mitchellh/go-homedir v1.1.0 github.com/prometheus-community/pro-bing v0.4.1 - github.com/prometheus/common v0.55.0 + github.com/prometheus/common v0.58.0 github.com/prometheus/prometheus v2.5.0+incompatible github.com/stretchr/testify v1.9.0 github.com/tidwall/gjson v1.17.3 @@ 
-46,7 +46,7 @@ require ( github.com/vmware/govmomi v0.42.0 go.mongodb.org/mongo-driver v1.16.1 golang.org/x/net v0.28.0 - golang.org/x/text v0.17.0 + golang.org/x/text v0.18.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20220504211119-3d4a969bb56b gopkg.in/ini.v1 v1.67.0 gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.2 @@ -58,12 +58,13 @@ require ( ) require ( + dario.cat/mergo v1.0.1 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect @@ -88,7 +89,7 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect - github.com/huandu/xstrings v1.3.3 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgconn v1.14.3 // indirect @@ -107,8 +108,8 @@ require ( github.com/mdlayher/genetlink v1.3.2 // indirect github.com/mdlayher/netlink v1.7.2 // indirect github.com/mdlayher/socket v0.4.1 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect - github.com/mitchellh/reflectwalk v1.0.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -120,9 +121,9 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/shopspring/decimal v1.2.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect @@ -138,7 +139,7 @@ require ( golang.org/x/crypto v0.26.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.18.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.23.0 // indirect golang.org/x/term v0.23.0 // indirect diff --git a/src/go/go.sum b/src/go/go.sum index c572aa7c4340df..5e5b6241ca5141 100644 --- a/src/go/go.sum +++ b/src/go/go.sum @@ -1,3 +1,5 @@ +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -8,10 +10,10 @@ github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= 
-github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Wing924/ltsv v0.3.1 h1:hbjzQ6YuS/sOm7nQJG7ddT9ua1yYmcH25Q8lsuiQE0A= @@ -32,8 +34,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/clbanning/rfile/v2 v2.0.0-20231024120205-ac3fca974b0e h1:Iw4JdD/TlCUvlVWIjuV1M98rGNo/C+NxM6U5ghStom4= github.com/clbanning/rfile/v2 v2.0.0-20231024120205-ac3fca974b0e/go.mod h1:Y53jAgtl30vLWEnRWkZFT+CpwLNsrQJb0F5AwHieNGs= 
github.com/cloudflare/cfssl v1.6.5 h1:46zpNkm6dlNkMZH/wMW22ejih6gIaJbzL2du6vD7ZeI= @@ -58,8 +60,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v27.1.2+incompatible h1:AhGzR1xaQIy53qCkxARaFluI00WPGtXn0AJuoQsVYTY= -github.com/docker/docker v27.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4= +github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -70,6 +72,8 @@ github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc h1:0VQsg5ZXW9MPUxzem github.com/facebook/time v0.0.0-20240626113945-18207c5d8ddc/go.mod h1:2UFAomOuD2vAK1x68czUtCVjAqmyWCEnAXOlmGqf+G0= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -127,7 
+131,6 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gosnmp/gosnmp v1.38.0 h1:I5ZOMR8kb0DXAFg/88ACurnuwGwYkXWq3eLpJPHMEYc= @@ -140,11 +143,10 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp4 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ilyam8/hashstructure v1.1.0 h1:N8t8hzzKLf2Da87XgC/DBYqXUmSbclgx+2cZxS5/klU= github.com/ilyam8/hashstructure v1.1.0/go.mod h1:LoLuwBSNpZOi3eTMfAqe2i4oW9QkI08e6g1Pci9h7hs= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/jackc/chunkreader v1.0.0/go.mod 
h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= @@ -232,10 +234,10 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/likexian/gokit v0.25.15 h1:QjospM1eXhdMMHwZRpMKKAHY/Wig9wgcREmLtf9NslY= github.com/likexian/gokit v0.25.15/go.mod h1:S2QisdsxLEHWeD/XI0QMVeggp+jbxYqUxMvSBil7MRg= -github.com/likexian/whois v1.15.4 h1:r5En62c+S9HKFgJtdh2WsdmRGTcxE4WUtGBdZkSBXmM= -github.com/likexian/whois v1.15.4/go.mod h1:rXFTPcQdNlPQBJCQpPWTSIDGzzmgKBftmhdOOcLpwXk= -github.com/likexian/whois-parser v1.24.19 h1:vT8lWhnV8ogkdaYLyef6IvE5VTHVCwlUDG5BUXCx06k= -github.com/likexian/whois-parser v1.24.19/go.mod h1:rAtaofg2luol09H+ogDzGIfcG8ig1NtM5R16uQADDz4= +github.com/likexian/whois v1.15.5 h1:gpPxyCTJtLtJDmakHCo//0ZjK/ocI01GCAd/WBJ2oH8= +github.com/likexian/whois v1.15.5/go.mod h1:4b6o1QTCfjwrB5I3KeNQnn79QtuPUTsewsE+ys94I78= +github.com/likexian/whois-parser v1.24.20 h1:oxEkRi0GxgqWQRLDMJpXU1EhgWmLmkqEFZ2ChXTeQLE= +github.com/likexian/whois-parser v1.24.20/go.mod h1:rAtaofg2luol09H+ogDzGIfcG8ig1NtM5R16uQADDz4= github.com/lmittmann/tint v1.0.5 h1:NQclAutOfYsqs2F1Lenue6OoWCajs5wJcP3DfWVpePw= github.com/lmittmann/tint v1.0.5/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -260,13 +262,12 @@ github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= 
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= @@ -311,8 +312,8 @@ github.com/prometheus-community/pro-bing v0.4.1 h1:aMaJwyifHZO0y+h8+icUz0xbToHbi github.com/prometheus-community/pro-bing v0.4.1/go.mod h1:aLsw+zqCaDoa2RLVVSX3+UiCkBBXTMtZC3c7EkfWnAE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.58.0 h1:N+N8vY4/23r6iYfD3UQZUoJPnUYAo7v6LG5XZxjZTXo= +github.com/prometheus/common v0.58.0/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/prometheus v0.50.1 h1:N2L+DYrxqPh4WZStU+o1p/gQlBaqFbcLBTjlp3vpdXw= github.com/prometheus/prometheus 
v0.50.1/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -325,15 +326,16 @@ github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThC github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -418,7 +420,6 @@ golang.org/x/crypto 
v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= @@ -442,11 +443,10 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -476,14 +476,12 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -493,9 +491,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod 
h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -560,7 +557,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/src/go/plugin/go.d/README.md b/src/go/plugin/go.d/README.md index 9824aa17628420..0770b557df93d0 100644 --- a/src/go/plugin/go.d/README.md +++ b/src/go/plugin/go.d/README.md @@ -151,6 +151,7 @@ see the appropriate collector readme. 
| [vcsa](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vcsa) | vCenter Server Appliance | | [vernemq](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vernemq) | VerneMQ | | [vsphere](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vsphere) | VMware vCenter Server | +| [w1sensor](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/w1sensor) | 1-Wire Sensors | | [web_log](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/weblog) | Apache/NGINX | | [wireguard](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/wireguard) | WireGuard | | [whoisquery](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/whoisquery) | Domain Expiry | diff --git a/src/go/plugin/go.d/config/go.d.conf b/src/go/plugin/go.d/config/go.d.conf index 7af1c101f7b43d..d2949c399831e4 100644 --- a/src/go/plugin/go.d/config/go.d.conf +++ b/src/go/plugin/go.d/config/go.d.conf @@ -116,6 +116,7 @@ modules: # vernemq: yes # vcsa: yes # vsphere: yes +# w1sensor: yes # web_log: yes # wireguard: yes # whoisquery: yes diff --git a/src/go/plugin/go.d/config/go.d/w1sensor.conf b/src/go/plugin/go.d/config/go.d/w1sensor.conf new file mode 100644 index 00000000000000..005f58058e5d9d --- /dev/null +++ b/src/go/plugin/go.d/config/go.d/w1sensor.conf @@ -0,0 +1,6 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/w1sensor#readme + +jobs: + - name: w1sensor + sensors_path: /sys/bus/w1/devices diff --git a/src/go/plugin/go.d/modules/init.go b/src/go/plugin/go.d/modules/init.go index 47c63b560fc7cf..fdc0040176d6fa 100644 --- a/src/go/plugin/go.d/modules/init.go +++ b/src/go/plugin/go.d/modules/init.go @@ -108,6 +108,7 @@ import ( _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vcsa" _ 
"github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vernemq" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere" + _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/w1sensor" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/weblog" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/whoisquery" _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/windows" diff --git a/src/go/plugin/go.d/modules/sensors/charts.go b/src/go/plugin/go.d/modules/sensors/charts.go index b298e096c05300..7836b872c05682 100644 --- a/src/go/plugin/go.d/modules/sensors/charts.go +++ b/src/go/plugin/go.d/modules/sensors/charts.go @@ -121,8 +121,8 @@ var sensorIntrusionChartTmpl = module.Chart{ Type: module.Line, Priority: prioSensorIntrusion, Dims: module.Dims{ - {ID: "sensor_chip_%s_feature_%s_subfeature_%s_alarm_off", Name: "alarm_off"}, - {ID: "sensor_chip_%s_feature_%s_subfeature_%s_alarm_on", Name: "alarm_on"}, + {ID: "sensor_chip_%s_feature_%s_subfeature_%s_clear", Name: "alarm_clear"}, + {ID: "sensor_chip_%s_feature_%s_subfeature_%s_triggered", Name: "alarm_triggered"}, }, } @@ -144,6 +144,8 @@ func (s *Sensors) addExecSensorChart(sn execSensor) { chart = sensorCurrentChartTmpl.Copy() case sensorTypeEnergy: chart = sensorEnergyChartTmpl.Copy() + case sensorTypeIntrusion: + chart = sensorIntrusionChartTmpl.Copy() default: return } diff --git a/src/go/plugin/go.d/modules/sensors/collect_exec.go b/src/go/plugin/go.d/modules/sensors/collect_exec.go index 4fbbb7faf83eb5..203b83cf6c6a46 100644 --- a/src/go/plugin/go.d/modules/sensors/collect_exec.go +++ b/src/go/plugin/go.d/modules/sensors/collect_exec.go @@ -12,13 +12,14 @@ import ( ) const ( - sensorTypeTemp = "temperature" - sensorTypeVoltage = "voltage" - sensorTypePower = "power" - sensorTypeHumidity = "humidity" - sensorTypeFan = "fan" - sensorTypeCurrent = "current" - sensorTypeEnergy = "energy" + sensorTypeTemp = "temperature" + sensorTypeVoltage = "voltage" + 
sensorTypePower = "power" + sensorTypeHumidity = "humidity" + sensorTypeFan = "fan" + sensorTypeCurrent = "current" + sensorTypeEnergy = "energy" + sensorTypeIntrusion = "intrusion" ) type execSensor struct { @@ -36,6 +37,8 @@ func (s *execSensor) sensorType() string { switch { case strings.HasPrefix(s.subfeature, "temp"): return sensorTypeTemp + case strings.HasPrefix(s.subfeature, "intrusion"): + return sensorTypeIntrusion case strings.HasPrefix(s.subfeature, "in"): return sensorTypeVoltage case strings.HasPrefix(s.subfeature, "power"): @@ -96,9 +99,18 @@ func (s *Sensors) collectExec() (map[string]int64, error) { seen := make(map[string]bool) for _, sn := range sensors { - sx := "_input" - if sn.sensorType() == sensorTypePower { + var sx string + + switch sn.sensorType() { + case "": + s.Debugf("can not find type for sensor '%s'", sn) + continue + case sensorTypePower: sx = "_average" + case sensorTypeIntrusion: + sx = "_alarm" + default: + sx = "_input" } if !strings.HasSuffix(sn.subfeature, sx) { @@ -112,11 +124,6 @@ func (s *Sensors) collectExec() (map[string]int64, error) { continue } - if sn.sensorType() == "" { - s.Debugf("can not find type for sensor '%s'", sn) - continue - } - if minVal, maxVal, ok := sn.limits(); ok && (v < minVal || v > maxVal) { s.Debugf("value outside limits [%d/%d] for sensor '%s'", int64(minVal), int64(maxVal), sn) continue @@ -132,7 +139,12 @@ func (s *Sensors) collectExec() (map[string]int64, error) { seen[key] = true - mx[key] = int64(v * precision) + if sn.sensorType() == sensorTypeIntrusion { + mx[key+"_triggered"] = boolToInt(v != 0) + mx[key+"_clear"] = boolToInt(v == 0) + } else { + mx[key] = int64(v * precision) + } } for k := range s.sensors { diff --git a/src/go/plugin/go.d/modules/sensors/collect_sysfs.go b/src/go/plugin/go.d/modules/sensors/collect_sysfs.go index bb2921fd3be71f..39e2fe6baf10e7 100644 --- a/src/go/plugin/go.d/modules/sensors/collect_sysfs.go +++ b/src/go/plugin/go.d/modules/sensors/collect_sysfs.go @@ 
-50,8 +50,8 @@ func (s *Sensors) collectSysfs() (map[string]int64, error) { mx[key] = int64(v.Input * precision) case *lmsensors.IntrusionSensor: key = snakeCase(fmt.Sprintf("sensor_chip_%s_feature_%s_subfeature_%s_alarm", dev.Name, firstNotEmpty(v.Label, v.Name), v.Name)) - mx[key+"_on"] = boolToInt(v.Alarm) - mx[key+"_off"] = boolToInt(!v.Alarm) + mx[key+"_triggered"] = boolToInt(v.Alarm) + mx[key+"_clear"] = boolToInt(!v.Alarm) default: s.Debugf("unexpected sensor type: %T", v) continue diff --git a/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md b/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md index a41c6bb4cf55b4..075b4da4b75814 100644 --- a/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md +++ b/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md @@ -85,6 +85,7 @@ Metrics: | sensors.sensor_fan_speed | fan | RPM | | sensors.sensor_energy | energy | Joules | | sensors.sensor_humidity | humidity | percent | +| sensors.sensor_intrusion | alarm_clear, alarm_triggered | status | diff --git a/src/go/plugin/go.d/modules/sensors/metadata.yaml b/src/go/plugin/go.d/modules/sensors/metadata.yaml index f104c3e8786ecc..aa8c18b14440e9 100644 --- a/src/go/plugin/go.d/modules/sensors/metadata.yaml +++ b/src/go/plugin/go.d/modules/sensors/metadata.yaml @@ -157,3 +157,10 @@ modules: chart_type: area dimensions: - name: humidity + - name: sensors.sensor_intrusion + description: Sensor intrusion + unit: status + chart_type: line + dimensions: + - name: alarm_clear + - name: alarm_triggered diff --git a/src/go/plugin/go.d/modules/sensors/sensors_test.go b/src/go/plugin/go.d/modules/sensors/sensors_test.go index 54437b0aa9c926..af49941e1537ef 100644 --- a/src/go/plugin/go.d/modules/sensors/sensors_test.go +++ b/src/go/plugin/go.d/modules/sensors/sensors_test.go @@ -18,8 +18,7 @@ var ( dataConfigJSON, _ = os.ReadFile("testdata/config.json") dataConfigYAML, _ = 
os.ReadFile("testdata/config.yaml") - dataSensorsTemp, _ = os.ReadFile("testdata/sensors-temp.txt") - dataSensorsTempInCurrPowerFan, _ = os.ReadFile("testdata/sensors-temp-in-curr-power-fan.txt") + dataSensors, _ = os.ReadFile("testdata/sensors.txt") ) func Test_testDataIsValid(t *testing.T) { @@ -27,8 +26,7 @@ func Test_testDataIsValid(t *testing.T) { "dataConfigJSON": dataConfigJSON, "dataConfigYAML": dataConfigYAML, - "dataSensorsTemp": dataSensorsTemp, - "dataSensorsTempInCurrPowerFan": dataSensorsTempInCurrPowerFan, + "dataSensors": dataSensors, } { require.NotNil(t, data, name) @@ -84,7 +82,7 @@ func TestSensors_Cleanup(t *testing.T) { "after check": { prepare: func() *Sensors { sensors := New() - sensors.exec = prepareMockExecOkOnlyTemp() + sensors.exec = prepareMockExecOk() _ = sensors.Check() return sensors }, @@ -92,7 +90,7 @@ func TestSensors_Cleanup(t *testing.T) { "after collect": { prepare: func() *Sensors { sensors := New() - sensors.exec = prepareMockExecOkTempInCurrPowerFan() + sensors.exec = prepareMockExecOk() _ = sensors.Collect() return sensors }, @@ -119,11 +117,7 @@ func TestSensors_Check(t *testing.T) { }{ "exec: only temperature": { wantFail: false, - prepareMock: prepareMockExecOkOnlyTemp, - }, - "exec: temperature and voltage": { - wantFail: false, - prepareMock: prepareMockExecOkTempInCurrPowerFan, + prepareMock: prepareMockExecOk, }, "exec: error on sensors info call": { wantFail: true, @@ -161,39 +155,9 @@ func TestSensors_Collect(t *testing.T) { wantMetrics map[string]int64 wantCharts int }{ - "exec: only temperature": { - prepareExecMock: prepareMockExecOkOnlyTemp, - wantCharts: 24, - wantMetrics: map[string]int64{ - "sensor_chip_bnxt_en-pci-6200_feature_temp1_subfeature_temp1_input": 80000, - "sensor_chip_bnxt_en-pci-6201_feature_temp1_subfeature_temp1_input": 81000, - "sensor_chip_k10temp-pci-00c3_feature_tccd1_subfeature_temp3_input": 58250, - "sensor_chip_k10temp-pci-00c3_feature_tccd2_subfeature_temp4_input": 60250, - 
"sensor_chip_k10temp-pci-00c3_feature_tccd3_subfeature_temp5_input": 57000, - "sensor_chip_k10temp-pci-00c3_feature_tccd4_subfeature_temp6_input": 57250, - "sensor_chip_k10temp-pci-00c3_feature_tccd5_subfeature_temp7_input": 57750, - "sensor_chip_k10temp-pci-00c3_feature_tccd6_subfeature_temp8_input": 59500, - "sensor_chip_k10temp-pci-00c3_feature_tccd7_subfeature_temp9_input": 58500, - "sensor_chip_k10temp-pci-00c3_feature_tccd8_subfeature_temp10_input": 61250, - "sensor_chip_k10temp-pci-00c3_feature_tctl_subfeature_temp1_input": 62000, - "sensor_chip_k10temp-pci-00cb_feature_tccd1_subfeature_temp3_input": 54000, - "sensor_chip_k10temp-pci-00cb_feature_tccd2_subfeature_temp4_input": 55500, - "sensor_chip_k10temp-pci-00cb_feature_tccd3_subfeature_temp5_input": 56000, - "sensor_chip_k10temp-pci-00cb_feature_tccd4_subfeature_temp6_input": 52750, - "sensor_chip_k10temp-pci-00cb_feature_tccd5_subfeature_temp7_input": 53500, - "sensor_chip_k10temp-pci-00cb_feature_tccd6_subfeature_temp8_input": 55250, - "sensor_chip_k10temp-pci-00cb_feature_tccd7_subfeature_temp9_input": 53000, - "sensor_chip_k10temp-pci-00cb_feature_tccd8_subfeature_temp10_input": 53750, - "sensor_chip_k10temp-pci-00cb_feature_tctl_subfeature_temp1_input": 57500, - "sensor_chip_nouveau-pci-4100_feature_temp1_subfeature_temp1_input": 51000, - "sensor_chip_nvme-pci-0100_feature_composite_subfeature_temp1_input": 39850, - "sensor_chip_nvme-pci-6100_feature_composite_subfeature_temp1_input": 48850, - "sensor_chip_nvme-pci-8100_feature_composite_subfeature_temp1_input": 39850, - }, - }, "exec: multiple sensors": { - prepareExecMock: prepareMockExecOkTempInCurrPowerFan, - wantCharts: 20, + prepareExecMock: prepareMockExecOk, + wantCharts: 22, wantMetrics: map[string]int64{ "sensor_chip_acpitz-acpi-0_feature_temp1_subfeature_temp1_input": 88000, "sensor_chip_amdgpu-pci-0300_feature_edge_subfeature_temp1_input": 53000, @@ -210,6 +174,10 @@ func TestSensors_Collect(t *testing.T) { 
"sensor_chip_asus-isa-0000_feature_gpu_fan_subfeature_fan2_input": 6600000, "sensor_chip_bat0-acpi-0_feature_in0_subfeature_in0_input": 17365, "sensor_chip_k10temp-pci-00c3_feature_tctl_subfeature_temp1_input": 90000, + "sensor_chip_nct6779-isa-0290_feature_intrusion0_subfeature_intrusion0_alarm_clear": 0, + "sensor_chip_nct6779-isa-0290_feature_intrusion0_subfeature_intrusion0_alarm_triggered": 1, + "sensor_chip_nct6779-isa-0290_feature_intrusion1_subfeature_intrusion1_alarm_clear": 0, + "sensor_chip_nct6779-isa-0290_feature_intrusion1_subfeature_intrusion1_alarm_triggered": 1, "sensor_chip_nvme-pci-0600_feature_composite_subfeature_temp1_input": 33850, "sensor_chip_nvme-pci-0600_feature_sensor_1_subfeature_temp2_input": 48850, "sensor_chip_nvme-pci-0600_feature_sensor_2_subfeature_temp3_input": 33850, @@ -232,7 +200,7 @@ func TestSensors_Collect(t *testing.T) { "sysfs: multiple sensors": { prepareSysfsMock: prepareMockSysfsScannerOk, - wantCharts: 20, + wantCharts: 21, wantMetrics: map[string]int64{ "sensor_chip_acpitz-acpi-0_feature_temp1_subfeature_temp1_input": 88000, "sensor_chip_amdgpu-pci-0300_feature_edge_subfeature_temp1_input": 53000, @@ -247,6 +215,8 @@ func TestSensors_Collect(t *testing.T) { "sensor_chip_amdgpu-pci-6700_feature_vddnb_subfeature_in1_input": 973, "sensor_chip_asus-isa-0000_feature_cpu_fan_subfeature_fan1_input": 5700000, "sensor_chip_asus-isa-0000_feature_gpu_fan_subfeature_fan2_input": 6600000, + "sensor_chip_asus-isa-0000_feature_intrusion0_subfeature_intrusion0_alarm_clear": 0, + "sensor_chip_asus-isa-0000_feature_intrusion0_subfeature_intrusion0_alarm_triggered": 1, "sensor_chip_bat0-acpi-0_feature_in0_subfeature_in0_input": 17365, "sensor_chip_k10temp-pci-00c3_feature_tctl_subfeature_temp1_input": 90000, "sensor_chip_nvme-pci-0600_feature_composite_subfeature_temp1_input": 33850, @@ -265,6 +235,7 @@ func TestSensors_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { sensors := New() + if 
test.prepareExecMock != nil { sensors.exec = test.prepareExecMock() } else if test.prepareSysfsMock != nil { @@ -274,6 +245,7 @@ func TestSensors_Collect(t *testing.T) { } var mx map[string]int64 + for i := 0; i < 10; i++ { mx = sensors.Collect() } @@ -289,15 +261,9 @@ func TestSensors_Collect(t *testing.T) { } } -func prepareMockExecOkOnlyTemp() *mockSensorsBinary { - return &mockSensorsBinary{ - sensorsInfoData: dataSensorsTemp, - } -} - -func prepareMockExecOkTempInCurrPowerFan() *mockSensorsBinary { +func prepareMockExecOk() *mockSensorsBinary { return &mockSensorsBinary{ - sensorsInfoData: dataSensorsTempInCurrPowerFan, + sensorsInfoData: dataSensors, } } @@ -348,6 +314,11 @@ func prepareMockSysfsScannerOk() *mockSysfsScanner { Label: "gpu_fan", Input: 6600, }, + &lmsensors.IntrusionSensor{ + Name: "intrusion0", + Label: "intrusion0", + Alarm: true, + }, }}, {Name: "nvme-pci-0600", Sensors: []lmsensors.Sensor{ &lmsensors.TemperatureSensor{ diff --git a/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt b/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt deleted file mode 100644 index decc7ee3992658..00000000000000 --- a/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt +++ /dev/null @@ -1,81 +0,0 @@ -k10temp-pci-00cb -Tctl: - temp1_input: 57.500 -Tccd1: - temp3_input: 54.000 -Tccd2: - temp4_input: 55.500 -Tccd3: - temp5_input: 56.000 -Tccd4: - temp6_input: 52.750 -Tccd5: - temp7_input: 53.500 -Tccd6: - temp8_input: 55.250 -Tccd7: - temp9_input: 53.000 -Tccd8: - temp10_input: 53.750 - -bnxt_en-pci-6201 -temp1: - temp1_input: 81.000 - -nvme-pci-6100 -Composite: - temp1_input: 48.850 - temp1_max: 89.850 - temp1_min: -20.150 - temp1_crit: 94.850 - temp1_alarm: 0.000 - -nvme-pci-0100 -Composite: - temp1_input: 39.850 - temp1_max: 89.850 - temp1_min: -20.150 - temp1_crit: 94.850 - temp1_alarm: 0.000 - -nouveau-pci-4100 -temp1: - temp1_input: 51.000 - temp1_max: 95.000 - temp1_max_hyst: 3.000 - temp1_crit: 105.000 - temp1_crit_hyst: 
5.000 - temp1_emergency: 135.000 - temp1_emergency_hyst: 5.000 - -k10temp-pci-00c3 -Tctl: - temp1_input: 62.000 -Tccd1: - temp3_input: 58.250 -Tccd2: - temp4_input: 60.250 -Tccd3: - temp5_input: 57.000 -Tccd4: - temp6_input: 57.250 -Tccd5: - temp7_input: 57.750 -Tccd6: - temp8_input: 59.500 -Tccd7: - temp9_input: 58.500 -Tccd8: - temp10_input: 61.250 - -bnxt_en-pci-6200 -temp1: - temp1_input: 80.000 - -nvme-pci-8100 -Composite: - temp1_input: 39.850 - temp1_max: 89.850 - temp1_min: -20.150 - temp1_crit: 94.850 - temp1_alarm: 0.000 diff --git a/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt b/src/go/plugin/go.d/modules/sensors/testdata/sensors.txt similarity index 89% rename from src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt rename to src/go/plugin/go.d/modules/sensors/testdata/sensors.txt index a38c7ab4ecf4aa..0936f0aaaa49c3 100644 --- a/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt +++ b/src/go/plugin/go.d/modules/sensors/testdata/sensors.txt @@ -70,3 +70,10 @@ PPT: acpitz-acpi-0 temp1: temp1_input: 88.000 +nct6779-isa-0290 +intrusion0: + intrusion0_alarm: 1.000 + intrusion0_beep: 0.000 +intrusion1: + intrusion1_alarm: 1.000 + intrusion1_beep: 0.000 diff --git a/src/go/plugin/go.d/modules/storcli/collect_drives.go b/src/go/plugin/go.d/modules/storcli/collect_drives.go index 5c2ecb387037a9..95965d572e0768 100644 --- a/src/go/plugin/go.d/modules/storcli/collect_drives.go +++ b/src/go/plugin/go.d/modules/storcli/collect_drives.go @@ -23,18 +23,18 @@ type drivesInfoResponse struct { type ( driveInfo struct { EIDSlt string `json:"EID:Slt"` - DID int `json:"DID"` - State string `json:"State"` - DG int `json:"DG"` - Size string `json:"Size"` - Intf string `json:"Intf"` - Med string `json:"Med"` - SED string `json:"SED"` - PI string `json:"PI"` - SeSz string `json:"SeSz"` - Model string `json:"Model"` - Sp string `json:"Sp"` - Type string `json:"Type"` + //DID int 
`json:"DID"` + //State string `json:"State"` + //DG int `json:"DG"` // FIX: can be integer or "-" + //Size string `json:"Size"` + //Intf string `json:"Intf"` + Med string `json:"Med"` + //SED string `json:"SED"` + //PI string `json:"PI"` + //SeSz string `json:"SeSz"` + //Model string `json:"Model"` + //Sp string `json:"Sp"` + //Type string `json:"Type"` } driveState struct { MediaErrorCount storNumber `json:"Media Error Count"` diff --git a/src/collectors/python.d.plugin/w1sensor/README.md b/src/go/plugin/go.d/modules/w1sensor/README.md similarity index 100% rename from src/collectors/python.d.plugin/w1sensor/README.md rename to src/go/plugin/go.d/modules/w1sensor/README.md diff --git a/src/go/plugin/go.d/modules/w1sensor/charts.go b/src/go/plugin/go.d/modules/w1sensor/charts.go new file mode 100644 index 00000000000000..c068c92f063642 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/charts.go @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package w1sensor + +import ( + "fmt" + "strings" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +const ( + prioTemperature = module.Priority + iota +) + +var ( + sensorChartTmpl = module.Chart{ + ID: "w1sensor_%s_temperature", + Title: "1-Wire Temperature Sensor", + Units: "Celsius", + Fam: "Temperature", + Ctx: "w1sensor.temperature", + Type: module.Line, + Priority: prioTemperature, + Dims: module.Dims{ + {ID: "w1sensor_%s_temperature", Div: precision}, + }, + } +) + +func (w *W1sensor) addSensorChart(id string) { + chart := sensorChartTmpl.Copy() + + chart.ID = fmt.Sprintf(chart.ID, id) + chart.Labels = []module.Label{ + {Key: "sensor_id", Value: id}, + } + + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, id) + } + + if err := w.Charts().Add(chart); err != nil { + w.Warning(err) + } + +} + +func (w *W1sensor) removeSensorChart(id string) { + px := fmt.Sprintf("w1sensor_%s", id) + for _, chart := range *w.Charts() { + if strings.HasPrefix(chart.ID, px) { + 
chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/plugin/go.d/modules/w1sensor/collect.go b/src/go/plugin/go.d/modules/w1sensor/collect.go new file mode 100644 index 00000000000000..4f1054174bcb41 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/collect.go @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package w1sensor + +import ( + "bufio" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "strconv" + "strings" +) + +const precision = 10 + +func (w *W1sensor) collect() (map[string]int64, error) { + des, err := os.ReadDir(w.SensorsPath) + if err != nil { + return nil, err + } + + mx := make(map[string]int64) + seen := make(map[string]bool) + + for _, de := range des { + if !de.IsDir() { + continue + } + if !isW1sensorDir(de.Name()) { + w.Debugf("'%s' is not a w1sensor directory, skipping it", filepath.Join(w.SensorsPath, de.Name())) + continue + } + + filename := filepath.Join(w.SensorsPath, de.Name(), "w1_slave") + + temp, err := readW1sensorTemperature(filename) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + w.Debugf("'%s' doesn't have 'w1_slave', skipping it", filepath.Join(w.SensorsPath, de.Name())) + continue + } + return nil, fmt.Errorf("failed to read temperature from '%s': %w", filename, err) + } + + seen[de.Name()] = true + if !w.seenSensors[de.Name()] { + w.addSensorChart(de.Name()) + + } + + mx[fmt.Sprintf("w1sensor_%s_temperature", de.Name())] = temp + } + + for id := range w.seenSensors { + if !seen[id] { + delete(w.seenSensors, id) + w.removeSensorChart(id) + } + } + + if len(mx) == 0 { + return nil, errors.New("no w1 sensors found") + } + + return mx, nil +} + +func readW1sensorTemperature(filename string) (int64, error) { + file, err := os.Open(filename) + if err != nil { + return 0, err + } + defer file.Close() + + sc := bufio.NewScanner(file) + sc.Scan() + // The second line displays the retained values along with a temperature in milli degrees Centigrade after t=. 
+ sc.Scan() + + _, tempStr, ok := strings.Cut(strings.TrimSpace(sc.Text()), "t=") + if !ok { + return 0, errors.New("no temperature found") + } + + v, err := strconv.ParseInt(tempStr, 10, 64) + if err != nil { + return 0, err + } + + return int64(float64(v) / 1000 * precision), nil +} + +func isW1sensorDir(dirName string) bool { + // Supported family members + // Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c + for _, px := range []string{ + "10-", // W1_THERM_DS18S20 + "22-", // W1_THERM_DS1822 + "28-", // W1_THERM_DS18B20 + "3b-", // W1_THERM_DS1825 + "42-", // W1_THERM_DS28EA00 + } { + if strings.HasPrefix(dirName, px) { + return true + } + } + return false +} diff --git a/src/go/plugin/go.d/modules/w1sensor/config_schema.json b/src/go/plugin/go.d/modules/w1sensor/config_schema.json new file mode 100644 index 00000000000000..bba6f8a5d80de3 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/config_schema.json @@ -0,0 +1,32 @@ +{ + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Access Point collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "Data collection interval, measured in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "sensors_path": { + "title": "Sensors path", + "description": "Directory path containing sensor folders with w1_slave files.", + "type": "string", + "default": "/sys/bus/w1/devices" + } + }, + "required": [], + "additionalProperties": false, + "patternProperties": { + "^name$": {} + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } +} diff --git a/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md b/src/go/plugin/go.d/modules/w1sensor/integrations/1-wire_sensors.md similarity index 65% rename from src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md rename to src/go/plugin/go.d/modules/w1sensor/integrations/1-wire_sensors.md index 
15582879e1f88d..edb152787ff304 100644 --- a/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md +++ b/src/go/plugin/go.d/modules/w1sensor/integrations/1-wire_sensors.md @@ -1,6 +1,6 @@ -Plugin: python.d.plugin +Plugin: go.d.plugin Module: w1sensor @@ -23,6 +23,7 @@ Module: w1sensor Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts. + The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected. This collector is only supported on the following platforms: @@ -55,9 +56,9 @@ The scope defines the instance that the metric belongs to. An instance is unique -### Per 1-Wire Sensors instance +### Per sensor -These metrics refer to the entire monitored application. +These metrics refer to the 1-Wire Sensor. This scope has no labels. @@ -65,7 +66,7 @@ Metrics: | Metric | Dimensions | Unit | |:------|:----------|:----| -| w1sensor.temp | a dimension per sensor | Celsius | +| w1sensor.temperature | temperature | Celsius | @@ -87,7 +88,7 @@ Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded. #### File -The configuration file name for this integration is `python.d/w1sensor.conf`. +The configuration file name for this integration is `go.d/w1sensor.conf`. You can edit the configuration file using the `edit-config` script from the @@ -95,45 +96,32 @@ Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netda ```bash cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config python.d/w1sensor.conf +sudo ./edit-config go.d/w1sensor.conf ``` #### Options -There are 2 sections: - -* Global variables -* One or more JOBS that can define multiple different instances to monitor. 
- -The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. - -Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition. - -Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. +The following options can be defined globally: update_every.
Config options | Name | Description | Default | Required | |:----|:-----------|:-------|:--------:| -| update_every | Sets the default data collection frequency. | 5 | no | -| priority | Controls the order of charts at the netdata dashboard. | 60000 | no | -| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no | -| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no | -| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no | -| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no | +| update_every | Data collection frequency. | 1 | no | +| sensors_path | Directory path containing sensor folders with w1_slave files. | /sys/bus/w1/devices | no |
#### Examples -##### Provide human readable names +##### Custom sensor device path -Associate two 1-Wire identifiers with human readable names. +Monitors a virtual sensor when the w1_slave file is located in a custom directory instead of the default location. ```yaml -sensors: - name_00000022276e: 'Machine room' - name_00000022298f: 'Rack 12' +jobs: + - name: custom_sensors_path + sensors_path: /custom/path/devices ``` @@ -142,8 +130,9 @@ sensors: ### Debug Mode +**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature. -To troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output +To troubleshoot issues with the `w1sensor` collector, run the `go.d.plugin` with the debug option enabled. The output should give you clues as to why the collector isn't working. - Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on @@ -159,10 +148,10 @@ should give you clues as to why the collector isn't working. 
sudo -u netdata -s ``` -- Run the `python.d.plugin` to debug the collector: +- Run the `go.d.plugin` to debug the collector: ```bash - ./python.d.plugin w1sensor debug trace + ./go.d.plugin -d -m w1sensor ``` ### Getting Logs diff --git a/src/go/plugin/go.d/modules/w1sensor/metadata.yaml b/src/go/plugin/go.d/modules/w1sensor/metadata.yaml new file mode 100644 index 00000000000000..920fce49916830 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/metadata.yaml @@ -0,0 +1,95 @@ +plugin_name: go.d.plugin +modules: + - meta: + plugin_name: go.d.plugin + module_name: w1sensor + monitored_instance: + name: 1-Wire Sensors + link: "https://www.analog.com/en/product-category/1wire-temperature-sensors.html" + categories: + - data-collection.hardware-devices-and-sensors + icon_filename: "1-wire.png" + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - temperature + - sensor + - 1-wire + most_popular: false + overview: + data_collection: + metrics_description: | + Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts. + method_description: The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected. + supported_platforms: + include: + - Linux + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "The collector will try to auto detect available 1-Wire devices." + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: "Required Linux kernel modules" + description: "Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded." + configuration: + file: + name: go.d/w1sensor.conf + options: + description: | + The following options can be defined globally: update_every. 
+ folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: sensors_path + description: Directory path containing sensor folders with w1_slave files. + default_value: /sys/bus/w1/devices + required: false + examples: + folding: + title: "" + enabled: false + list: + - name: Custom sensor device path + description: Monitors a virtual sensor when the w1_slave file is located in a custom directory instead of the default location. + config: | + jobs: + - name: custom_sensors_path + sensors_path: /custom/path/devices + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: sensor + description: These metrics refer to the 1-Wire Sensor. + labels: [] + metrics: + - name: w1sensor.temperature + description: 1-Wire Temperature Sensor + unit: "Celsius" + chart_type: line + dimensions: + - name: temperature diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/config.json b/src/go/plugin/go.d/modules/w1sensor/testdata/config.json new file mode 100644 index 00000000000000..7409104c1e6c08 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/config.json @@ -0,0 +1,4 @@ +{ + "update_every": 123, + "sensors_path": "ok" +} diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/config.yaml b/src/go/plugin/go.d/modules/w1sensor/testdata/config.yaml new file mode 100644 index 00000000000000..c897086f2bd082 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/config.yaml @@ -0,0 +1,2 @@ +update_every: 123 +sensors_path: "ok" diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa0/w1_slave b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa0/w1_slave new file mode 100644 index 00000000000000..b37c46650fd320 --- /dev/null +++ 
b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa0/w1_slave @@ -0,0 +1,2 @@ +17 01 4b 46 7f ff 0c 10 71 : crc=71 YES +17 01 4b 46 7f ff 0c 10 71 t=12435 \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa1/w1_slave b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa1/w1_slave new file mode 100644 index 00000000000000..d4dee090e15ada --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa1/w1_slave @@ -0,0 +1,2 @@ +17 01 4b 46 7f ff 0c 10 71 : crc=71 YES +17 01 4b 46 7f ff 0c 10 71 t=29960 \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa2/w1_slave b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa2/w1_slave new file mode 100644 index 00000000000000..342fa51640922b --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa2/w1_slave @@ -0,0 +1,2 @@ +17 01 4b 46 7f ff 0c 10 71 : crc=71 YES +17 01 4b 46 7f ff 0c 10 71 t=10762 \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa3/w1_slave b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa3/w1_slave new file mode 100644 index 00000000000000..f1ec47dfe753f6 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/testdata/devices/28-01204e9d2fa3/w1_slave @@ -0,0 +1,2 @@ +17 01 4b 46 7f ff 0c 10 71 : crc=71 YES +17 01 4b 46 7f ff 0c 10 71 t=22926 \ No newline at end of file diff --git a/src/go/plugin/go.d/modules/w1sensor/w1sensor.go b/src/go/plugin/go.d/modules/w1sensor/w1sensor.go new file mode 100644 index 00000000000000..3e640ab4239f8f --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/w1sensor.go @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package w1sensor + +import ( + _ "embed" + "errors" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" +) + +//go:embed 
"config_schema.json" +var configSchema string + +func init() { + module.Register("w1sensor", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 1, + }, + Create: func() module.Module { return New() }, + Config: func() any { return &Config{} }, + }) +} + +func New() *W1sensor { + return &W1sensor{ + Config: Config{ + SensorsPath: "/sys/bus/w1/devices", + }, + charts: &module.Charts{}, + seenSensors: make(map[string]bool), + } +} + +type Config struct { + UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"` + SensorsPath string `yaml:"sensors_path,omitempty" json:"sensors_path"` +} + +type ( + W1sensor struct { + module.Base + Config `yaml:",inline" json:""` + + charts *module.Charts + + seenSensors map[string]bool + } +) + +func (w *W1sensor) Configuration() any { + return w.Config +} + +func (w *W1sensor) Init() error { + if w.SensorsPath == "" { + w.Errorf("sensors_path required but not set") + return errors.New("no sensors path specified") + } + + return nil +} + +func (w *W1sensor) Check() error { + mx, err := w.collect() + if err != nil { + w.Error(err) + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil +} + +func (w *W1sensor) Charts() *module.Charts { + return w.charts +} + +func (w *W1sensor) Collect() map[string]int64 { + mx, err := w.collect() + if err != nil { + w.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (w *W1sensor) Cleanup() {} diff --git a/src/go/plugin/go.d/modules/w1sensor/w1sensor_test.go b/src/go/plugin/go.d/modules/w1sensor/w1sensor_test.go new file mode 100644 index 00000000000000..a92d1475b409d4 --- /dev/null +++ b/src/go/plugin/go.d/modules/w1sensor/w1sensor_test.go @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package w1sensor + +import ( + "os" + "testing" + + "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +var ( + dataConfigJSON, _ = os.ReadFile("testdata/config.json") + dataConfigYAML, _ = os.ReadFile("testdata/config.yaml") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataConfigJSON": dataConfigJSON, + "dataConfigYAML": dataConfigYAML, + } { + require.NotNil(t, data, name) + } +} + +func TestW1sensor_Configuration(t *testing.T) { + module.TestConfigurationSerialize(t, &W1sensor{}, dataConfigJSON, dataConfigYAML) +} + +func TestW1sensor_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "fails if 'sensors_path' is not set": { + wantFail: true, + config: Config{ + SensorsPath: "", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w1 := New() + w1.Config = test.config + + if test.wantFail { + assert.Error(t, w1.Init()) + } else { + assert.NoError(t, w1.Init()) + } + }) + } +} + +func TestAP_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func() *W1sensor + }{ + "not initialized exec": { + prepare: func() *W1sensor { + return New() + }, + }, + "after check": { + prepare: func() *W1sensor { + w1 := prepareCaseOk() + _ = w1.Check() + return w1 + }, + }, + "after collect": { + prepare: func() *W1sensor { + w1 := prepareCaseOk() + _ = w1.Collect() + return w1 + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w1 := test.prepare() + + assert.NotPanics(t, w1.Cleanup) + }) + } +} + +func TestW1sensor_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestW1sensor_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func() *W1sensor + wantFail bool + }{ + "success case": { + wantFail: false, + prepareMock: prepareCaseOk, + }, + "no sensors dir": { + wantFail: true, + prepareMock: prepareCaseNoSensorsDir, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w1 := test.prepareMock() + + if 
test.wantFail { + assert.Error(t, w1.Check()) + } else { + assert.NoError(t, w1.Check()) + } + }) + } +} + +func TestW1Sensors_Collect(t *testing.T) { + tests := map[string]struct { + prepareMock func() *W1sensor + wantMetrics map[string]int64 + wantCharts int + }{ + "success case": { + prepareMock: prepareCaseOk, + wantCharts: 4, + wantMetrics: map[string]int64{ + "w1sensor_28-01204e9d2fa0_temperature": 124, + "w1sensor_28-01204e9d2fa1_temperature": 299, + "w1sensor_28-01204e9d2fa2_temperature": 107, + "w1sensor_28-01204e9d2fa3_temperature": 229, + }, + }, + "no sensors dir": { + prepareMock: prepareCaseNoSensorsDir, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w1 := test.prepareMock() + + mx := w1.Collect() + + assert.Equal(t, test.wantMetrics, mx) + + assert.Equal(t, test.wantCharts, len(*w1.Charts()), "wantCharts") + + module.TestMetricsHasAllChartsDims(t, w1.Charts(), mx) + }) + } +} + +func prepareCaseOk() *W1sensor { + w1 := New() + w1.SensorsPath = "testdata/devices" + return w1 +} + +func prepareCaseNoSensorsDir() *W1sensor { + w1 := New() + w1.SensorsPath = "testdata/devices!" 
+ return w1 +} diff --git a/src/health/health.c b/src/health/health.c index 7039a193cc0a06..78559d7f47fbb8 100644 --- a/src/health/health.c +++ b/src/health/health.c @@ -14,7 +14,7 @@ struct health_plugin_globals health_globals = { .use_summary_for_notifications = true, .health_log_entries_max = HEALTH_LOG_ENTRIES_DEFAULT, - .health_log_history = HEALTH_LOG_HISTORY_DEFAULT, + .health_log_retention_s = HEALTH_LOG_RETENTION_DEFAULT, .default_warn_repeat_every = 0, .default_crit_repeat_every = 0, @@ -55,17 +55,17 @@ static void health_load_config_defaults(void) { health_globals.config.use_summary_for_notifications); health_globals.config.default_warn_repeat_every = - config_get_duration(CONFIG_SECTION_HEALTH, "default repeat warning", "never"); + config_get_duration_seconds(CONFIG_SECTION_HEALTH, "default repeat warning", 0); health_globals.config.default_crit_repeat_every = - config_get_duration(CONFIG_SECTION_HEALTH, "default repeat critical", "never"); + config_get_duration_seconds(CONFIG_SECTION_HEALTH, "default repeat critical", 0); health_globals.config.health_log_entries_max = config_get_number(CONFIG_SECTION_HEALTH, "in memory max health log entries", health_globals.config.health_log_entries_max); - health_globals.config.health_log_history = - config_get_number(CONFIG_SECTION_HEALTH, "health log history", HEALTH_LOG_DEFAULT_HISTORY); + health_globals.config.health_log_retention_s = + config_get_duration_seconds(CONFIG_SECTION_HEALTH, "health log retention", HEALTH_LOG_RETENTION_DEFAULT); snprintfz(filename, FILENAME_MAX, "%s/alarm-notify.sh", netdata_configured_primary_plugins_dir); health_globals.config.default_exec = @@ -76,14 +76,13 @@ static void health_load_config_defaults(void) { NULL, SIMPLE_PATTERN_EXACT, true); health_globals.config.run_at_least_every_seconds = - (int)config_get_number(CONFIG_SECTION_HEALTH, - "run at least every seconds", - health_globals.config.run_at_least_every_seconds); + (int)config_get_duration_seconds(CONFIG_SECTION_HEALTH, 
"run at least every", + health_globals.config.run_at_least_every_seconds); health_globals.config.postpone_alarms_during_hibernation_for_seconds = - config_get_number(CONFIG_SECTION_HEALTH, - "postpone alarms during hibernation for seconds", - health_globals.config.postpone_alarms_during_hibernation_for_seconds); + config_get_duration_seconds(CONFIG_SECTION_HEALTH, + "postpone alarms during hibernation for", + health_globals.config.postpone_alarms_during_hibernation_for_seconds); health_globals.config.default_recipient = string_strdupz("root"); @@ -115,27 +114,27 @@ static void health_load_config_defaults(void) { (long)health_globals.config.health_log_entries_max); } - if (health_globals.config.health_log_history < HEALTH_LOG_MINIMUM_HISTORY) { + if (health_globals.config.health_log_retention_s < HEALTH_LOG_MINIMUM_HISTORY) { nd_log(NDLS_DAEMON, NDLP_WARNING, - "Health configuration has invalid health log history %u. Using minimum %d", - health_globals.config.health_log_history, HEALTH_LOG_MINIMUM_HISTORY); + "Health configuration has invalid health log retention %u. 
Using minimum %d", + health_globals.config.health_log_retention_s, HEALTH_LOG_MINIMUM_HISTORY); - health_globals.config.health_log_history = HEALTH_LOG_MINIMUM_HISTORY; - config_set_number(CONFIG_SECTION_HEALTH, "health log history", health_globals.config.health_log_history); + health_globals.config.health_log_retention_s = HEALTH_LOG_MINIMUM_HISTORY; + config_set_duration_seconds(CONFIG_SECTION_HEALTH, "health log retention", health_globals.config.health_log_retention_s); } nd_log(NDLS_DAEMON, NDLP_DEBUG, "Health log history is set to %u seconds (%u days)", - health_globals.config.health_log_history, health_globals.config.health_log_history / 86400); + health_globals.config.health_log_retention_s, health_globals.config.health_log_retention_s / 86400); } -inline char *health_user_config_dir(void) { +inline const char *health_user_config_dir(void) { char buffer[FILENAME_MAX + 1]; snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_user_config_dir); return config_get(CONFIG_SECTION_DIRECTORIES, "health config", buffer); } -inline char *health_stock_config_dir(void) { +inline const char *health_stock_config_dir(void) { char buffer[FILENAME_MAX + 1]; snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir); return config_get(CONFIG_SECTION_DIRECTORIES, "stock health config", buffer); diff --git a/src/health/health.h b/src/health/health.h index 4f962eaa584f34..cdd089623f54af 100644 --- a/src/health/health.h +++ b/src/health/health.h @@ -34,8 +34,8 @@ void health_entry_flags_to_json_array(BUFFER *wb, const char *key, HEALTH_ENTRY_ #define HEALTH_LISTEN_BACKLOG 4096 #endif -#ifndef HEALTH_LOG_DEFAULT_HISTORY -#define HEALTH_LOG_DEFAULT_HISTORY 432000 +#ifndef HEALTH_LOG_RETENTION_DEFAULT +#define HEALTH_LOG_RETENTION_DEFAULT (5 * 86400) #endif #ifndef HEALTH_LOG_MINIMUM_HISTORY @@ -75,8 +75,8 @@ ALARM_ENTRY* health_create_alarm_entry( void health_alarm_log_add_entry(RRDHOST *host, ALARM_ENTRY *ae); -char 
*health_user_config_dir(void); -char *health_stock_config_dir(void); +const char *health_user_config_dir(void); +const char *health_stock_config_dir(void); void health_alarm_log_free(RRDHOST *host); void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae); diff --git a/src/health/health_config.c b/src/health/health_config.c index c17f7e21dcd581..8eaadf4947406f 100644 --- a/src/health/health_config.c +++ b/src/health/health_config.c @@ -29,14 +29,14 @@ static inline int health_parse_delay( while(*s && isspace((uint8_t)*s)) *s++ = '\0'; if(!strcasecmp(key, "up")) { - if (!config_parse_duration(value, delay_up_duration)) { + if (!duration_parse_seconds(value, delay_up_duration)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", line, filename, value, key); } else given_up = 1; } else if(!strcasecmp(key, "down")) { - if (!config_parse_duration(value, delay_down_duration)) { + if (!duration_parse_seconds(value, delay_down_duration)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", line, filename, value, key); } @@ -51,7 +51,7 @@ static inline int health_parse_delay( else given_multiplier = 1; } else if(!strcasecmp(key, "max")) { - if (!config_parse_duration(value, delay_max_duration)) { + if (!duration_parse_seconds(value, delay_max_duration)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", line, filename, value, key); } @@ -139,13 +139,13 @@ static inline int health_parse_repeat( return 1; } if(!strcasecmp(key, "warning")) { - if (!config_parse_duration(value, (int*)warn_repeat_every)) { + if (!duration_parse_seconds(value, (int *)warn_repeat_every)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", line, file, value, key); } } else if(!strcasecmp(key, "critical")) { - if (!config_parse_duration(value, (int*)crit_repeat_every)) { + if 
(!duration_parse_seconds(value, (int *)crit_repeat_every)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid value '%s' for '%s' keyword", line, file, value, key); } @@ -273,7 +273,7 @@ static inline int health_parse_db_lookup(size_t line, const char *filename, char while(*s && !isspace((uint8_t)*s)) s++; while(*s && isspace((uint8_t)*s)) *s++ = '\0'; - if(!config_parse_duration(key, &ac->after)) { + if(!duration_parse_seconds(key, &ac->after)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' after group method", line, filename, key); return 0; @@ -294,7 +294,7 @@ static inline int health_parse_db_lookup(size_t line, const char *filename, char while(*s && !isspace((uint8_t)*s)) s++; while(*s && isspace((uint8_t)*s)) *s++ = '\0'; - if (!config_parse_duration(value, &ac->before)) { + if (!duration_parse_seconds(value, &ac->before)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword", line, filename, value, key); } @@ -304,7 +304,7 @@ static inline int health_parse_db_lookup(size_t line, const char *filename, char while(*s && !isspace((uint8_t)*s)) s++; while(*s && isspace((uint8_t)*s)) *s++ = '\0'; - if (!config_parse_duration(value, &ac->update_every)) { + if (!duration_parse_seconds(value, &ac->update_every)) { netdata_log_error("Health configuration at line %zu of file '%s': invalid duration '%s' for '%s' keyword", line, filename, value, key); } @@ -725,7 +725,7 @@ int health_readfile(const char *filename, void *data __maybe_unused, bool stock_ health_parse_db_lookup(line, filename, value, ac); } else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) { - if(!config_parse_duration(value, &ac->update_every)) + if(!duration_parse_seconds(value, &ac->update_every)) netdata_log_error( "Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' " "cannot parse duration: '%s'.", diff --git a/src/health/health_event_loop.c 
b/src/health/health_event_loop.c index d9f5c2a05e2c53..b7cb52c4901cb8 100644 --- a/src/health/health_event_loop.c +++ b/src/health/health_event_loop.c @@ -130,7 +130,7 @@ static void health_initialize_rrdhost(RRDHOST *host) { rrdhost_flag_set(host, RRDHOST_FLAG_INITIALIZED_HEALTH); host->health_log.max = health_globals.config.health_log_entries_max; - host->health_log.health_log_history = health_globals.config.health_log_history; + host->health_log.health_log_retention_s = health_globals.config.health_log_retention_s; host->health.health_default_exec = string_dup(health_globals.config.default_exec); host->health.health_default_recipient = string_dup(health_globals.config.default_recipient); host->health.use_summary_for_notifications = health_globals.config.use_summary_for_notifications; diff --git a/src/health/health_internals.h b/src/health/health_internals.h index 638a9619598703..a86e629560f543 100644 --- a/src/health/health_internals.h +++ b/src/health/health_internals.h @@ -9,7 +9,7 @@ #define HEALTH_LOG_ENTRIES_MAX 100000U #define HEALTH_LOG_ENTRIES_MIN 10U -#define HEALTH_LOG_HISTORY_DEFAULT (5 * 86400) +#define HEALTH_LOG_RETENTION_DEFAULT (5 * 86400) #define HEALTH_CONF_MAX_LINE 4096 @@ -76,7 +76,7 @@ struct health_plugin_globals { bool use_summary_for_notifications; unsigned int health_log_entries_max; - uint32_t health_log_history; // the health log history in seconds to be kept in db + uint32_t health_log_retention_s; // the health log retention in seconds to be kept in db STRING *silencers_filename; STRING *default_exec; diff --git a/src/health/notifications/alarm-notify.sh.in b/src/health/notifications/alarm-notify.sh.in index c7c44cb116a287..d7baa7345022ea 100755 --- a/src/health/notifications/alarm-notify.sh.in +++ b/src/health/notifications/alarm-notify.sh.in @@ -769,6 +769,9 @@ fi # check custom [ -z "${DEFAULT_RECIPIENT_CUSTOM}" ] && SEND_CUSTOM="NO" +# check ilert +[ -z "${ILERT_ALERT_SOURCE_URL}" ] && SEND_ILERT="NO" + # 
----------------------------------------------------------------------------- # check the availability of targets @@ -798,7 +801,8 @@ check_supported_targets() { [ "${SEND_DYNATRACE}" = "YES" ] || [ "${SEND_OPSGENIE}" = "YES" ] || [ "${SEND_GOTIFY}" = "YES" ] || - [ "${SEND_NTFY}" = "YES" ]; then + [ "${SEND_NTFY}" = "YES" ] || + [ "${SEND_ILERT}" = "YES" ]; then # if we need curl, check for the curl command if [ -z "${curl}" ]; then curl="$(command -v curl 2>/dev/null)" @@ -828,6 +832,7 @@ check_supported_targets() { SEND_OPSGENIE="NO" SEND_GOTIFY="NO" SEND_NTFY="NO" + SEND_ILERT="NO" fi fi @@ -983,7 +988,8 @@ for method in "${SEND_EMAIL}" \ "${SEND_DYNATRACE}" \ "${SEND_OPSGENIE}" \ "${SEND_GOTIFY}" \ - "${SEND_NTFY}" ; do + "${SEND_NTFY}" \ + "${SEND_ILERT}" ; do if [ "${method}" == "YES" ]; then proceed=1 @@ -2431,6 +2437,50 @@ send_ntfy() { return 1 } +# ----------------------------------------------------------------------------- +# ilert sender + +send_ilert() { + local payload httpcode + [ "${SEND_ILERT}" != "YES" ] && return 1 + + if [ -z "${ILERT_ALERT_SOURCE_URL}" ] ; then + info "Can't send ilert notification, because ILERT_ALERT_SOURCE_URL is not defined" + return 1 + fi + + payload=$(cat < + +# ilert + + + + + +ilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates. +Sending notification to ilert via Netdata's Agent alert notification feature includes links, images and resolving of corresponding alerts. + + + + + +## Setup + +### Prerequisites + +#### + +- A Netdata alert source in ilert. You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/). 
+- Access to the terminal where Netdata Agent is running + + + +### Configuration + +#### File + +The configuration file name for this integration is `health_alarm_notify.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config health_alarm_notify.conf +``` +#### Options + +The following options can be defined for this notification + +
Config Options + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| SEND_ILERT | Set `SEND_ILERT` to YES | YES | yes | +| ILERT_ALERT_SOURCE_URL | Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source url in ilert. | | yes | + +
+ +#### Examples + +##### Basic Configuration + + + +```yaml +SEND_ILERT="YES" +ILERT_ALERT_SOURCE_URL="https://api.ilert.com/api/v1/events/netdata/{API-KEY}" + +``` + + +## Troubleshooting + +### Test Notification + +You can run the following command by hand, to test alerts configuration: + +```bash +# become user netdata +sudo su -s /bin/bash netdata + +# enable debugging info on the console +export NETDATA_ALARM_NOTIFY_DEBUG=1 + +# send test alarms to sysadmin +/usr/libexec/netdata/plugins.d/alarm-notify.sh test + +# send test alarms to any role +/usr/libexec/netdata/plugins.d/alarm-notify.sh test "ROLE" +``` + +Note that this will test _all_ alert mechanisms for the selected role. + + diff --git a/src/health/notifications/ilert/metadata.yaml b/src/health/notifications/ilert/metadata.yaml new file mode 100644 index 00000000000000..7e2454834a7495 --- /dev/null +++ b/src/health/notifications/ilert/metadata.yaml @@ -0,0 +1,55 @@ +# yamllint disable rule:line-length +--- +- id: "notify-ilert" + meta: + name: "ilert" + link: "https://www.ilert.com/" + categories: + - notify.agent + icon_filename: "ilert.svg" + keywords: + - ilert + overview: + notification_description: | + ilert is an alerting and incident management tool. It helps teams reduce response times by enhancing monitoring and ticketing tools with reliable alerts, automatic escalations, on-call schedules, and features for incident response, communication, and status updates. + Sending notification to ilert via Netdata's Agent alert notification feature includes links, images and resolving of corresponding alerts. + notification_limitations: "" + setup: + prerequisites: + list: + - title: "" + description: | + - A Netdata alert source in ilert. You can create a [Netdata alert source](https://docs.ilert.com/inbound-integrations/netdata) in [ilert](https://www.ilert.com/). 
+ - Access to the terminal where Netdata Agent is running + configuration: + file: + name: "health_alarm_notify.conf" + options: + description: "The following options can be defined for this notification" + folding: + title: "Config Options" + enabled: true + list: + - name: "SEND_ILERT" + default_value: "YES" + description: "Set `SEND_ILERT` to YES" + required: true + - name: "ILERT_ALERT_SOURCE_URL" + default_value: "" + description: "Set `ILERT_ALERT_SOURCE_URL` to your Netdata alert source url in ilert." + required: true + examples: + folding: + enabled: true + title: "" + list: + - name: "Basic Configuration" + folding: + enabled: false + description: "" + config: | + SEND_ILERT="YES" + ILERT_ALERT_SOURCE_URL="https://api.ilert.com/api/v1/events/netdata/{API-KEY}" + troubleshooting: + problems: + list: [] diff --git a/src/libnetdata/aral/aral.c b/src/libnetdata/aral/aral.c index 64b63d8e06f1d6..9532d63dde8ace 100644 --- a/src/libnetdata/aral/aral.c +++ b/src/libnetdata/aral/aral.c @@ -72,7 +72,7 @@ struct aral { struct { bool enabled; const char *filename; - char **cache_dir; + const char **cache_dir; } mmap; } config; @@ -709,7 +709,7 @@ size_t aral_element_size(ARAL *ar) { } ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_elements, size_t max_page_size, - struct aral_statistics *stats, const char *filename, char **cache_dir, bool mmap, bool lockless) { + struct aral_statistics *stats, const char *filename, const char **cache_dir, bool mmap, bool lockless) { ARAL *ar = callocz(1, sizeof(ARAL)); ar->config.options = (lockless) ? 
ARAL_LOCKLESS : 0; ar->config.requested_element_size = element_size; @@ -1078,7 +1078,7 @@ int aral_stress_test(size_t threads, size_t elements, size_t seconds) { } int aral_unittest(size_t elements) { - char *cache_dir = "/tmp/"; + const char *cache_dir = "/tmp/"; struct aral_unittest_config auc = { .single_threaded = true, diff --git a/src/libnetdata/aral/aral.h b/src/libnetdata/aral/aral.h index 2e749bc4c20fd3..1ff0672ebe6e6e 100644 --- a/src/libnetdata/aral/aral.h +++ b/src/libnetdata/aral/aral.h @@ -28,7 +28,7 @@ struct aral_statistics { }; ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_elements, size_t max_page_size, - struct aral_statistics *stats, const char *filename, char **cache_dir, bool mmap, bool lockless); + struct aral_statistics *stats, const char *filename, const char **cache_dir, bool mmap, bool lockless); size_t aral_element_size(ARAL *ar); size_t aral_overhead(ARAL *ar); size_t aral_structures(ARAL *ar); diff --git a/src/libnetdata/clocks/clocks.c b/src/libnetdata/clocks/clocks.c index 5da450a2dc8948..7527f9997d2b94 100644 --- a/src/libnetdata/clocks/clocks.c +++ b/src/libnetdata/clocks/clocks.c @@ -452,7 +452,7 @@ static inline collected_number uptime_from_boottime(void) { } static procfile *read_proc_uptime_ff = NULL; -static inline collected_number read_proc_uptime(char *filename) { +static inline collected_number read_proc_uptime(const char *filename) { if(unlikely(!read_proc_uptime_ff)) { read_proc_uptime_ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); if(unlikely(!read_proc_uptime_ff)) return 0; @@ -473,7 +473,7 @@ static inline collected_number read_proc_uptime(char *filename) { return (collected_number)(strtondd(procfile_lineword(read_proc_uptime_ff, 0, 0), NULL) * 1000.0); } -inline collected_number uptime_msec(char *filename){ +inline collected_number uptime_msec(const char *filename){ static int use_boottime = -1; if(unlikely(use_boottime == -1)) { diff --git a/src/libnetdata/clocks/clocks.h 
b/src/libnetdata/clocks/clocks.h index f989fd6b807b58..befc009578facc 100644 --- a/src/libnetdata/clocks/clocks.h +++ b/src/libnetdata/clocks/clocks.h @@ -18,7 +18,12 @@ struct timespec { typedef uint64_t nsec_t; typedef uint64_t msec_t; typedef uint64_t usec_t; + +typedef int64_t snsec_t; typedef int64_t susec_t; +typedef int64_t smsec_t; + +typedef int64_t stime_t; typedef struct heartbeat { usec_t realtime; @@ -151,7 +156,7 @@ time_t now_sec(clockid_t clk_id); usec_t now_usec(clockid_t clk_id); int now_timeval(clockid_t clk_id, struct timeval *tv); -collected_number uptime_msec(char *filename); +collected_number uptime_msec(const char *filename); extern usec_t clock_monotonic_resolution; extern usec_t clock_realtime_resolution; diff --git a/src/libnetdata/config/appconfig.c b/src/libnetdata/config/appconfig.c index 1c151b7029bd4c..f26417ac35d737 100644 --- a/src/libnetdata/config/appconfig.c +++ b/src/libnetdata/config/appconfig.c @@ -1,970 +1,82 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../libnetdata.h" +#include "appconfig_internals.h" -/* - * @Input: - * Connector / instance to add to an internal structure - * @Return - * The current head of the linked list of connector_instance - * - */ - -_CONNECTOR_INSTANCE *add_connector_instance(struct section *connector, struct section *instance) -{ - static struct _connector_instance *global_connector_instance = NULL; - struct _connector_instance *local_ci, *local_ci_tmp; - - if (unlikely(!connector)) { - if (unlikely(!instance)) - return global_connector_instance; - - local_ci = global_connector_instance; - while (local_ci) { - local_ci_tmp = local_ci->next; - freez(local_ci); - local_ci = local_ci_tmp; - } - global_connector_instance = NULL; - return NULL; - } - - local_ci = callocz(1, sizeof(struct _connector_instance)); - local_ci->instance = instance; - local_ci->connector = connector; - strncpyz(local_ci->instance_name, instance->name, CONFIG_MAX_NAME); - strncpyz(local_ci->connector_name, 
connector->name, CONFIG_MAX_NAME); - local_ci->next = global_connector_instance; - global_connector_instance = local_ci; - - return global_connector_instance; -} - -int is_valid_connector(char *type, int check_reserved) -{ - int rc = 1; - - if (unlikely(!type)) - return 0; - - if (!check_reserved) { - if (unlikely(is_valid_connector(type,1))) { - return 0; - } - //if (unlikely(*type == ':') - // return 0; - char *separator = strrchr(type, ':'); - if (likely(separator)) { - *separator = '\0'; - rc = separator - type; - } else - return 0; - } -// else { -// if (unlikely(is_valid_connector(type,1))) { -// netdata_log_error("Section %s invalid -- reserved name", type); -// return 0; -// } -// } - - if (!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) { - return rc; - } else if (!strcmp(type, "graphite:http") || !strcmp(type, "graphite:https")) { - return rc; - } else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) { - return rc; - } else if (!strcmp(type, "json:http") || !strcmp(type, "json:https")) { - return rc; - } else if (!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) { - return rc; - } else if (!strcmp(type, "opentsdb:http") || !strcmp(type, "opentsdb:https")) { - return rc; - } else if (!strcmp(type, "prometheus_remote_write")) { - return rc; - } else if (!strcmp(type, "prometheus_remote_write:http") || !strcmp(type, "prometheus_remote_write:https")) { - return rc; - } else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) { - return rc; - } else if (!strcmp(type, "pubsub") || !strcmp(type, "pubsub:plaintext")) { - return rc; - } else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext")) { - return rc; - } - - return 0; -} - -// ---------------------------------------------------------------------------- -// locking - -inline void appconfig_wrlock(struct config *root) { - netdata_mutex_lock(&root->mutex); -} - -inline void appconfig_unlock(struct config *root) { - 
netdata_mutex_unlock(&root->mutex); -} - -inline void config_section_wrlock(struct section *co) { - netdata_mutex_lock(&co->mutex); -} - -inline void config_section_unlock(struct section *co) { - netdata_mutex_unlock(&co->mutex); -} - - -// ---------------------------------------------------------------------------- -// config name-value index - -static int appconfig_option_compare(void *a, void *b) { - if(((struct config_option *)a)->hash < ((struct config_option *)b)->hash) return -1; - else if(((struct config_option *)a)->hash > ((struct config_option *)b)->hash) return 1; - else return strcmp(((struct config_option *)a)->name, ((struct config_option *)b)->name); -} - -#define appconfig_option_index_add(co, cv) (struct config_option *)avl_insert_lock(&((co)->values_index), (avl_t *)(cv)) -#define appconfig_option_index_del(co, cv) (struct config_option *)avl_remove_lock(&((co)->values_index), (avl_t *)(cv)) - -static struct config_option *appconfig_option_index_find(struct section *co, const char *name, uint32_t hash) { - struct config_option tmp; - tmp.hash = (hash)?hash:simple_hash(name); - tmp.name = (char *)name; - - return (struct config_option *)avl_search_lock(&(co->values_index), (avl_t *) &tmp); -} - - -// ---------------------------------------------------------------------------- -// config sections index - -int appconfig_section_compare(void *a, void *b) { - if(((struct section *)a)->hash < ((struct section *)b)->hash) return -1; - else if(((struct section *)a)->hash > ((struct section *)b)->hash) return 1; - else return strcmp(((struct section *)a)->name, ((struct section *)b)->name); -} - -#define appconfig_index_add(root, cfg) (struct section *)avl_insert_lock(&(root)->index, (avl_t *)(cfg)) -#define appconfig_index_del(root, cfg) (struct section *)avl_remove_lock(&(root)->index, (avl_t *)(cfg)) - -static struct section *appconfig_index_find(struct config *root, const char *name, uint32_t hash) { - struct section tmp; - tmp.hash = 
(hash)?hash:simple_hash(name); - tmp.name = (char *)name; - - return (struct section *)avl_search_lock(&root->index, (avl_t *) &tmp); -} - - -// ---------------------------------------------------------------------------- -// config section methods - -static inline struct section *appconfig_section_find(struct config *root, const char *section) { - return appconfig_index_find(root, section, 0); -} - -static inline struct section *appconfig_section_create(struct config *root, const char *section) { - netdata_log_debug(D_CONFIG, "Creating section '%s'.", section); - - struct section *co = callocz(1, sizeof(struct section)); - co->name = strdupz(section); - co->hash = simple_hash(co->name); - netdata_mutex_init(&co->mutex); - - avl_init_lock(&co->values_index, appconfig_option_compare); - - if(unlikely(appconfig_index_add(root, co) != co)) - netdata_log_error("INTERNAL ERROR: indexing of section '%s', already exists.", co->name); +int appconfig_exists(struct config *root, const char *section, const char *name) { + struct config_section *sect = appconfig_section_find(root, section); + if(!sect) return 0; - appconfig_wrlock(root); - struct section *co2 = root->last_section; - if(co2) { - co2->next = co; - } else { - root->first_section = co; - } - root->last_section = co; - appconfig_unlock(root); + struct config_option *opt = appconfig_option_find(sect, name); + if(!opt) return 0; - return co; + return 1; } -void appconfig_section_destroy_non_loaded(struct config *root, const char *section) -{ - struct section *co; - struct config_option *cv, *cv_next; - - netdata_log_debug(D_CONFIG, "Destroying section '%s'.", section); - - co = appconfig_section_find(root, section); - if(!co) { - netdata_log_error("Could not destroy section '%s'. Not found.", section); - return; - } - - config_section_wrlock(co); - for(cv = co->values; cv ; cv = cv->next) { - if (cv->flags & CONFIG_VALUE_LOADED) { - /* Do not destroy values that were loaded from the configuration files. 
*/ - config_section_unlock(co); - return; - } - } - for(cv = co->values ; cv ; cv = cv_next) { - cv_next = cv->next; - if(unlikely(!appconfig_option_index_del(co, cv))) - netdata_log_error("Cannot remove config option '%s' from section '%s'.", cv->name, co->name); - freez(cv->value); - freez(cv->name); - freez(cv); - } - co->values = NULL; - config_section_unlock(co); - - if (unlikely(!appconfig_index_del(root, co))) { - netdata_log_error("Cannot remove section '%s' from config.", section); +void appconfig_set_default_raw_value(struct config *root, const char *section, const char *name, const char *value) { + struct config_section *sect = appconfig_section_find(root, section); + if(!sect) { + appconfig_set_raw_value(root, section, name, value, CONFIG_VALUE_TYPE_UNKNOWN); return; } - - appconfig_wrlock(root); - - if (root->first_section == co) { - root->first_section = co->next; - - if (root->last_section == co) - root->last_section = root->first_section; - } else { - struct section *co_cur = root->first_section, *co_prev = NULL; - - while(co_cur && co_cur != co) { - co_prev = co_cur; - co_cur = co_cur->next; - } - - if (co_cur) { - co_prev->next = co_cur->next; - - if (root->last_section == co_cur) - root->last_section = co_prev; - } - } - - appconfig_unlock(root); - - avl_destroy_lock(&co->values_index); - freez(co->name); - pthread_mutex_destroy(&co->mutex); - freez(co); -} - -void appconfig_section_option_destroy_non_loaded(struct config *root, const char *section, const char *name) -{ - netdata_log_debug(D_CONFIG, "Destroying section option '%s -> %s'.", section, name); - struct section *co; - co = appconfig_section_find(root, section); - if (!co) { - netdata_log_error("Could not destroy section option '%s -> %s'. 
The section not found.", section, name); + struct config_option *opt = appconfig_option_find(sect, name); + if(!opt) { + appconfig_set_raw_value(root, section, name, value, CONFIG_VALUE_TYPE_UNKNOWN); return; } - config_section_wrlock(co); + opt->flags |= CONFIG_VALUE_USED; - struct config_option *cv; - - cv = appconfig_option_index_find(co, name, simple_hash(name)); - - if (cv && cv->flags & CONFIG_VALUE_LOADED) { - config_section_unlock(co); + if(opt->flags & CONFIG_VALUE_LOADED) return; - } - if (unlikely(!(cv && appconfig_option_index_del(co, cv)))) { - config_section_unlock(co); - netdata_log_error("Could not destroy section option '%s -> %s'. The option not found.", section, name); - return; - } + if(string_strcmp(opt->value, value) != 0) { + opt->flags |= CONFIG_VALUE_CHANGED; - if (co->values == cv) { - co->values = co->values->next; - } else { - struct config_option *cv_cur = co->values, *cv_prev = NULL; - while (cv_cur && cv_cur != cv) { - cv_prev = cv_cur; - cv_cur = cv_cur->next; - } - if (cv_cur) { - cv_prev->next = cv_cur->next; - } + string_freez(opt->value); + opt->value = string_strdupz(value); } - - freez(cv->value); - freez(cv->name); - freez(cv); - - config_section_unlock(co); - return; -} - -// ---------------------------------------------------------------------------- -// config name-value methods - -static inline struct config_option *appconfig_value_create(struct section *co, const char *name, const char *value) { - netdata_log_debug(D_CONFIG, "Creating config entry for name '%s', value '%s', in section '%s'.", name, value, co->name); - - struct config_option *cv = callocz(1, sizeof(struct config_option)); - cv->name = strdupz(name); - cv->hash = simple_hash(cv->name); - cv->value = strdupz(value); - - struct config_option *found = appconfig_option_index_add(co, cv); - if(found != cv) { - netdata_log_error("indexing of config '%s' in section '%s': already exists - using the existing one.", cv->name, co->name); - freez(cv->value); - 
freez(cv->name); - freez(cv); - return found; - } - - config_section_wrlock(co); - struct config_option *cv2 = co->values; - if(cv2) { - while (cv2->next) cv2 = cv2->next; - cv2->next = cv; - } - else co->values = cv; - config_section_unlock(co); - - return cv; } -int appconfig_exists(struct config *root, const char *section, const char *name) { - struct config_option *cv; - - netdata_log_debug(D_CONFIG, "request to get config in section '%s', name '%s'", section, name); - - struct section *co = appconfig_section_find(root, section); - if(!co) return 0; - - cv = appconfig_option_index_find(co, name, 0); - if(!cv) return 0; - - return 1; -} - -int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new) { - struct config_option *cv_old, *cv_new; - int ret = -1; - - netdata_log_debug(D_CONFIG, "request to rename config in section '%s', old name '%s', to section '%s', new name '%s'", section_old, name_old, section_new, name_new); - - struct section *co_old = appconfig_section_find(root, section_old); - if(!co_old) return ret; - - struct section *co_new = appconfig_section_find(root, section_new); - if(!co_new) co_new = appconfig_section_create(root, section_new); - - config_section_wrlock(co_old); - if(co_old != co_new) - config_section_wrlock(co_new); - - cv_old = appconfig_option_index_find(co_old, name_old, 0); - if(!cv_old) goto cleanup; - - cv_new = appconfig_option_index_find(co_new, name_new, 0); - if(cv_new) goto cleanup; - - if(unlikely(appconfig_option_index_del(co_old, cv_old) != cv_old)) - netdata_log_error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted the wrong config entry.", cv_old->name, co_old->name); - - if(co_old->values == cv_old) { - co_old->values = cv_old->next; - } - else { - struct config_option *t; - for(t = co_old->values; t && t->next != cv_old ;t = t->next) ; - if(!t || t->next != cv_old) - netdata_log_error("INTERNAL ERROR: cannot find variable 
'%s' in section '%s' of the config - but it should be there.", cv_old->name, co_old->name); - else - t->next = cv_old->next; - } - - freez(cv_old->name); - cv_old->name = strdupz(name_new); - cv_old->hash = simple_hash(cv_old->name); - - cv_new = cv_old; - cv_new->next = co_new->values; - co_new->values = cv_new; - - if(unlikely(appconfig_option_index_add(co_new, cv_old) != cv_old)) - netdata_log_error("INTERNAL ERROR: re-indexing of config '%s' in section '%s', already exists.", cv_old->name, co_new->name); - - ret = 0; - -cleanup: - if(co_old != co_new) - config_section_unlock(co_new); - config_section_unlock(co_old); - return ret; -} - -char *appconfig_get_by_section(struct section *co, const char *name, const char *default_value) -{ - struct config_option *cv; - - // Only calls internal to this file check for a NULL result and they do not supply a NULL arg. - // External caller should treat NULL as an error case. - cv = appconfig_option_index_find(co, name, 0); - if (!cv) { - if (!default_value) return NULL; - cv = appconfig_value_create(co, name, default_value); - if (!cv) return NULL; - } - cv->flags |= CONFIG_VALUE_USED; - - if((cv->flags & CONFIG_VALUE_LOADED) || (cv->flags & CONFIG_VALUE_CHANGED)) { - // this is a loaded value from the config file - // if it is different than the default, mark it - if(!(cv->flags & CONFIG_VALUE_CHECKED)) { - if(default_value && strcmp(cv->value, default_value) != 0) cv->flags |= CONFIG_VALUE_CHANGED; - cv->flags |= CONFIG_VALUE_CHECKED; - } - } - - return(cv->value); -} - - -char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value) -{ - if (default_value == NULL) - netdata_log_debug(D_CONFIG, "request to get config in section '%s', name '%s' or fail", section, name); - else - netdata_log_debug(D_CONFIG, "request to get config in section '%s', name '%s', default_value '%s'", section, name, default_value); - - struct section *co = appconfig_section_find(root, section); - if 
(!co && !default_value) - return NULL; - if(!co) co = appconfig_section_create(root, section); - - return appconfig_get_by_section(co, name, default_value); -} +bool stream_conf_needs_dbengine(struct config *root) { + struct config_section *sect; + bool ret = false; -long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value) -{ - char buffer[100], *s; - sprintf(buffer, "%lld", value); + APPCONFIG_LOCK(root); + for(sect = root->sections; sect; sect = sect->next) { + if(string_strcmp(sect->name, "stream") == 0) + continue; // the first section is not relevant - s = appconfig_get(root, section, name, buffer); - if(!s) return value; - - return strtoll(s, NULL, 0); -} - -NETDATA_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value) -{ - char buffer[100], *s; - sprintf(buffer, "%0.5" NETDATA_DOUBLE_MODIFIER, value); - - s = appconfig_get(root, section, name, buffer); - if(!s) return value; - - return str2ndd(s, NULL); -} - -inline int appconfig_test_boolean_value(char *s) { - if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on") - || !strcasecmp(s, "auto") || !strcasecmp(s, "on demand")) - return 1; - - return 0; -} - -int appconfig_get_boolean_by_section(struct section *co, const char *name, int value) { - char *s; - - s = appconfig_get_by_section(co, name, (!value)?"no":"yes"); - if(!s) return value; - - return appconfig_test_boolean_value(s); -} - -int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value) -{ - char *s; - if(value) s = "yes"; - else s = "no"; - - s = appconfig_get(root, section, name, s); - if(!s) return value; - - return appconfig_test_boolean_value(s); -} - -int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value) -{ - char *s; - - if(value == CONFIG_BOOLEAN_AUTO) - s = "auto"; - - else if(value == CONFIG_BOOLEAN_NO) - s = "no"; - - else - s 
= "yes"; - - s = appconfig_get(root, section, name, s); - if(!s) return value; - - if(!strcmp(s, "yes") || !strcmp(s, "true") || !strcmp(s, "on")) - return CONFIG_BOOLEAN_YES; - else if(!strcmp(s, "no") || !strcmp(s, "false") || !strcmp(s, "off")) - return CONFIG_BOOLEAN_NO; - else if(!strcmp(s, "auto") || !strcmp(s, "on demand")) - return CONFIG_BOOLEAN_AUTO; - - return value; -} - -const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value) -{ - struct config_option *cv; - - netdata_log_debug(D_CONFIG, "request to set default config in section '%s', name '%s', value '%s'", section, name, value); - - struct section *co = appconfig_section_find(root, section); - if(!co) return appconfig_set(root, section, name, value); - - cv = appconfig_option_index_find(co, name, 0); - if(!cv) return appconfig_set(root, section, name, value); - - cv->flags |= CONFIG_VALUE_USED; - - if(cv->flags & CONFIG_VALUE_LOADED) - return cv->value; - - if(strcmp(cv->value, value) != 0) { - cv->flags |= CONFIG_VALUE_CHANGED; - - freez(cv->value); - cv->value = strdupz(value); - } - - return cv->value; -} - -const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value) -{ - struct config_option *cv; - - netdata_log_debug(D_CONFIG, "request to set config in section '%s', name '%s', value '%s'", section, name, value); - - struct section *co = appconfig_section_find(root, section); - if(!co) co = appconfig_section_create(root, section); - - cv = appconfig_option_index_find(co, name, 0); - if(!cv) cv = appconfig_value_create(co, name, value); - cv->flags |= CONFIG_VALUE_USED; - - if(strcmp(cv->value, value) != 0) { - cv->flags |= CONFIG_VALUE_CHANGED; - - freez(cv->value); - cv->value = strdupz(value); - } - - return value; -} - -long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value) -{ - char buffer[100]; - sprintf(buffer, "%lld", value); - - 
appconfig_set(root, section, name, buffer); - - return value; -} - -NETDATA_DOUBLE appconfig_set_float(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value) -{ - char buffer[100]; - sprintf(buffer, "%0.5" NETDATA_DOUBLE_MODIFIER, value); - - appconfig_set(root, section, name, buffer); - - return value; -} - -int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value) -{ - char *s; - if(value) s = "yes"; - else s = "no"; - - appconfig_set(root, section, name, s); - - return value; -} - -int appconfig_get_duration(struct config *root, const char *section, const char *name, const char *value) -{ - int result = 0; - const char *s; - - s = appconfig_get(root, section, name, value); - if(!s) goto fallback; - - if(!config_parse_duration(s, &result)) { - netdata_log_error("config option '[%s].%s = %s' is configured with an valid duration", section, name, s); - goto fallback; - } - - return result; - - fallback: - if(!config_parse_duration(value, &result)) - netdata_log_error("INTERNAL ERROR: default duration supplied for option '[%s].%s = %s' is not a valid duration", section, name, value); - - return result; -} - -// ---------------------------------------------------------------------------- -// config load/save - -int appconfig_load(struct config *root, char *filename, int overwrite_used, const char *section_name) -{ - int line = 0; - struct section *co = NULL; - int is_exporter_config = 0; - int _connectors = 0; // number of exporting connector sections we have - char working_instance[CONFIG_MAX_NAME + 1]; - char working_connector[CONFIG_MAX_NAME + 1]; - struct section *working_connector_section = NULL; - int global_exporting_section = 0; - - char buffer[CONFIG_FILE_LINE_MAX + 1], *s; - - if(!filename) filename = CONFIG_DIR "/" CONFIG_FILENAME; - - netdata_log_debug(D_CONFIG, "CONFIG: opening config file '%s'", filename); - - FILE *fp = fopen(filename, "r"); - if(!fp) { - if(errno != ENOENT) - 
netdata_log_info("CONFIG: cannot open file '%s'. Using internal defaults.", filename); - - return 0; - } - - uint32_t section_hash = 0; - if(section_name) { - section_hash = simple_hash(section_name); - } - is_exporter_config = (strstr(filename, EXPORTING_CONF) != NULL); - - while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) { - buffer[CONFIG_FILE_LINE_MAX] = '\0'; - line++; - - s = trim(buffer); - if(!s || *s == '#') { - netdata_log_debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', it is empty.", line, filename); - continue; - } - - int len = (int) strlen(s); - if(*s == '[' && s[len - 1] == ']') { - // new section - s[len - 1] = '\0'; - s++; - - if (is_exporter_config) { - global_exporting_section = - !(strcmp(s, CONFIG_SECTION_EXPORTING)) || !(strcmp(s, CONFIG_SECTION_PROMETHEUS)); - if (unlikely(!global_exporting_section)) { - int rc; - rc = is_valid_connector(s, 0); - if (likely(rc)) { - strncpyz(working_connector, s, CONFIG_MAX_NAME); - s = s + rc + 1; - if (unlikely(!(*s))) { - _connectors++; - sprintf(buffer, "instance_%d", _connectors); - s = buffer; - } - strncpyz(working_instance, s, CONFIG_MAX_NAME); - working_connector_section = NULL; - if (unlikely(appconfig_section_find(root, working_instance))) { - netdata_log_error("Instance (%s) already exists", working_instance); - co = NULL; - continue; - } - } else { - co = NULL; - netdata_log_error("Section (%s) does not specify a valid connector", s); - continue; - } - } - } - - co = appconfig_section_find(root, s); - if(!co) co = appconfig_section_create(root, s); - - if(co && section_name && overwrite_used && section_hash == co->hash && !strcmp(section_name, co->name)) { - config_section_wrlock(co); - struct config_option *cv2 = co->values; - while (cv2) { - struct config_option *save = cv2->next; - struct config_option *found = appconfig_option_index_del(co, cv2); - if(found != cv2) - netdata_log_error("INTERNAL ERROR: Cannot remove '%s' from section '%s', it was not inserted before.", - 
cv2->name, co->name); - - freez(cv2->name); - freez(cv2->value); - freez(cv2); - cv2 = save; - } - co->values = NULL; - config_section_unlock(co); - } - - continue; - } - - if(!co) { - // line outside a section - netdata_log_error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename); - continue; - } - - if(section_name && overwrite_used && section_hash != co->hash && strcmp(section_name, co->name)) { - continue; - } - - char *name = s; - char *value = strchr(s, '='); - if(!value) { - netdata_log_error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename); - continue; - } - *value = '\0'; - value++; - - name = trim(name); - value = trim(value); - - if(!name || *name == '#') { - netdata_log_error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename); + struct config_option *opt = appconfig_get_raw_value_of_option_in_section(sect, "enabled", NULL, CONFIG_VALUE_TYPE_UNKNOWN, NULL); + if(!opt || !appconfig_test_boolean_value(string2str(opt->value))) continue; - } - - if(!value) value = ""; - - struct config_option *cv = appconfig_option_index_find(co, name, 0); - - if (!cv) { - cv = appconfig_value_create(co, name, value); - if (likely(is_exporter_config) && unlikely(!global_exporting_section)) { - if (unlikely(!working_connector_section)) { - working_connector_section = appconfig_section_find(root, working_connector); - if (!working_connector_section) - working_connector_section = appconfig_section_create(root, working_connector); - if (likely(working_connector_section)) { - add_connector_instance(working_connector_section, co); - } - } - } - } else { - if (((cv->flags & CONFIG_VALUE_USED) && overwrite_used) || !(cv->flags & CONFIG_VALUE_USED)) { - netdata_log_debug( - D_CONFIG, "CONFIG: line %d of file '%s', overwriting '%s/%s'.", line, filename, co->name, cv->name); - freez(cv->value); - cv->value = strdupz(value); - } else - netdata_log_debug( - D_CONFIG, - "CONFIG: 
ignoring line %d of file '%s', '%s/%s' is already present and used.", - line, - filename, - co->name, - cv->name); - } - cv->flags |= CONFIG_VALUE_LOADED; - } - - fclose(fp); - return 1; -} - -void appconfig_generate(struct config *root, BUFFER *wb, int only_changed, bool netdata_conf) -{ - int i, pri; - struct section *co; - struct config_option *cv; - - { - int found_host_labels = 0; - for (co = root->first_section; co; co = co->next) - if(!strcmp(co->name, CONFIG_SECTION_HOST_LABEL)) - found_host_labels = 1; - - if(netdata_conf && !found_host_labels) { - appconfig_section_create(root, CONFIG_SECTION_HOST_LABEL); - appconfig_get(root, CONFIG_SECTION_HOST_LABEL, "name", "value"); + opt = appconfig_get_raw_value_of_option_in_section(sect, "db", NULL, CONFIG_VALUE_TYPE_UNKNOWN, NULL); + if(opt && string_strcmp(opt->value, "dbengine") == 0) { + ret = true; + break; } } + APPCONFIG_UNLOCK(root); - if(netdata_conf) { - buffer_strcat(wb, - "# netdata configuration\n" - "#\n" - "# You can download the latest version of this file, using:\n" - "#\n" - "# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n" - "# or\n" - "# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n" - "#\n" - "# You can uncomment and change any of the options below.\n" - "# The value shown in the commented settings, is the default value.\n" - "#\n" - "\n# global netdata configuration\n"); - } - - for(i = 0; i <= 17 ;i++) { - appconfig_wrlock(root); - for(co = root->first_section; co ; co = co->next) { - if(!strcmp(co->name, CONFIG_SECTION_GLOBAL)) pri = 0; - else if(!strcmp(co->name, CONFIG_SECTION_DB)) pri = 1; - else if(!strcmp(co->name, CONFIG_SECTION_DIRECTORIES)) pri = 2; - else if(!strcmp(co->name, CONFIG_SECTION_LOGS)) pri = 3; - else if(!strcmp(co->name, CONFIG_SECTION_ENV_VARS)) pri = 4; - else if(!strcmp(co->name, CONFIG_SECTION_HOST_LABEL)) pri = 5; - else if(!strcmp(co->name, CONFIG_SECTION_SQLITE)) pri = 6; - else if(!strcmp(co->name, 
CONFIG_SECTION_CLOUD)) pri = 7; - else if(!strcmp(co->name, CONFIG_SECTION_ML)) pri = 8; - else if(!strcmp(co->name, CONFIG_SECTION_HEALTH)) pri = 9; - else if(!strcmp(co->name, CONFIG_SECTION_WEB)) pri = 10; - else if(!strcmp(co->name, CONFIG_SECTION_WEBRTC)) pri = 11; - // by default, new sections will get pri = 12 (set at the end, below) - else if(!strcmp(co->name, CONFIG_SECTION_REGISTRY)) pri = 13; - else if(!strcmp(co->name, CONFIG_SECTION_GLOBAL_STATISTICS)) pri = 14; - else if(!strcmp(co->name, CONFIG_SECTION_PLUGINS)) pri = 15; - else if(!strcmp(co->name, CONFIG_SECTION_STATSD)) pri = 16; - else if(!strncmp(co->name, "plugin:", 7)) pri = 17; // << change the loop too if you change this - else pri = 12; // this is used for any new (currently unknown) sections - - if(i == pri) { - int loaded = 0; - int used = 0; - int changed = 0; - int count = 0; - - config_section_wrlock(co); - for(cv = co->values; cv ; cv = cv->next) { - used += (cv->flags & CONFIG_VALUE_USED)?1:0; - loaded += (cv->flags & CONFIG_VALUE_LOADED)?1:0; - changed += (cv->flags & CONFIG_VALUE_CHANGED)?1:0; - count++; - } - config_section_unlock(co); - - if(!count) continue; - if(only_changed && !changed && !loaded) continue; - - if(!used) { - buffer_sprintf(wb, "\n# section '%s' is not used.", co->name); - } - - buffer_sprintf(wb, "\n[%s]\n", co->name); - - config_section_wrlock(co); - for(cv = co->values; cv ; cv = cv->next) { - - if(used && !(cv->flags & CONFIG_VALUE_USED)) { - buffer_sprintf(wb, "\n\t# option '%s' is not used.\n", cv->name); - } - buffer_sprintf(wb, "\t%s%s = %s\n", - ( - !(cv->flags & CONFIG_VALUE_LOADED) && - !(cv->flags & CONFIG_VALUE_CHANGED) && - (cv->flags & CONFIG_VALUE_USED) - )?"# ":"", cv->name, cv->value); - } - config_section_unlock(co); - } - } - appconfig_unlock(root); - } + return ret; } -/** - * Parse Duration - * - * Parse the string setting the result - * - * @param string the timestamp string - * @param result the output variable - * - * @return It returns 
1 on success and 0 otherwise - */ -int config_parse_duration(const char* string, int* result) { - while(*string && isspace((uint8_t)*string)) string++; - - if(unlikely(!*string)) goto fallback; - - if(*string == 'n' && !strcmp(string, "never")) { - // this is a valid option - *result = 0; - return 1; - } +bool stream_conf_has_uuid_section(struct config *root) { + struct config_section *sect = NULL; + bool is_parent = false; - // make sure it is a number - if(!(isdigit((uint8_t)*string) || *string == '+' || *string == '-')) goto fallback; + APPCONFIG_LOCK(root); + for (sect = root->sections; sect; sect = sect->next) { + nd_uuid_t uuid; - char *e = NULL; - NETDATA_DOUBLE n = str2ndd(string, &e); - if(e && *e) { - switch (*e) { - case 'Y': - *result = (int) (n * 31536000); - break; - case 'M': - *result = (int) (n * 2592000); - break; - case 'w': - *result = (int) (n * 604800); - break; - case 'd': - *result = (int) (n * 86400); - break; - case 'h': - *result = (int) (n * 3600); - break; - case 'm': - *result = (int) (n * 60); - break; - case 's': - default: - *result = (int) (n); - break; + if (uuid_parse(string2str(sect->name), uuid) != -1 && + appconfig_get_boolean_by_section(sect, "enabled", 0)) { + is_parent = true; + break; } } - else - *result = (int)(n); - - return 1; - - fallback: - *result = 0; - return 0; -} + APPCONFIG_UNLOCK(root); -struct section *appconfig_get_section(struct config *root, const char *name) -{ - return appconfig_section_find(root, name); + return is_parent; } diff --git a/src/libnetdata/config/appconfig.h b/src/libnetdata/config/appconfig.h index bdb6c4bd5afae6..f1551b387c148f 100644 --- a/src/libnetdata/config/appconfig.h +++ b/src/libnetdata/config/appconfig.h @@ -103,7 +103,6 @@ #define CONFIG_SECTION_GLOBAL_STATISTICS "global statistics" #define CONFIG_SECTION_DB "db" - // these are used to limit the configuration names and values lengths // they are not enforced by config.c functions (they will strdup() all strings, no matter of 
their length) #define CONFIG_MAX_NAME 1024 @@ -113,94 +112,43 @@ // Config definitions #define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2) -#define CONFIG_VALUE_LOADED 0x01 // has been loaded from the config -#define CONFIG_VALUE_USED 0x02 // has been accessed from the program -#define CONFIG_VALUE_CHANGED 0x04 // has been changed from the loaded value or the internal default value -#define CONFIG_VALUE_CHECKED 0x08 // has been checked if the value is different from the default - -struct config_option { - avl_t avl_node; // the index entry of this entry - this has to be first! - - uint8_t flags; - uint32_t hash; // a simple hash to speed up searching - // we first compare hashes, and only if the hashes are equal we do string comparisons - - char *name; - char *value; - - struct config_option *next; // config->mutex protects just this -}; - -struct section { - avl_t avl_node; // the index entry of this section - this has to be first! - - uint32_t hash; // a simple hash to speed up searching - // we first compare hashes, and only if the hashes are equal we do string comparisons - - char *name; - - struct section *next; // global config_mutex protects just this - - struct config_option *values; - avl_tree_lock values_index; - - netdata_mutex_t mutex; // this locks only the writers, to ensure atomic updates - // readers are protected using the rwlock in avl_tree_lock -}; +struct config_section; struct config { - struct section *first_section; - struct section *last_section; // optimize inserting at the end - netdata_mutex_t mutex; + struct config_section *sections; + SPINLOCK spinlock; avl_tree_lock index; }; -#define CONFIG_BOOLEAN_INVALID 100 // an invalid value to check for validity (used as default initialization when needed) +#define APPCONFIG_INITIALIZER (struct config) { \ + .sections = NULL, \ + .spinlock = NETDATA_SPINLOCK_INITIALIZER, \ + .index = { \ + .avl_tree = { \ + .root = NULL, \ + .compar = appconfig_section_compare, \ + }, 
\ + .rwlock = AVL_LOCK_INITIALIZER, \ + }, \ + } -#define CONFIG_BOOLEAN_NO 0 // disabled -#define CONFIG_BOOLEAN_YES 1 // enabled +int appconfig_load(struct config *root, char *filename, int overwrite_used, const char *section_name); -#ifndef CONFIG_BOOLEAN_AUTO -#define CONFIG_BOOLEAN_AUTO 2 // enabled if it has useful info when enabled -#endif +typedef bool (*appconfig_foreach_value_cb_t)(void *data, const char *name, const char *value); +size_t appconfig_foreach_value_in_section(struct config *root, const char *section, appconfig_foreach_value_cb_t cb, void *data); -int appconfig_load(struct config *root, char *filename, int overwrite_used, const char *section_name); -void config_section_wrlock(struct section *co); -void config_section_unlock(struct section *co); - -char *appconfig_get_by_section(struct section *co, const char *name, const char *default_value); -char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value); -long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value); -NETDATA_DOUBLE appconfig_get_float(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value); -int appconfig_get_boolean_by_section(struct section *co, const char *name, int value); -int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value); -int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value); -int appconfig_get_duration(struct config *root, const char *section, const char *name, const char *value); - -const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value); -const char *appconfig_set_default(struct config *root, const char *section, const char *name, const char *value); -long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value); -NETDATA_DOUBLE appconfig_set_float(struct 
config *root, const char *section, const char *name, NETDATA_DOUBLE value); -int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value); +// sets a raw value, only if it is not loaded from the config +void appconfig_set_default_raw_value(struct config *root, const char *section, const char *name, const char *value); int appconfig_exists(struct config *root, const char *section, const char *name); int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new); +int appconfig_move_everywhere(struct config *root, const char *name_old, const char *name_new); void appconfig_generate(struct config *root, BUFFER *wb, int only_changed, bool netdata_conf); int appconfig_section_compare(void *a, void *b); -void appconfig_section_destroy_non_loaded(struct config *root, const char *section); -void appconfig_section_option_destroy_non_loaded(struct config *root, const char *section, const char *name); - -int config_parse_duration(const char* string, int* result); - -struct section *appconfig_get_section(struct config *root, const char *name); - -void appconfig_wrlock(struct config *root); -void appconfig_unlock(struct config *root); - -int appconfig_test_boolean_value(char *s); +bool appconfig_test_boolean_value(const char *s); struct connector_instance { char instance_name[CONFIG_MAX_NAME + 1]; @@ -208,13 +156,37 @@ struct connector_instance { }; typedef struct _connector_instance { - struct section *connector; // actual connector - struct section *instance; // This instance + struct config_section *connector; // actual connector + struct config_section *instance; // This instance char instance_name[CONFIG_MAX_NAME + 1]; char connector_name[CONFIG_MAX_NAME + 1]; struct _connector_instance *next; // Next instance } _CONNECTOR_INSTANCE; -_CONNECTOR_INSTANCE *add_connector_instance(struct section *connector, struct section *instance); +_CONNECTOR_INSTANCE 
*add_connector_instance(struct config_section *connector, struct config_section *instance); + +// ---------------------------------------------------------------------------- +// shortcuts for the default netdata configuration + +#define config_load(filename, overwrite_used, section) appconfig_load(&netdata_config, filename, overwrite_used, section) + +#define config_set_default_raw_value(section, name, value) appconfig_set_default_raw_value(&netdata_config, section, name, value) + +#define config_exists(section, name) appconfig_exists(&netdata_config, section, name) +#define config_move(section_old, name_old, section_new, name_new) appconfig_move(&netdata_config, section_old, name_old, section_new, name_new) + +#define netdata_conf_generate(buffer, only_changed) appconfig_generate(&netdata_config, buffer, only_changed, true) + +#define config_section_destroy(section) appconfig_section_destroy_non_loaded(&netdata_config, section) +#define config_section_option_destroy(section, name) appconfig_section_option_destroy_non_loaded(&netdata_config, section, name) + +bool stream_conf_needs_dbengine(struct config *root); +bool stream_conf_has_uuid_section(struct config *root); + +#include "appconfig_api_text.h" +#include "appconfig_api_numbers.h" +#include "appconfig_api_boolean.h" +#include "appconfig_api_sizes.h" +#include "appconfig_api_durations.h" -#endif /* NETDATA_CONFIG_H */ \ No newline at end of file +#endif // NETDATA_CONFIG_H diff --git a/src/libnetdata/config/appconfig_api_boolean.c b/src/libnetdata/config/appconfig_api_boolean.c new file mode 100644 index 00000000000000..abe5157366f282 --- /dev/null +++ b/src/libnetdata/config/appconfig_api_boolean.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" +#include "appconfig_api_boolean.h" + +bool appconfig_test_boolean_value(const char *s) { + if(!strcasecmp(s, "yes") || !strcasecmp(s, "true") || !strcasecmp(s, "on") + || !strcasecmp(s, "auto") || !strcasecmp(s, 
"on demand")) + return true; + + return false; +} + +int appconfig_get_boolean_by_section(struct config_section *sect, const char *name, int value) { + struct config_option *opt = appconfig_get_raw_value_of_option_in_section( + sect, name, (!value) ? "no" : "yes", CONFIG_VALUE_TYPE_BOOLEAN, NULL); + if(!opt) return value; + + return appconfig_test_boolean_value(string2str(opt->value)); +} + +int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value) { + const char *s; + if(value) s = "yes"; + else s = "no"; + + struct config_option *opt = appconfig_get_raw_value(root, section, name, s, CONFIG_VALUE_TYPE_BOOLEAN, NULL); + if(!opt) return value; + s = string2str(opt->value); + + return appconfig_test_boolean_value(s); +} + +int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value) { + const char *s; + + if(value == CONFIG_BOOLEAN_AUTO) + s = "auto"; + + else if(value == CONFIG_BOOLEAN_NO) + s = "no"; + + else + s = "yes"; + + struct config_option *opt = appconfig_get_raw_value(root, section, name, s, CONFIG_VALUE_TYPE_BOOLEAN_ONDEMAND, NULL); + if(!opt) return value; + + s = string2str(opt->value); + if(!strcmp(s, "yes") || !strcmp(s, "true") || !strcmp(s, "on")) + return CONFIG_BOOLEAN_YES; + else if(!strcmp(s, "no") || !strcmp(s, "false") || !strcmp(s, "off")) + return CONFIG_BOOLEAN_NO; + else if(!strcmp(s, "auto") || !strcmp(s, "on demand")) + return CONFIG_BOOLEAN_AUTO; + + return value; +} + +int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value) { + const char *s; + if(value) s = "yes"; + else s = "no"; + + appconfig_set_raw_value(root, section, name, s, CONFIG_VALUE_TYPE_BOOLEAN); + + return value; +} diff --git a/src/libnetdata/config/appconfig_api_boolean.h b/src/libnetdata/config/appconfig_api_boolean.h new file mode 100644 index 00000000000000..2b05fce6030b02 --- /dev/null +++ b/src/libnetdata/config/appconfig_api_boolean.h @@ 
-0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_APPCONFIG_API_BOOLEAN_H +#define NETDATA_APPCONFIG_API_BOOLEAN_H + +#define CONFIG_BOOLEAN_INVALID 100 // an invalid value to check for validity (used as default initialization when needed) + +#define CONFIG_BOOLEAN_NO 0 // disabled +#define CONFIG_BOOLEAN_YES 1 // enabled + +#ifndef CONFIG_BOOLEAN_AUTO +#define CONFIG_BOOLEAN_AUTO 2 // enabled if it has useful info when enabled +#endif + +int appconfig_get_boolean(struct config *root, const char *section, const char *name, int value); +#define config_get_boolean(section, name, value) appconfig_get_boolean(&netdata_config, section, name, value) + +int appconfig_get_boolean_ondemand(struct config *root, const char *section, const char *name, int value); +#define config_get_boolean_ondemand(section, name, value) appconfig_get_boolean_ondemand(&netdata_config, section, name, value) + +int appconfig_set_boolean(struct config *root, const char *section, const char *name, int value); +#define config_set_boolean(section, name, value) appconfig_set_boolean(&netdata_config, section, name, value) + +#endif //NETDATA_APPCONFIG_API_BOOLEAN_H diff --git a/src/libnetdata/config/appconfig_api_durations.c b/src/libnetdata/config/appconfig_api_durations.c new file mode 100644 index 00000000000000..88c462ac64a905 --- /dev/null +++ b/src/libnetdata/config/appconfig_api_durations.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" +#include "appconfig_api_durations.h" + + +static STRING *reformat_duration_seconds(STRING *value) { + int result = 0; + if(!duration_parse_seconds(string2str(value), &result)) + return value; + + char buf[128]; + if(duration_snprintf_time_t(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) { + string_freez(value); + return string_strdupz(buf); + } + + return value; +} + +time_t appconfig_get_duration_seconds(struct config *root, const char *section, const char 
*name, time_t default_value) { + char default_str[128]; + duration_snprintf_time_t(default_str, sizeof(default_str), default_value); + + struct config_option *opt = appconfig_get_raw_value( + root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_SECS, reformat_duration_seconds); + if(!opt) + return default_value; + + const char *s = string2str(opt->value); + + int result = 0; + if(!duration_parse_seconds(s, &result)) { + appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_SECS); + netdata_log_error("config option '[%s].%s = %s' is configured with an invalid duration", section, name, s); + return default_value; + } + + return ABS(result); +} + +time_t appconfig_set_duration_seconds(struct config *root, const char *section, const char *name, time_t value) { + char str[128]; + duration_snprintf_time_t(str, sizeof(str), value); + + appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_DURATION_IN_SECS); + return value; +} + +static STRING *reformat_duration_ms(STRING *value) { + int64_t result = 0; + if(!duration_parse_msec_t(string2str(value), &result)) + return value; + + char buf[128]; + if(duration_snprintf_msec_t(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) { + string_freez(value); + return string_strdupz(buf); + } + + return value; +} + +msec_t appconfig_get_duration_ms(struct config *root, const char *section, const char *name, msec_t default_value) { + char default_str[128]; + duration_snprintf_msec_t(default_str, sizeof(default_str), default_value); + + struct config_option *opt = appconfig_get_raw_value( + root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_MS, reformat_duration_ms); + if(!opt) + return default_value; + + const char *s = string2str(opt->value); + + smsec_t result = 0; + if(!duration_parse_msec_t(s, &result)) { + appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_MS); + netdata_log_error("config option '[%s].%s = %s' 
is configured with an invalid duration", section, name, s); + return default_value; + } + + return ABS(result); +} + +msec_t appconfig_set_duration_ms(struct config *root, const char *section, const char *name, msec_t value) { + char str[128]; + duration_snprintf_msec_t(str, sizeof(str), (smsec_t)value); + + appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_DURATION_IN_MS); + return value; +} + +static STRING *reformat_duration_days(STRING *value) { + int64_t result = 0; + if(!duration_parse_days(string2str(value), &result)) + return value; + + char buf[128]; + if(duration_snprintf_days(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) { + string_freez(value); + return string_strdupz(buf); + } + + return value; +} + +unsigned appconfig_get_duration_days(struct config *root, const char *section, const char *name, unsigned default_value) { + char default_str[128]; + duration_snprintf_days(default_str, sizeof(default_str), (int)default_value); + + struct config_option *opt = appconfig_get_raw_value( + root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_DAYS, reformat_duration_days); + if(!opt) + return default_value; + + const char *s = string2str(opt->value); + + int64_t result = 0; + if(!duration_parse_days(s, &result)) { + appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_DURATION_IN_DAYS); + netdata_log_error("config option '[%s].%s = %s' is configured with an invalid duration", section, name, s); + return default_value; + } + + return (unsigned)ABS(result); +} + +unsigned appconfig_set_duration_days(struct config *root, const char *section, const char *name, unsigned value) { + char str[128]; + duration_snprintf_days(str, sizeof(str), value); + appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_DURATION_IN_DAYS); + return value; +} + diff --git a/src/libnetdata/config/appconfig_api_durations.h b/src/libnetdata/config/appconfig_api_durations.h new file mode 100644 index 
00000000000000..26d6c6ba307861 --- /dev/null +++ b/src/libnetdata/config/appconfig_api_durations.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_APPCONFIG_API_DURATIONS_H +#define NETDATA_APPCONFIG_API_DURATIONS_H + +msec_t appconfig_get_duration_ms(struct config *root, const char *section, const char *name, msec_t default_value); +msec_t appconfig_set_duration_ms(struct config *root, const char *section, const char *name, msec_t value); +#define config_get_duration_ms(section, name, value) appconfig_get_duration_ms(&netdata_config, section, name, value) +#define config_set_duration_ms(section, name, value) appconfig_set_duration_ms(&netdata_config, section, name, value) + +time_t appconfig_get_duration_seconds(struct config *root, const char *section, const char *name, time_t default_value); +time_t appconfig_set_duration_seconds(struct config *root, const char *section, const char *name, time_t value); +#define config_get_duration_seconds(section, name, value) appconfig_get_duration_seconds(&netdata_config, section, name, value) +#define config_set_duration_seconds(section, name, value) appconfig_set_duration_seconds(&netdata_config, section, name, value) + +unsigned appconfig_get_duration_days(struct config *root, const char *section, const char *name, unsigned default_value); +unsigned appconfig_set_duration_days(struct config *root, const char *section, const char *name, unsigned value); +#define config_get_duration_days(section, name, value) appconfig_get_duration_days(&netdata_config, section, name, value) +#define config_set_duration_days(section, name, value) appconfig_set_duration_days(&netdata_config, section, name, value) + +#endif //NETDATA_APPCONFIG_API_DURATIONS_H diff --git a/src/libnetdata/config/appconfig_api_numbers.c b/src/libnetdata/config/appconfig_api_numbers.c new file mode 100644 index 00000000000000..cc3776c1865306 --- /dev/null +++ b/src/libnetdata/config/appconfig_api_numbers.c @@ -0,0 +1,43 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" +#include "appconfig_api_numbers.h" + +long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value) { + char buffer[100]; + sprintf(buffer, "%lld", value); + + struct config_option *opt = appconfig_get_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_INTEGER, NULL); + if(!opt) return value; + + const char *s = string2str(opt->value); + return strtoll(s, NULL, 0); +} + +NETDATA_DOUBLE appconfig_get_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value) { + char buffer[100]; + sprintf(buffer, "%0.5" NETDATA_DOUBLE_MODIFIER, value); + + struct config_option *opt = appconfig_get_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_DOUBLE, NULL); + if(!opt) return value; + + const char *s = string2str(opt->value); + return str2ndd(s, NULL); +} + +long long appconfig_set_number(struct config *root, const char *section, const char *name, long long value) { + char buffer[100]; + sprintf(buffer, "%lld", value); + + appconfig_set_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_INTEGER); + return value; +} + +NETDATA_DOUBLE appconfig_set_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value) { + char buffer[100]; + sprintf(buffer, "%0.5" NETDATA_DOUBLE_MODIFIER, value); + + appconfig_set_raw_value(root, section, name, buffer, CONFIG_VALUE_TYPE_DOUBLE); + return value; +} + diff --git a/src/libnetdata/config/appconfig_api_numbers.h b/src/libnetdata/config/appconfig_api_numbers.h new file mode 100644 index 00000000000000..58d382e3d055f5 --- /dev/null +++ b/src/libnetdata/config/appconfig_api_numbers.h @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_APPCONFIG_API_NUMBERS_H +#define NETDATA_APPCONFIG_API_NUMBERS_H + +long long appconfig_get_number(struct config *root, const char *section, const char *name, long long value); +long long 
appconfig_set_number(struct config *root, const char *section, const char *name, long long value); +#define config_get_number(section, name, value) appconfig_get_number(&netdata_config, section, name, value) +#define config_set_number(section, name, value) appconfig_set_number(&netdata_config, section, name, value) + +NETDATA_DOUBLE appconfig_get_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value); +NETDATA_DOUBLE appconfig_set_double(struct config *root, const char *section, const char *name, NETDATA_DOUBLE value); +#define config_get_double(section, name, value) appconfig_get_double(&netdata_config, section, name, value) +#define config_set_double(section, name, value) appconfig_set_float(&netdata_config, section, name, value) + +#endif //NETDATA_APPCONFIG_API_NUMBERS_H diff --git a/src/libnetdata/config/appconfig_api_sizes.c b/src/libnetdata/config/appconfig_api_sizes.c new file mode 100644 index 00000000000000..67b1dce9ef131a --- /dev/null +++ b/src/libnetdata/config/appconfig_api_sizes.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" +#include "appconfig_api_sizes.h" + +static STRING *reformat_size_bytes(STRING *value) { + uint64_t result = 0; + if(!size_parse_bytes(string2str(value), &result)) + return value; + + char buf[128]; + if(size_snprintf_bytes(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) { + string_freez(value); + return string_strdupz(buf); + } + + return value; +} + +uint64_t appconfig_get_size_bytes(struct config *root, const char *section, const char *name, uint64_t default_value) { + char default_str[128]; + size_snprintf_bytes(default_str, sizeof(default_str), (int)default_value); + + struct config_option *opt = + appconfig_get_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_BYTES, reformat_size_bytes); + if(!opt) + return default_value; + + const char *s = string2str(opt->value); + uint64_t result = 0; + 
if(!size_parse_bytes(s, &result)) { + appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_BYTES); + netdata_log_error("config option '[%s].%s = %s' is configured with an invalid size", section, name, s); + return default_value; + } + + return result; +} + +uint64_t appconfig_set_size_bytes(struct config *root, const char *section, const char *name, uint64_t value) { + char str[128]; + size_snprintf_bytes(str, sizeof(str), value); + appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_SIZE_IN_BYTES); + return value; +} + +static STRING *reformat_size_mb(STRING *value) { + uint64_t result = 0; + if(!size_parse_mb(string2str(value), &result)) + return value; + + char buf[128]; + if(size_snprintf_mb(buf, sizeof(buf), result) > 0 && string_strcmp(value, buf) != 0) { + string_freez(value); + return string_strdupz(buf); + } + + return value; +} + +uint64_t appconfig_get_size_mb(struct config *root, const char *section, const char *name, uint64_t default_value) { + char default_str[128]; + size_snprintf_mb(default_str, sizeof(default_str), (int)default_value); + + struct config_option *opt = + appconfig_get_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_MB, reformat_size_mb); + if(!opt) + return default_value; + + const char *s = string2str(opt->value); + uint64_t result = 0; + if(!size_parse_mb(s, &result)) { + appconfig_set_raw_value(root, section, name, default_str, CONFIG_VALUE_TYPE_SIZE_IN_MB); + netdata_log_error("config option '[%s].%s = %s' is configured with an invalid size", section, name, s); + return default_value; + } + + return (unsigned)result; +} + +uint64_t appconfig_set_size_mb(struct config *root, const char *section, const char *name, uint64_t value) { + char str[128]; + size_snprintf_mb(str, sizeof(str), value); + appconfig_set_raw_value(root, section, name, str, CONFIG_VALUE_TYPE_SIZE_IN_MB); + return value; +} diff --git a/src/libnetdata/config/appconfig_api_sizes.h 
b/src/libnetdata/config/appconfig_api_sizes.h new file mode 100644 index 00000000000000..98ef209fed4afc --- /dev/null +++ b/src/libnetdata/config/appconfig_api_sizes.h @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_APPCONFIG_API_SIZES_H +#define NETDATA_APPCONFIG_API_SIZES_H + +uint64_t appconfig_get_size_bytes(struct config *root, const char *section, const char *name, uint64_t default_value); +uint64_t appconfig_set_size_bytes(struct config *root, const char *section, const char *name, uint64_t value); +#define config_get_size_bytes(section, name, value) appconfig_get_size_bytes(&netdata_config, section, name, value) +#define config_set_size_bytes(section, name, value) appconfig_set_size_bytes(&netdata_config, section, name, value) + +uint64_t appconfig_get_size_mb(struct config *root, const char *section, const char *name, uint64_t default_value); +uint64_t appconfig_set_size_mb(struct config *root, const char *section, const char *name, uint64_t value); +#define config_get_size_mb(section, name, value) appconfig_get_size_mb(&netdata_config, section, name, value) +#define config_set_size_mb(section, name, value) appconfig_set_size_mb(&netdata_config, section, name, value) + +#endif //NETDATA_APPCONFIG_API_SIZES_H diff --git a/src/libnetdata/config/appconfig_api_text.c b/src/libnetdata/config/appconfig_api_text.c new file mode 100644 index 00000000000000..b314972f0f265b --- /dev/null +++ b/src/libnetdata/config/appconfig_api_text.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" +#include "appconfig_api_text.h" + +const char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value) { + struct config_option *opt = appconfig_get_raw_value(root, section, name, default_value, CONFIG_VALUE_TYPE_TEXT, NULL); + if(!opt) + return default_value; + + return string2str(opt->value); +} + +const char *appconfig_set(struct config *root, const char 
*section, const char *name, const char *value) { + struct config_option *opt = appconfig_set_raw_value(root, section, name, value, CONFIG_VALUE_TYPE_TEXT); + return string2str(opt->value); +} diff --git a/src/libnetdata/config/appconfig_api_text.h b/src/libnetdata/config/appconfig_api_text.h new file mode 100644 index 00000000000000..7e1e85f7e9dff4 --- /dev/null +++ b/src/libnetdata/config/appconfig_api_text.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_APPCONFIG_API_TEXT_H +#define NETDATA_APPCONFIG_API_TEXT_H + +const char *appconfig_get(struct config *root, const char *section, const char *name, const char *default_value); +const char *appconfig_set(struct config *root, const char *section, const char *name, const char *value); +#define config_get(section, name, default_value) appconfig_get(&netdata_config, section, name, default_value) +#define config_set(section, name, default_value) appconfig_set(&netdata_config, section, name, default_value) + + +#endif //NETDATA_APPCONFIG_API_TEXT_H diff --git a/src/libnetdata/config/appconfig_cleanup.c b/src/libnetdata/config/appconfig_cleanup.c new file mode 100644 index 00000000000000..22f4ac3e9f8aa2 --- /dev/null +++ b/src/libnetdata/config/appconfig_cleanup.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" + +void appconfig_section_destroy_non_loaded(struct config *root, const char *section) +{ + struct config_section *sect; + struct config_option *opt; + + netdata_log_debug(D_CONFIG, "Destroying section '%s'.", section); + + sect = appconfig_section_find(root, section); + if(!sect) { + netdata_log_error("Could not destroy section '%s'. Not found.", section); + return; + } + + SECTION_LOCK(sect); + + // find if there is any loaded option + for(opt = sect->values; opt; opt = opt->next) { + if (opt->flags & CONFIG_VALUE_LOADED) { + // do not destroy values that were loaded from the configuration files. 
+ SECTION_UNLOCK(sect); + return; + } + } + + // no option is loaded, free them all + appconfig_section_remove_and_delete(root, sect, false, true); +} + +void appconfig_section_option_destroy_non_loaded(struct config *root, const char *section, const char *name) { + struct config_section *sect; + sect = appconfig_section_find(root, section); + if (!sect) { + netdata_log_error("Could not destroy section option '%s -> %s'. The section not found.", section, name); + return; + } + + SECTION_LOCK(sect); + + struct config_option *opt = appconfig_option_find(sect, name); + if (opt && opt->flags & CONFIG_VALUE_LOADED) { + SECTION_UNLOCK(sect); + return; + } + + if (unlikely(!(opt && appconfig_option_del(sect, opt)))) { + SECTION_UNLOCK(sect); + netdata_log_error("Could not destroy section option '%s -> %s'. The option not found.", section, name); + return; + } + + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(sect->values, opt, prev, next); + + appconfig_option_free(opt); + SECTION_UNLOCK(sect); +} + diff --git a/src/libnetdata/config/appconfig_conf_file.c b/src/libnetdata/config/appconfig_conf_file.c new file mode 100644 index 00000000000000..4ac8b376e4c530 --- /dev/null +++ b/src/libnetdata/config/appconfig_conf_file.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "appconfig_internals.h" + +ENUM_STR_MAP_DEFINE(CONFIG_VALUE_TYPES) = { + { .id = CONFIG_VALUE_TYPE_UNKNOWN, .name ="unknown", }, + { .id = CONFIG_VALUE_TYPE_TEXT, .name ="text", }, + { .id = CONFIG_VALUE_TYPE_HOSTNAME, .name ="hostname", }, + { .id = CONFIG_VALUE_TYPE_USERNAME, .name ="username", }, + { .id = CONFIG_VALUE_TYPE_FILENAME, .name ="filename", }, + { .id = CONFIG_VALUE_TYPE_PATH, .name ="path", }, + { .id = CONFIG_VALUE_TYPE_SIMPLE_PATTERN, .name ="simple pattern", }, + { .id = CONFIG_VALUE_TYPE_URL, .name ="URL", }, + { .id = CONFIG_VALUE_TYPE_ENUM, .name ="one of keywords", }, + { .id = CONFIG_VALUE_TYPE_BITMAP, .name ="any of keywords", }, + { .id = 
CONFIG_VALUE_TYPE_INTEGER, .name ="number (integer)", },
    { .id = CONFIG_VALUE_TYPE_DOUBLE, .name ="number (double)", },
    { .id = CONFIG_VALUE_TYPE_BOOLEAN, .name ="yes or no", },
    { .id = CONFIG_VALUE_TYPE_BOOLEAN_ONDEMAND, .name ="yes, no, or auto", },
    { .id = CONFIG_VALUE_TYPE_DURATION_IN_SECS, .name ="duration (seconds)", },
    { .id = CONFIG_VALUE_TYPE_DURATION_IN_MS, .name ="duration (ms)", },
    { .id = CONFIG_VALUE_TYPE_DURATION_IN_DAYS, .name ="duration (days)", },
    { .id = CONFIG_VALUE_TYPE_SIZE_IN_BYTES, .name ="size (bytes)", },
    { .id = CONFIG_VALUE_TYPE_SIZE_IN_MB, .name ="size (MiB)", },
};

ENUM_STR_DEFINE_FUNCTIONS(CONFIG_VALUE_TYPES, CONFIG_VALUE_TYPE_UNKNOWN, "unknown");


// ----------------------------------------------------------------------------
// config load/save

// Parse an INI-style configuration file into 'root'.
//
// filename       - the file to parse; when NULL, CONFIG_DIR "/" CONFIG_FILENAME is used
// overwrite_used - when non-zero, values already marked CONFIG_VALUE_USED are
//                  replaced by the values found in the file
// section_name   - when non-NULL (and overwrite_used is set), only that section
//                  is (re)loaded: its existing options are removed first and
//                  option lines of all other sections are skipped
//
// Returns 1 when the file was parsed, 0 when it could not be opened.
int appconfig_load(struct config *root, char *filename, int overwrite_used, const char *section_name) {
    int line = 0;
    struct config_section *sect = NULL;
    int is_exporter_config = 0;
    int _connectors = 0;    // number of exporting connector sections we have
    char working_instance[CONFIG_MAX_NAME + 1];
    char working_connector[CONFIG_MAX_NAME + 1];
    struct config_section *working_connector_section = NULL;
    int global_exporting_section = 0;

    char buffer[CONFIG_FILE_LINE_MAX + 1], *s;

    if(!filename) filename = CONFIG_DIR "/" CONFIG_FILENAME;

    netdata_log_debug(D_CONFIG, "CONFIG: opening config file '%s'", filename);

    FILE *fp = fopen(filename, "r");
    if(!fp) {
        // a missing config file is normal operation - anything else is worth logging
        if(errno != ENOENT)
            netdata_log_info("CONFIG: cannot open file '%s'. Using internal defaults.", filename);

        return 0;
    }

    CLEAN_STRING *section_string = string_strdupz(section_name);
    is_exporter_config = (strstr(filename, EXPORTING_CONF) != NULL);

    while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
        buffer[CONFIG_FILE_LINE_MAX] = '\0';
        line++;

        s = trim(buffer);
        if(!s || *s == '#') {
            netdata_log_debug(D_CONFIG, "CONFIG: ignoring line %d of file '%s', it is empty.", line, filename);
            continue;
        }

        int len = (int) strlen(s);
        if(*s == '[' && s[len - 1] == ']') {
            // new section
            s[len - 1] = '\0';
            s++;

            if (is_exporter_config) {
                // in the exporting config, sections are either the global ones
                // or "connector:instance" definitions
                global_exporting_section = !(strcmp(s, CONFIG_SECTION_EXPORTING)) || !(strcmp(s, CONFIG_SECTION_PROMETHEUS));

                if (unlikely(!global_exporting_section)) {
                    int rc;
                    rc = is_valid_connector(s, 0);  // truncates s at the last ':', returns connector name length
                    if (likely(rc)) {
                        strncpyz(working_connector, s, CONFIG_MAX_NAME);
                        s = s + rc + 1;             // skip past "connector:" to the instance name
                        if (unlikely(!(*s))) {
                            // no instance name given - auto-generate one
                            _connectors++;
                            sprintf(buffer, "instance_%d", _connectors);
                            s = buffer;
                        }
                        strncpyz(working_instance, s, CONFIG_MAX_NAME);
                        working_connector_section = NULL;
                        if (unlikely(appconfig_section_find(root, working_instance))) {
                            netdata_log_error("Instance (%s) already exists", working_instance);
                            sect = NULL;
                            continue;
                        }
                    }
                    else {
                        sect = NULL;
                        netdata_log_error("Section (%s) does not specify a valid connector", s);
                        continue;
                    }
                }
            }

            sect = appconfig_section_find(root, s);
            if(!sect)
                sect = appconfig_section_create(root, s);

            // when re-loading a specific section, drop its current options first
            if(sect && section_string && overwrite_used && section_string == sect->name) {
                SECTION_LOCK(sect);

                while(sect->values)
                    appconfig_option_remove_and_delete(sect, sect->values, true);

                SECTION_UNLOCK(sect);
            }

            continue;
        }

        if(!sect) {
            // line outside a section
            netdata_log_error("CONFIG: ignoring line %d ('%s') of file '%s', it is outside all sections.", line, s, filename);
            continue;
        }

        // when loading a single section, skip the options of all other sections
        if(section_string && overwrite_used && section_string != sect->name)
            continue;

        char *name = s;
        char *value = strchr(s, '=');
        if(!value) {
            netdata_log_error("CONFIG: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
            continue;
        }
        *value = '\0';
        value++;

        name = trim(name);
        value = trim(value);

        if(!name || *name == '#') {
            netdata_log_error("CONFIG: ignoring line %d of file '%s', name is empty.", line, filename);
            continue;
        }

        if(!value) value = "";

        struct config_option *opt = appconfig_option_find(sect, name);

        if (!opt) {
            opt = appconfig_option_create(sect, name, value);
            // first option seen for a new exporter instance: link the instance
            // section to its connector section
            if (likely(is_exporter_config) && unlikely(!global_exporting_section)) {
                if (unlikely(!working_connector_section)) {
                    working_connector_section = appconfig_section_find(root, working_connector);
                    if (!working_connector_section)
                        working_connector_section = appconfig_section_create(root, working_connector);
                    if (likely(working_connector_section)) {
                        add_connector_instance(working_connector_section, sect);
                    }
                }
            }
        }
        else {
            // existing option: overwrite unless it is already USED and
            // overwriting used values was not requested
            if (((opt->flags & CONFIG_VALUE_USED) && overwrite_used) || !(opt->flags & CONFIG_VALUE_USED)) {
                string_freez(opt->value);
                opt->value = string_strdupz(value);
            }
        }
        opt->flags |= CONFIG_VALUE_LOADED;
    }

    fclose(fp);

    return 1;
}

// Serialize 'root' into 'wb' as an INI-style configuration file.
// When 'only_changed' is set, sections with no loaded/changed options are skipped.
// When 'netdata_conf' is set, the netdata.conf banner and a host labels
// section are emitted too.
void appconfig_generate(struct config *root, BUFFER *wb, int only_changed, bool netdata_conf)
{
    int i, pri;
    struct config_section *sect;
    struct config_option *opt;

    {
        int found_host_labels = 0;
        for (sect = root->sections; sect; sect = sect->next)
            if(!string_strcmp(sect->name, CONFIG_SECTION_HOST_LABEL))
                found_host_labels = 1;

        if(netdata_conf && !found_host_labels) {
            appconfig_section_create(root, CONFIG_SECTION_HOST_LABEL);
            appconfig_get_raw_value(root, CONFIG_SECTION_HOST_LABEL, "name", "value", CONFIG_VALUE_TYPE_TEXT, NULL);
        }
    }

    if(netdata_conf) {
        buffer_strcat(wb,
                      "# netdata configuration\n"
                      "#\n"
                      "# You can download the latest version of this file, using:\n"
                      "#\n"
                      "# wget -O
/etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n"
                      "# or\n"
                      "# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf\n"
                      "#\n"
                      "# You can uncomment and change any of the options below.\n"
                      "# The value shown in the commented settings, is the default value.\n"
                      "#\n"
                      "\n# global netdata configuration\n");
    }

    // emit sections in a fixed, human-friendly order:
    // pass i of the outer loop emits the sections whose priority 'pri' equals i
    for(i = 0; i <= 17 ;i++) {
        APPCONFIG_LOCK(root);
        for(sect = root->sections; sect; sect = sect->next) {
            if(!string_strcmp(sect->name, CONFIG_SECTION_GLOBAL)) pri = 0;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_DB)) pri = 1;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_DIRECTORIES)) pri = 2;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_LOGS)) pri = 3;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_ENV_VARS)) pri = 4;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_HOST_LABEL)) pri = 5;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_SQLITE)) pri = 6;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_CLOUD)) pri = 7;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_ML)) pri = 8;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_HEALTH)) pri = 9;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_WEB)) pri = 10;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_WEBRTC)) pri = 11;
            // by default, new sections will get pri = 12 (set at the end, below)
            else if(!string_strcmp(sect->name, CONFIG_SECTION_REGISTRY)) pri = 13;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_GLOBAL_STATISTICS)) pri = 14;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_PLUGINS)) pri = 15;
            else if(!string_strcmp(sect->name, CONFIG_SECTION_STATSD)) pri = 16;
            else if(!string_strncmp(sect->name, "plugin:", 7)) pri = 17; // << change the loop too if you change this
            else pri = 12; // this is used for any new (currently unknown) sections

            if(i == pri) {
                int loaded = 0;
                int used = 0;
                int changed = 0;
                int count = 0;

                // first pass over the options: gather flag statistics
                SECTION_LOCK(sect);
                for(opt = sect->values; opt; opt = opt->next) {
                    used += (opt->flags & CONFIG_VALUE_USED)?1:0;
                    loaded += (opt->flags & CONFIG_VALUE_LOADED)?1:0;
                    changed += (opt->flags & CONFIG_VALUE_CHANGED)?1:0;
                    count++;
                }
                SECTION_UNLOCK(sect);

                if(!count) continue;
                if(only_changed && !changed && !loaded) continue;

                if(!used)
                    buffer_sprintf(wb, "\n# section '%s' is not used.", string2str(sect->name));

                buffer_sprintf(wb, "\n[%s]\n", string2str(sect->name));

                // second pass: emit the options, with "#|" annotation comments
                size_t options_added = 0;
                bool last_had_comments = false;
                SECTION_LOCK(sect);
                for(opt = sect->values; opt; opt = opt->next) {
                    bool unused = used && !(opt->flags & CONFIG_VALUE_USED);
                    bool migrated = used && (opt->flags & CONFIG_VALUE_MIGRATED);
                    bool reformatted = used && (opt->flags & CONFIG_VALUE_REFORMATTED);
                    bool show_default = used && (opt->flags & (CONFIG_VALUE_LOADED|CONFIG_VALUE_CHANGED) && opt->value_default);

                    if((unused || migrated || reformatted || show_default)) {
                        if(options_added)
                            buffer_strcat(wb, "\n");

                        buffer_sprintf(wb, "\t#| >>> [%s].%s <<<\n",
                                       string2str(sect->name), string2str(opt->name));

                        last_had_comments = true;
                    }
                    else if(last_had_comments) {
                        buffer_strcat(wb, "\n");
                        last_had_comments = false;
                    }

                    if(unused)
                        buffer_sprintf(wb, "\t#| found in the config file, but is not used\n");

                    if(migrated && reformatted)
                        buffer_sprintf(wb, "\t#| migrated from: [%s].%s = %s\n",
                                       string2str(opt->migrated.section), string2str(opt->migrated.name),
                                       string2str(opt->value_original));
                    else {
                        if (migrated)
                            buffer_sprintf(wb, "\t#| migrated from: [%s].%s\n",
                                           string2str(opt->migrated.section), string2str(opt->migrated.name));

                        if (reformatted)
                            buffer_sprintf(wb, "\t#| reformatted from: %s\n",
                                           string2str(opt->value_original));
                    }

                    if(show_default)
                        buffer_sprintf(wb, "\t#| datatype: %s, default value: %s\n",
                                       CONFIG_VALUE_TYPES_2str(opt->type),
                                       string2str(opt->value_default));

                    // options that are used but were never loaded or changed
                    // (i.e. are at their defaults) are emitted commented-out
                    buffer_sprintf(wb, "\t%s%s = %s\n",
                                   (
                                       !(opt->flags & CONFIG_VALUE_LOADED) &&
                                       !(opt->flags & CONFIG_VALUE_CHANGED) &&
                                       (opt->flags & CONFIG_VALUE_USED)
                                   ) ? "# " : "",
                                   string2str(opt->name),
                                   string2str(opt->value));

                    options_added++;
                }
                SECTION_UNLOCK(sect);
            }
        }
        APPCONFIG_UNLOCK(root);
    }
}
diff --git a/src/libnetdata/config/appconfig_exporters.c b/src/libnetdata/config/appconfig_exporters.c
new file mode 100644
index 00000000000000..1fafb298cda9cf
--- /dev/null
+++ b/src/libnetdata/config/appconfig_exporters.c
// SPDX-License-Identifier: GPL-3.0-or-later

#include "appconfig_internals.h"

/*
 * Register an exporting (connector, instance) pair in a process-global list,
 * or query/clear that list.
 *
 * @Input:
 *     Connector / instance to add to an internal structure
 * @Return
 *     The current head of the linked list of connector_instance
 *
 * Special cases: connector == NULL && instance == NULL returns the current
 * head without modifying the list; connector == NULL with instance != NULL
 * frees the whole list and returns NULL.
 * NOTE(review): the list is kept in a function-local static - presumably
 * single-threaded use during config load; confirm before concurrent use.
 */

_CONNECTOR_INSTANCE *add_connector_instance(struct config_section *connector, struct config_section *instance)
{
    static struct _connector_instance *global_connector_instance = NULL;
    struct _connector_instance *local_ci, *local_ci_tmp;

    if (unlikely(!connector)) {
        if (unlikely(!instance))
            return global_connector_instance;

        // connector == NULL, instance != NULL: free the whole list
        local_ci = global_connector_instance;
        while (local_ci) {
            local_ci_tmp = local_ci->next;
            freez(local_ci);
            local_ci = local_ci_tmp;
        }
        global_connector_instance = NULL;
        return NULL;
    }

    local_ci = callocz(1, sizeof(struct _connector_instance));
    local_ci->instance = instance;
    local_ci->connector = connector;
    strncpyz(local_ci->instance_name, string2str(instance->name), CONFIG_MAX_NAME);
    strncpyz(local_ci->connector_name, string2str(connector->name), CONFIG_MAX_NAME);
    local_ci->next = global_connector_instance;
    global_connector_instance = local_ci;

    return global_connector_instance;
}

// Check whether 'type' names a known exporting connector.
//
// With check_reserved == 0, 'type' is expected to look like
// "connector:instance": the string is TRUNCATED IN PLACE at the last ':'
// and the return value is the length of the connector part (0 when invalid).
// With check_reserved != 0, 'type' is compared as-is against the reserved
// connector names, returning 1 on a match, 0 otherwise.
int is_valid_connector(char *type, int check_reserved) {
    int rc = 1;

    if (unlikely(!type))
        return 0;

    if (!check_reserved) {
        // a bare reserved name (no instance part) is not a valid section name
        if (unlikely(is_valid_connector(type,1))) {
            return 0;
        }
        //if (unlikely(*type == ':')
        //    return 0;
        char *separator = strrchr(type, ':');
        if (likely(separator)) {
            *separator = '\0';
            rc = (int)(separator - type);   // length of the connector prefix
        } else
            return 0;
    }
    // else {
    //     if (unlikely(is_valid_connector(type,1))) {
    //         netdata_log_error("Section %s invalid -- reserved name", type);
    //         return 0;
    //     }
    // }

    // 'type' now holds only the connector part - match it against the
    // known connector names (and their scheme-qualified variants)
    if (!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) {
        return rc;
    } else if (!strcmp(type, "graphite:http") || !strcmp(type, "graphite:https")) {
        return rc;
    } else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) {
        return rc;
    } else if (!strcmp(type, "json:http") || !strcmp(type, "json:https")) {
        return rc;
    } else if (!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) {
        return rc;
    } else if (!strcmp(type, "opentsdb:http") || !strcmp(type, "opentsdb:https")) {
        return rc;
    } else if (!strcmp(type, "prometheus_remote_write")) {
        return rc;
    } else if (!strcmp(type, "prometheus_remote_write:http") || !strcmp(type, "prometheus_remote_write:https")) {
        return rc;
    } else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) {
        return rc;
    } else if (!strcmp(type, "pubsub") || !strcmp(type, "pubsub:plaintext")) {
        return rc;
    } else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext")) {
        return rc;
    }

    return 0;
}

diff --git a/src/libnetdata/config/appconfig_internals.h b/src/libnetdata/config/appconfig_internals.h
new file mode 100644
index 00000000000000..492e8ce5c7e704
--- /dev/null
+++ b/src/libnetdata/config/appconfig_internals.h
// SPDX-License-Identifier: GPL-3.0-or-later

#ifndef NETDATA_APPCONFIG_INTERNALS_H
#define NETDATA_APPCONFIG_INTERNALS_H

#include "appconfig.h"

// The declared datatype of a config option; used by appconfig_generate()
// to annotate the emitted config file.
typedef enum __attribute__((packed)) {
    CONFIG_VALUE_TYPE_UNKNOWN = 0,
    CONFIG_VALUE_TYPE_TEXT,
    CONFIG_VALUE_TYPE_HOSTNAME,
    CONFIG_VALUE_TYPE_USERNAME,
    CONFIG_VALUE_TYPE_FILENAME,
    CONFIG_VALUE_TYPE_PATH,
    CONFIG_VALUE_TYPE_SIMPLE_PATTERN,
    CONFIG_VALUE_TYPE_URL,
    CONFIG_VALUE_TYPE_ENUM,
    CONFIG_VALUE_TYPE_BITMAP,
    CONFIG_VALUE_TYPE_INTEGER,
    CONFIG_VALUE_TYPE_DOUBLE,
    CONFIG_VALUE_TYPE_BOOLEAN,
    CONFIG_VALUE_TYPE_BOOLEAN_ONDEMAND,
    CONFIG_VALUE_TYPE_DURATION_IN_SECS,
    CONFIG_VALUE_TYPE_DURATION_IN_MS,
    CONFIG_VALUE_TYPE_DURATION_IN_DAYS,
    CONFIG_VALUE_TYPE_SIZE_IN_BYTES,
    CONFIG_VALUE_TYPE_SIZE_IN_MB,
} CONFIG_VALUE_TYPES;

// Lifecycle flags of a config option (bitmask).
typedef enum __attribute__((packed)) {
    CONFIG_VALUE_LOADED = (1 << 0),         // has been loaded from the config
    CONFIG_VALUE_USED = (1 << 1),           // has been accessed from the program
    CONFIG_VALUE_CHANGED = (1 << 2),        // has been changed from the loaded value or the internal default value
    CONFIG_VALUE_CHECKED = (1 << 3),        // has been checked if the value is different from the default
    CONFIG_VALUE_MIGRATED = (1 << 4),       // has been migrated from an old config
    CONFIG_VALUE_REFORMATTED = (1 << 5),    // has been reformatted with the official formatting
} CONFIG_VALUE_FLAGS;

// A single "name = value" option; indexed in its section's AVL tree and
// linked into the section's doubly-linked list (which preserves file order).
struct config_option {
    avl_t avl_node;         // the index entry of this entry - this has to be first!

    CONFIG_VALUE_TYPES type;
    CONFIG_VALUE_FLAGS flags;

    STRING *name;
    STRING *value;

    STRING *value_original; // the original value of this option (the first value it got, independently on how it got it)
    STRING *value_default;  // the internal default value of this option (the first value it got, from appconfig_get_XXX())

    // when we move options around, this is where we keep the original
    // section and name (of the first migration)
    struct {
        STRING *section;
        STRING *name;
    } migrated;

    struct config_option *prev, *next; // config->mutex protects just this
};

// A "[section]" of the config file with its indexed and ordered options.
struct config_section {
    avl_t avl_node;         // the index entry of this section - this has to be first!

    STRING *name;

    struct config_option *values;
    avl_tree_lock values_index;

    SPINLOCK spinlock;
    struct config_section *prev, *next; // global config_mutex protects just this
};

// ----------------------------------------------------------------------------
// locking

#define APPCONFIG_LOCK(root) spinlock_lock(&((root)->spinlock))
#define APPCONFIG_UNLOCK(root) spinlock_unlock(&((root)->spinlock))
#define SECTION_LOCK(sect) spinlock_lock(&((sect)->spinlock))
// NOTE(review): the expansion below carries a trailing ';' - unlike the
// three macros above - which makes "if(x) SECTION_UNLOCK(s); else ..."
// a syntax error; consider dropping it for consistency.
#define SECTION_UNLOCK(sect) spinlock_unlock(&((sect)->spinlock));

// config sections
void appconfig_section_free(struct config_section *sect);
void appconfig_section_remove_and_delete(struct config *root, struct config_section *sect, bool have_root_lock, bool have_sect_lock);
#define appconfig_section_add(root, cfg) (struct config_section *)avl_insert_lock(&(root)->index, (avl_t *)(cfg))
#define appconfig_section_del(root, cfg) (struct config_section *)avl_remove_lock(&(root)->index, (avl_t *)(cfg))
struct config_section *appconfig_section_find(struct config *root, const char *name);
struct config_section *appconfig_section_create(struct config *root, const char *section);

// config options
void appconfig_option_cleanup(struct config_option *opt);
void appconfig_option_free(struct config_option *opt);
void appconfig_option_remove_and_delete(struct config_section *sect, struct config_option *opt, bool have_sect_lock);
void appconfig_option_remove_and_delete_all(struct config_section *sect, bool have_sect_lock);
int appconfig_option_compare(void *a, void *b);
#define appconfig_option_add(co, cv) (struct config_option *)avl_insert_lock(&((co)->values_index), (avl_t *)(cv))
#define appconfig_option_del(co, cv) (struct config_option *)avl_remove_lock(&((co)->values_index), (avl_t *)(cv))
struct config_option *appconfig_option_find(struct config_section *sect, const char *name);
struct config_option *appconfig_option_create(struct config_section *sect, const char *name, const char *value);

// lookup
int appconfig_get_boolean_by_section(struct config_section *sect, const char *name, int value);

// optional callback to normalize a value to its official formatting
typedef STRING *(*reformat_t)(STRING *value);
struct config_option *appconfig_get_raw_value_of_option_in_section(struct config_section *sect, const char *option, const char *default_value, CONFIG_VALUE_TYPES type, reformat_t cb);
struct config_option *appconfig_get_raw_value(struct config *root, const char *section, const char *option, const char *default_value, CONFIG_VALUE_TYPES type, reformat_t cb);

void appconfig_set_raw_value_of_option(struct config_option *opt, const char *value, CONFIG_VALUE_TYPES type);
struct config_option *appconfig_set_raw_value_of_option_in_section(struct config_section *sect, const char *option, const char *value, CONFIG_VALUE_TYPES type);
struct config_option *appconfig_set_raw_value(struct config *root, const char *section, const char *option, const char *value, CONFIG_VALUE_TYPES type);

// cleanup
void appconfig_section_destroy_non_loaded(struct config *root, const char *section);
void appconfig_section_option_destroy_non_loaded(struct config *root, const char *section, const char *name);

// exporters
_CONNECTOR_INSTANCE *add_connector_instance(struct config_section *connector, struct config_section *instance);
int is_valid_connector(char *type, int check_reserved);

#endif //NETDATA_APPCONFIG_INTERNALS_H
diff --git a/src/libnetdata/config/appconfig_migrate.c b/src/libnetdata/config/appconfig_migrate.c
new file mode 100644
index 00000000000000..0c21ec06ce3d16
--- /dev/null
+++ b/src/libnetdata/config/appconfig_migrate.c
// SPDX-License-Identifier: GPL-3.0-or-later

#include "appconfig_internals.h"

// Rename/move option [section_old].name_old to [section_new].name_new,
// keeping its value and recording where it came from (so the generated
// config can show the migration). Returns 0 on success, -1 when the source
// option does not exist or the destination already exists.
int appconfig_move(struct config *root, const char *section_old, const char *name_old, const char *section_new, const char *name_new) {
    struct config_option *opt_old, *opt_new;
    int ret = -1;

    netdata_log_debug(D_CONFIG, "request to rename config in section
'%s', old name '%s', to section '%s', new name '%s'", section_old, name_old, section_new, name_new);

    struct config_section *sect_old = appconfig_section_find(root, section_old);
    if(!sect_old) return ret;

    struct config_section *sect_new = appconfig_section_find(root, section_new);
    if(!sect_new) sect_new = appconfig_section_create(root, section_new);

    // lock both sections (only once when moving within the same section)
    SECTION_LOCK(sect_old);
    if(sect_old != sect_new)
        SECTION_LOCK(sect_new);

    opt_old = appconfig_option_find(sect_old, name_old);
    if(!opt_old) goto cleanup;

    opt_new = appconfig_option_find(sect_new, name_new);
    if(opt_new) goto cleanup; // destination already exists - do not overwrite

    if(unlikely(appconfig_option_del(sect_old, opt_old) != opt_old))
        netdata_log_error("INTERNAL ERROR: deletion of config '%s' from section '%s', deleted the wrong config entry.",
                          string2str(opt_old->name), string2str(sect_old->name));

    // remember the old position of the item
    struct config_option *opt_old_next = (sect_old == sect_new) ? opt_old->next : NULL;

    DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(sect_old->values, opt_old, prev, next);

    nd_log(NDLS_DAEMON, NDLP_WARNING,
           "CONFIG: option '[%s].%s' has been migrated to '[%s].%s'.",
           section_old, name_old,
           section_new, name_new);

    // record only the FIRST migration origin
    if(!opt_old->migrated.name) {
        string_freez(opt_old->migrated.section);
        opt_old->migrated.section = string_dup(sect_old->name);
        opt_old->migrated.name = opt_old->name;
    }
    else
        string_freez(opt_old->name);

    opt_old->name = string_strdupz(name_new);
    opt_old->flags |= CONFIG_VALUE_MIGRATED;

    opt_new = opt_old;

    // put in the list, but try to keep the order
    if(opt_old_next && sect_old == sect_new)
        DOUBLE_LINKED_LIST_INSERT_ITEM_BEFORE_UNSAFE(sect_new->values, opt_old_next, opt_new, prev, next);
    else {
        // we don't have the old next item (probably a different section?)
        // find the last MIGRATED one
        struct config_option *t = sect_new->values ? sect_new->values->prev : NULL;
        for (; t && t != sect_new->values ; t = t->prev) {
            if (t->flags & CONFIG_VALUE_MIGRATED)
                break;
        }
        if (t == sect_new->values)
            DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(sect_new->values, opt_new, prev, next);
        else
            DOUBLE_LINKED_LIST_INSERT_ITEM_AFTER_UNSAFE(sect_new->values, t, opt_new, prev, next);
    }

    if(unlikely(appconfig_option_add(sect_new, opt_old) != opt_old))
        netdata_log_error("INTERNAL ERROR: re-indexing of config '%s' in section '%s', already exists.",
                          string2str(opt_old->name), string2str(sect_new->name));

    ret = 0;

cleanup:
    if(sect_old != sect_new)
        SECTION_UNLOCK(sect_new);
    SECTION_UNLOCK(sect_old);
    return ret;
}

// Apply appconfig_move() with the same old/new option name to EVERY section.
// Returns 0 when at least one option was moved, -1 otherwise.
int appconfig_move_everywhere(struct config *root, const char *name_old, const char *name_new) {
    int ret = -1;
    APPCONFIG_LOCK(root);
    struct config_section *sect;
    for(sect = root->sections; sect; sect = sect->next) {
        if(appconfig_move(root, string2str(sect->name), name_old, string2str(sect->name), name_new) == 0)
            ret = 0;
    }
    APPCONFIG_UNLOCK(root);
    return ret;
}

diff --git a/src/libnetdata/config/appconfig_options.c b/src/libnetdata/config/appconfig_options.c
new file mode 100644
index 00000000000000..f619d08a607181
--- /dev/null
+++ b/src/libnetdata/config/appconfig_options.c
// SPDX-License-Identifier: GPL-3.0-or-later

#include "appconfig_internals.h"

// ----------------------------------------------------------------------------
// config options index

// AVL comparator: compares the interned STRING pointers first (cheap),
// falling back to string_cmp() on the contents.
int appconfig_option_compare(void *a, void *b) {
    if(((struct config_option *)a)->name < ((struct config_option *)b)->name) return -1;
    else if(((struct config_option *)a)->name > ((struct config_option *)b)->name) return 1;
    else return string_cmp(((struct config_option *)a)->name, ((struct config_option *)b)->name);
}

// Look up option 'name' in 'sect', or NULL when it does not exist.
struct config_option *appconfig_option_find(struct config_section *sect, const char *name) {
    struct config_option opt_tmp = {
        .name =
string_strdupz(name),
    };

    struct config_option *rc = (struct config_option *)avl_search_lock(&(sect->values_index), (avl_t *) &opt_tmp);

    appconfig_option_cleanup(&opt_tmp);
    return rc;
}

// ----------------------------------------------------------------------------
// config options methods

// Release all STRINGs held by 'opt' and NULL them (does not free 'opt' itself).
void appconfig_option_cleanup(struct config_option *opt) {
    string_freez(opt->value);
    string_freez(opt->name);
    string_freez(opt->migrated.section);
    string_freez(opt->migrated.name);
    string_freez(opt->value_original);
    string_freez(opt->value_default);

    opt->value = NULL;
    opt->name = NULL;
    opt->migrated.section = NULL;
    opt->migrated.name = NULL;
    opt->value_original = NULL;
    opt->value_default = NULL;
}

void appconfig_option_free(struct config_option *opt) {
    appconfig_option_cleanup(opt);
    freez(opt);
}

// Create option 'name' = 'value' in 'sect', index it and append it to the
// section's ordered list; when an option with the same name is already
// indexed, the existing one is returned and the new one is discarded.
struct config_option *appconfig_option_create(struct config_section *sect, const char *name, const char *value) {
    struct config_option *opt = callocz(1, sizeof(struct config_option));
    opt->name = string_strdupz(name);
    opt->value = string_strdupz(value);
    opt->value_original = string_dup(opt->value);

    struct config_option *opt_found = appconfig_option_add(sect, opt);
    if(opt_found != opt) {
        nd_log(NDLS_DAEMON, NDLP_INFO,
               "CONFIG: config '%s' in section '%s': already exists - using the existing one.",
               string2str(opt->name), string2str(sect->name));
        appconfig_option_free(opt);
        return opt_found;
    }

    SECTION_LOCK(sect);
    DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(sect->values, opt, prev, next);
    SECTION_UNLOCK(sect);

    return opt;
}

// Unindex, unlink and free 'opt'; 'have_sect_lock' tells whether the caller
// already holds the section lock.
void appconfig_option_remove_and_delete(struct config_section *sect, struct config_option *opt, bool have_sect_lock) {
    struct config_option *opt_found = appconfig_option_del(sect, opt);
    if(opt_found != opt) {
        nd_log(NDLS_DAEMON, NDLP_ERR,
               "INTERNAL ERROR: Cannot remove '%s' from section '%s', it was not inserted before.",
               string2str(opt->name), string2str(sect->name));
        return;
    }

    if(!have_sect_lock)
        SECTION_LOCK(sect);

    DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(sect->values, opt, prev, next);

    if(!have_sect_lock)
        SECTION_UNLOCK(sect);

    appconfig_option_free(opt);
}

// Delete every option of 'sect'.
void appconfig_option_remove_and_delete_all(struct config_section *sect, bool have_sect_lock) {
    if(!have_sect_lock)
        SECTION_LOCK(sect);

    while(sect->values)
        appconfig_option_remove_and_delete(sect, sect->values, true);

    if(!have_sect_lock)
        SECTION_UNLOCK(sect);
}

// Mark 'opt' as USED, record its datatype and default value, optionally
// reformat the value through 'cb', and flag it CHANGED when a loaded value
// differs from the default. The default is captured only once.
void appconfig_get_raw_value_of_option(struct config_option *opt, const char *default_value, CONFIG_VALUE_TYPES type, reformat_t cb) {
    opt->flags |= CONFIG_VALUE_USED;

    if(type != CONFIG_VALUE_TYPE_UNKNOWN)
        opt->type = type;

    if((opt->flags & CONFIG_VALUE_LOADED) || (opt->flags & CONFIG_VALUE_CHANGED)) {
        // this is a loaded value from the config file
        // if it is different from the default, mark it
        if(!(opt->flags & CONFIG_VALUE_CHECKED)) {
            if(!(opt->flags & CONFIG_VALUE_REFORMATTED) && cb) {
                STRING *value_old = opt->value;
                opt->value = cb(opt->value);
                if(opt->value != value_old)
                    opt->flags |= CONFIG_VALUE_REFORMATTED;
            }

            if(default_value && string_strcmp(opt->value, default_value) != 0)
                opt->flags |= CONFIG_VALUE_CHANGED;

            opt->flags |= CONFIG_VALUE_CHECKED;
        }
    }

    if(!opt->value_default)
        opt->value_default = string_strdupz(default_value);
}

// Find-or-create 'option' in 'sect' (created with 'default_value'), then
// mark it used/checked as above. Returns NULL only when the option does not
// exist and no default was given.
struct config_option *appconfig_get_raw_value_of_option_in_section(struct config_section *sect, const char *option, const char *default_value, CONFIG_VALUE_TYPES type, reformat_t cb) {
    // Only calls internal to this file check for a NULL result, and they do not supply a NULL arg.
    // External caller should treat NULL as an error case.
    struct config_option *opt = appconfig_option_find(sect, option);
    if (!opt) {
        if (!default_value) return NULL;
        opt = appconfig_option_create(sect, option, default_value);
        if (!opt) return NULL;
    }

    appconfig_get_raw_value_of_option(opt, default_value, type, cb);
    return opt;
}

struct config_option *appconfig_get_raw_value(struct config *root, const char *section, const char *option, const char *default_value, CONFIG_VALUE_TYPES type, reformat_t cb) {
    struct config_section *sect = appconfig_section_find(root, section);
    if(!sect) {
        if(!default_value) return NULL;
        sect = appconfig_section_create(root, section);
    }

    return appconfig_get_raw_value_of_option_in_section(sect, option, default_value, type, cb);
}

// Overwrite the value of 'opt', marking it USED and (when different) CHANGED.
// The datatype is recorded only if it was still UNKNOWN.
void appconfig_set_raw_value_of_option(struct config_option *opt, const char *value, CONFIG_VALUE_TYPES type) {
    opt->flags |= CONFIG_VALUE_USED;

    if(opt->type == CONFIG_VALUE_TYPE_UNKNOWN)
        opt->type = type;

    if(string_strcmp(opt->value, value) != 0) {
        opt->flags |= CONFIG_VALUE_CHANGED;

        string_freez(opt->value);
        opt->value = string_strdupz(value);
    }
}

struct config_option *appconfig_set_raw_value_of_option_in_section(struct config_section *sect, const char *option, const char *value, CONFIG_VALUE_TYPES type) {
    struct config_option *opt = appconfig_option_find(sect, option);
    if(!opt)
        opt = appconfig_option_create(sect, option, value);

    appconfig_set_raw_value_of_option(opt, value, type);
    return opt;
}

struct config_option *appconfig_set_raw_value(struct config *root, const char *section, const char *option, const char *value, CONFIG_VALUE_TYPES type) {
    struct config_section *sect = appconfig_section_find(root, section);
    if(!sect)
        sect = appconfig_section_create(root, section);

    return appconfig_set_raw_value_of_option_in_section(sect, option, value, type);
}
diff --git a/src/libnetdata/config/appconfig_sections.c b/src/libnetdata/config/appconfig_sections.c
new file mode 100644
index
00000000000000..2180803a925d4d
--- /dev/null
+++ b/src/libnetdata/config/appconfig_sections.c
// SPDX-License-Identifier: GPL-3.0-or-later

#include "appconfig_internals.h"

// ----------------------------------------------------------------------------
// config sections index

// AVL comparator: interned STRING pointers first (cheap), then contents.
int appconfig_section_compare(void *a, void *b) {
    if(((struct config_section *)a)->name < ((struct config_section *)b)->name) return -1;
    else if(((struct config_section *)a)->name > ((struct config_section *)b)->name) return 1;
    else return string_cmp(((struct config_section *)a)->name, ((struct config_section *)b)->name);
}

// Look up section 'name' in 'root', or NULL when it does not exist.
struct config_section *appconfig_section_find(struct config *root, const char *name) {
    struct config_section sect_tmp = {
        .name = string_strdupz(name),
    };

    struct config_section *rc = (struct config_section *)avl_search_lock(&root->index, (avl_t *) &sect_tmp);
    string_freez(sect_tmp.name);
    return rc;
}

// ----------------------------------------------------------------------------
// config section methods

// Free 'sect' and its AVL index (the options must already be gone).
void appconfig_section_free(struct config_section *sect) {
    avl_destroy_lock(&sect->values_index);
    string_freez(sect->name);
    freez(sect);
}

// Unindex 'sect', delete all of its options, unlink it from 'root' and free
// it. 'have_root_lock'/'have_sect_lock' tell which locks the caller already
// holds; a held section lock is released here before freeing.
void appconfig_section_remove_and_delete(struct config *root, struct config_section *sect, bool have_root_lock, bool have_sect_lock) {
    struct config_section *sect_found = appconfig_section_del(root, sect);
    if(sect_found != sect) {
        nd_log(NDLS_DAEMON, NDLP_ERR,
               "INTERNAL ERROR: Cannot remove section '%s', it was not inserted before.",
               string2str(sect->name));
        return;
    }

    appconfig_option_remove_and_delete_all(sect, have_sect_lock);

    if(!have_root_lock)
        APPCONFIG_LOCK(root);

    DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(root->sections, sect, prev, next);

    if(!have_root_lock)
        APPCONFIG_UNLOCK(root);

    // if the caller has the section lock, we will unlock it, to cleanup
    if(have_sect_lock)
        SECTION_UNLOCK(sect);

    appconfig_section_free(sect);
}

// Create and index section 'section' in 'root'; when a section with the same
// name is already indexed, the existing one is returned instead.
struct config_section *appconfig_section_create(struct config *root, const char *section) {
    struct config_section *sect = callocz(1, sizeof(struct config_section));
    sect->name = string_strdupz(section);
    spinlock_init(&sect->spinlock);

    avl_init_lock(&sect->values_index, appconfig_option_compare);

    struct config_section *sect_found = appconfig_section_add(root, sect);
    if(sect_found != sect) {
        nd_log(NDLS_DAEMON, NDLP_ERR,
               "CONFIG: section '%s', already exists, using existing.",
               string2str(sect->name));
        appconfig_section_free(sect);
        return sect_found;
    }

    APPCONFIG_LOCK(root);
    DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(root->sections, sect, prev, next);
    APPCONFIG_UNLOCK(root);

    return sect;
}

diff --git a/src/libnetdata/config/appconfig_traversal.c b/src/libnetdata/config/appconfig_traversal.c
new file mode 100644
index 00000000000000..f26def2c236369
--- /dev/null
+++ b/src/libnetdata/config/appconfig_traversal.c
// SPDX-License-Identifier: GPL-3.0-or-later

#include "appconfig_internals.h"

// Invoke 'cb' for every option of 'section'; options for which 'cb' returns
// true are marked CONFIG_VALUE_USED. Returns how many options were marked.
size_t appconfig_foreach_value_in_section(struct config *root, const char *section, appconfig_foreach_value_cb_t cb, void *data) {
    size_t used = 0;
    struct config_section *co = appconfig_section_find(root, section);
    if(co) {
        SECTION_LOCK(co);
        struct config_option *cv;
        for(cv = co->values; cv ; cv = cv->next) {
            if(cb(data, string2str(cv->name), string2str(cv->value))) {
                cv->flags |= CONFIG_VALUE_USED;
                used++;
            }
        }
        SECTION_UNLOCK(co);
    }

    return used;
}
diff --git a/src/libnetdata/ebpf/ebpf.c b/src/libnetdata/ebpf/ebpf.c
index 4e7c85943bcdd0..27042a794a1a64 100644
--- a/src/libnetdata/ebpf/ebpf.c
+++ b/src/libnetdata/ebpf/ebpf.c
@@ -1014,7 +1014,7 @@ int ebpf_load_config(struct config *config, char *filename)
 }
-static netdata_run_mode_t ebpf_select_mode(char *mode)
+static netdata_run_mode_t ebpf_select_mode(const char *mode)
 {
     if (!strcasecmp(mode,EBPF_CFG_LOAD_MODE_RETURN ))
         return MODE_RETURN;
@@
-1041,7 +1041,7 @@ static void ebpf_select_mode_string(char *output, size_t len, netdata_run_mode_t * * @return It returns the value to be used. */ -netdata_ebpf_load_mode_t epbf_convert_string_to_load_mode(char *str) +netdata_ebpf_load_mode_t epbf_convert_string_to_load_mode(const char *str) { if (!strcasecmp(str, EBPF_CFG_CORE_PROGRAM)) return EBPF_LOAD_CORE; @@ -1094,7 +1094,7 @@ static char *ebpf_convert_collect_pid_to_string(netdata_apps_level_t level) * * @return it returns the level associated to the string or default when it is a wrong value */ -netdata_apps_level_t ebpf_convert_string_to_apps_level(char *str) +netdata_apps_level_t ebpf_convert_string_to_apps_level(const char *str) { if (!strcasecmp(str, EBPF_CFG_PID_REAL_PARENT)) return NETDATA_APPS_LEVEL_REAL_PARENT; @@ -1114,7 +1114,7 @@ netdata_apps_level_t ebpf_convert_string_to_apps_level(char *str) * @param str value read from configuration file. * @param lmode load mode used by collector. */ -netdata_ebpf_program_loaded_t ebpf_convert_core_type(char *str, netdata_run_mode_t lmode) +netdata_ebpf_program_loaded_t ebpf_convert_core_type(const char *str, netdata_run_mode_t lmode) { if (!strcasecmp(str, EBPF_CFG_ATTACH_TRACEPOINT)) return EBPF_LOAD_TRACEPOINT; @@ -1174,7 +1174,7 @@ struct btf *ebpf_parse_btf_file(const char *filename) * @param path is the fullpath * @param filename is the file inside BTF path. 
*/ -struct btf *ebpf_load_btf_file(char *path, char *filename) +struct btf *ebpf_load_btf_file(const char *path, const char *filename) { char fullpath[PATH_MAX + 1]; snprintfz(fullpath, PATH_MAX, "%s/%s", path, filename); @@ -1299,7 +1299,7 @@ void ebpf_update_module_using_config(ebpf_module_t *modules, netdata_ebpf_load_m { char default_value[EBPF_MAX_MODE_LENGTH + 1]; ebpf_select_mode_string(default_value, EBPF_MAX_MODE_LENGTH, modules->mode); - char *load_mode = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_LOAD_MODE, default_value); + const char *load_mode = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_LOAD_MODE, default_value); modules->mode = ebpf_select_mode(load_mode); modules->update_every = (int)appconfig_get_number(modules->cfg, EBPF_GLOBAL_SECTION, @@ -1318,17 +1318,17 @@ void ebpf_update_module_using_config(ebpf_module_t *modules, netdata_ebpf_load_m EBPF_CFG_LIFETIME, EBPF_DEFAULT_LIFETIME); char *value = ebpf_convert_load_mode_to_string(modules->load & NETDATA_EBPF_LOAD_METHODS); - char *type_format = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_TYPE_FORMAT, value); + const char *type_format = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_TYPE_FORMAT, value); netdata_ebpf_load_mode_t load = epbf_convert_string_to_load_mode(type_format); load = ebpf_select_load_mode(btf_file, load, kver, is_rh); modules->load = origin | load; - char *core_attach = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_CORE_ATTACH, EBPF_CFG_ATTACH_TRAMPOLINE); + const char *core_attach = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_CORE_ATTACH, EBPF_CFG_ATTACH_TRAMPOLINE); netdata_ebpf_program_loaded_t fill_lm = ebpf_convert_core_type(core_attach, modules->mode); ebpf_update_target_with_conf(modules, fill_lm); value = ebpf_convert_collect_pid_to_string(modules->apps_level); - char *collect_pid = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_COLLECT_PID, value); + const char *collect_pid = 
appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_COLLECT_PID, value); modules->apps_level = ebpf_convert_string_to_apps_level(collect_pid); modules->maps_per_core = appconfig_get_boolean(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_MAPS_PER_CORE, diff --git a/src/libnetdata/ebpf/ebpf.h b/src/libnetdata/ebpf/ebpf.h index 1c612ad32b8b06..66a596711456fd 100644 --- a/src/libnetdata/ebpf/ebpf.h +++ b/src/libnetdata/ebpf/ebpf.h @@ -470,13 +470,13 @@ int ebpf_disable_tracing_values(char *subsys, char *eventname); // BTF helpers #define NETDATA_EBPF_MAX_SYSCALL_LENGTH 255 -netdata_ebpf_load_mode_t epbf_convert_string_to_load_mode(char *str); -netdata_ebpf_program_loaded_t ebpf_convert_core_type(char *str, netdata_run_mode_t lmode); +netdata_ebpf_load_mode_t epbf_convert_string_to_load_mode(const char *str); +netdata_ebpf_program_loaded_t ebpf_convert_core_type(const char *str, netdata_run_mode_t lmode); void ebpf_select_host_prefix(char *output, size_t length, char *syscall, int kver); #ifdef LIBBPF_MAJOR_VERSION void ebpf_adjust_thread_load(ebpf_module_t *mod, struct btf *file); struct btf *ebpf_parse_btf_file(const char *filename); -struct btf *ebpf_load_btf_file(char *path, char *filename); +struct btf *ebpf_load_btf_file(const char *path, const char *filename); int ebpf_is_function_inside_btf(struct btf *file, char *function); void ebpf_update_map_type(struct bpf_map *map, ebpf_local_maps_t *w); void ebpf_define_map_type(ebpf_local_maps_t *maps, int maps_per_core, int kver); diff --git a/src/libnetdata/gorilla/gorilla.cc b/src/libnetdata/gorilla/gorilla.cc index c7601836528cdc..e3d6124181df13 100644 --- a/src/libnetdata/gorilla/gorilla.cc +++ b/src/libnetdata/gorilla/gorilla.cc @@ -162,10 +162,11 @@ bool gorilla_writer_write(gorilla_writer_t *gw, uint32_t number) __atomic_fetch_add(&hdr->nbits, 1, __ATOMIC_RELAXED); if (!is_xor_lzc_same) { - if (hdr->nbits + 1 >= gw->capacity) + size_t bits_needed = (bit_size() == 32) ? 
5 : 6; + if ((hdr->nbits + bits_needed) >= gw->capacity) return false; - bit_buffer_write(data, hdr->nbits, xor_lzc, (bit_size() == 32) ? 5 : 6); - __atomic_fetch_add(&hdr->nbits, (bit_size() == 32) ? 5 : 6, __ATOMIC_RELAXED); + bit_buffer_write(data, hdr->nbits, xor_lzc, bits_needed); + __atomic_fetch_add(&hdr->nbits, bits_needed, __ATOMIC_RELAXED); } // write the bits of the XOR'd value without the LZC prefix diff --git a/src/libnetdata/libjudy/judy-malloc.c b/src/libnetdata/libjudy/judy-malloc.c new file mode 100644 index 00000000000000..ec736393dbd509 --- /dev/null +++ b/src/libnetdata/libjudy/judy-malloc.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "judy-malloc.h" + +#define MAX_JUDY_SIZE_TO_ARAL 24 +static bool judy_sizes_config[MAX_JUDY_SIZE_TO_ARAL + 1] = { + [3] = true, + [4] = true, + [5] = true, + [6] = true, + [7] = true, + [8] = true, + [10] = true, + [11] = true, + [15] = true, + [23] = true, +}; +static ARAL *judy_sizes_aral[MAX_JUDY_SIZE_TO_ARAL + 1] = {}; + +struct aral_statistics judy_sizes_aral_statistics = {}; + +__attribute__((constructor)) void aral_judy_init(void) { + for(size_t Words = 0; Words <= MAX_JUDY_SIZE_TO_ARAL; Words++) + if(judy_sizes_config[Words]) { + char buf[30+1]; + snprintfz(buf, sizeof(buf) - 1, "judy-%zu", Words * sizeof(Word_t)); + judy_sizes_aral[Words] = aral_create( + buf, + Words * sizeof(Word_t), + 0, + 65536, + &judy_sizes_aral_statistics, + NULL, NULL, false, false); + } +} + +size_t judy_aral_overhead(void) { + return aral_overhead_from_stats(&judy_sizes_aral_statistics); +} + +size_t judy_aral_structures(void) { + return aral_structures_from_stats(&judy_sizes_aral_statistics); +} + +static ARAL *judy_size_aral(Word_t Words) { + if(Words <= MAX_JUDY_SIZE_TO_ARAL && judy_sizes_aral[Words]) + return judy_sizes_aral[Words]; + + return NULL; +} + +inline Word_t JudyMalloc(Word_t Words) { + Word_t Addr; + + ARAL *ar = judy_size_aral(Words); + if(ar) + Addr = (Word_t) aral_mallocz(ar); + 
else + Addr = (Word_t) mallocz(Words * sizeof(Word_t)); + + return(Addr); +} + +inline void JudyFree(void * PWord, Word_t Words) { + ARAL *ar = judy_size_aral(Words); + if(ar) + aral_freez(ar, PWord); + else + freez(PWord); +} + +Word_t JudyMallocVirtual(Word_t Words) { + return JudyMalloc(Words); +} + +void JudyFreeVirtual(void * PWord, Word_t Words) { + JudyFree(PWord, Words); +} diff --git a/src/libnetdata/libjudy/judy-malloc.h b/src/libnetdata/libjudy/judy-malloc.h new file mode 100644 index 00000000000000..65cba982b0c4ec --- /dev/null +++ b/src/libnetdata/libjudy/judy-malloc.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_JUDY_MALLOC_H +#define NETDATA_JUDY_MALLOC_H + +#include "../libnetdata.h" + +size_t judy_aral_overhead(void); +size_t judy_aral_structures(void); + +#endif //NETDATA_JUDY_MALLOC_H diff --git a/src/libnetdata/libnetdata.c b/src/libnetdata/libnetdata.c index 52c5545fc6a9bf..17dbd85428e4f5 100644 --- a/src/libnetdata/libnetdata.c +++ b/src/libnetdata/libnetdata.c @@ -13,88 +13,13 @@ struct rlimit rlimit_nofile = { .rlim_cur = 1024, .rlim_max = 1024 }; #if defined(MADV_MERGEABLE) -int enable_ksm = 1; +int enable_ksm = CONFIG_BOOLEAN_AUTO; #else int enable_ksm = 0; #endif volatile sig_atomic_t netdata_exit = 0; -#define MAX_JUDY_SIZE_TO_ARAL 24 -static bool judy_sizes_config[MAX_JUDY_SIZE_TO_ARAL + 1] = { - [3] = true, - [4] = true, - [5] = true, - [6] = true, - [7] = true, - [8] = true, - [10] = true, - [11] = true, - [15] = true, - [23] = true, -}; -static ARAL *judy_sizes_aral[MAX_JUDY_SIZE_TO_ARAL + 1] = {}; - -struct aral_statistics judy_sizes_aral_statistics = {}; - -void aral_judy_init(void) { - for(size_t Words = 0; Words <= MAX_JUDY_SIZE_TO_ARAL; Words++) - if(judy_sizes_config[Words]) { - char buf[30+1]; - snprintfz(buf, sizeof(buf) - 1, "judy-%zu", Words * sizeof(Word_t)); - judy_sizes_aral[Words] = aral_create( - buf, - Words * sizeof(Word_t), - 0, - 65536, - &judy_sizes_aral_statistics, - NULL, 
NULL, false, false); - } -} - -size_t judy_aral_overhead(void) { - return aral_overhead_from_stats(&judy_sizes_aral_statistics); -} - -size_t judy_aral_structures(void) { - return aral_structures_from_stats(&judy_sizes_aral_statistics); -} - -static ARAL *judy_size_aral(Word_t Words) { - if(Words <= MAX_JUDY_SIZE_TO_ARAL && judy_sizes_aral[Words]) - return judy_sizes_aral[Words]; - - return NULL; -} - -inline Word_t JudyMalloc(Word_t Words) { - Word_t Addr; - - ARAL *ar = judy_size_aral(Words); - if(ar) - Addr = (Word_t) aral_mallocz(ar); - else - Addr = (Word_t) mallocz(Words * sizeof(Word_t)); - - return(Addr); -} - -inline void JudyFree(void * PWord, Word_t Words) { - ARAL *ar = judy_size_aral(Words); - if(ar) - aral_freez(ar, PWord); - else - freez(PWord); -} - -Word_t JudyMallocVirtual(Word_t Words) { - return JudyMalloc(Words); -} - -void JudyFreeVirtual(void * PWord, Word_t Words) { - JudyFree(PWord, Words); -} - // ---------------------------------------------------------------------------- // memory allocation functions that handle failures diff --git a/src/libnetdata/libnetdata.h b/src/libnetdata/libnetdata.h index a7418d53401aa5..0963d63df68dee 100644 --- a/src/libnetdata/libnetdata.h +++ b/src/libnetdata/libnetdata.h @@ -307,9 +307,7 @@ typedef uint32_t uid_t; #define WARNUNUSED #endif -void aral_judy_init(void); -size_t judy_aral_overhead(void); -size_t judy_aral_structures(void); +#include "libjudy/judy-malloc.h" #define ABS(x) (((x) < 0)? 
(-(x)) : (x)) #define MIN(a,b) (((a)<(b))?(a):(b)) @@ -435,7 +433,7 @@ void netdata_cleanup_and_exit(int ret, const char *action, const char *action_re void netdata_cleanup_and_exit(int ret, const char *action, const char *action_result, const char *action_data) NORETURN; #endif -extern char *netdata_configured_host_prefix; +extern const char *netdata_configured_host_prefix; #include "os/os.h" @@ -459,6 +457,7 @@ extern char *netdata_configured_host_prefix; #include "inlined.h" #include "line_splitter/line_splitter.h" #include "clocks/clocks.h" +#include "parsers/parsers.h" #include "datetime/iso8601.h" #include "datetime/rfc3339.h" #include "datetime/rfc7231.h" @@ -639,7 +638,6 @@ void timing_action(TIMING_ACTION action, TIMING_STEP step); int hash256_string(const unsigned char *string, size_t size, char *hash); extern bool unittest_running; -#define API_RELATIVE_TIME_MAX (3 * 365 * 86400) bool rrdr_relative_window_to_absolute(time_t *after, time_t *before, time_t now); bool rrdr_relative_window_to_absolute_query(time_t *after, time_t *before, time_t *now_ptr, bool unittest); diff --git a/src/libnetdata/log/README.md b/src/libnetdata/log/README.md index ef9ca1ef3fa45d..43a7c07f2bdcf3 100644 --- a/src/libnetdata/log/README.md +++ b/src/libnetdata/log/README.md @@ -73,7 +73,7 @@ In `netdata.conf`, there are the following settings: ``` [logs] # logs to trigger flood protection = 1000 - # logs flood protection period = 60 + # logs flood protection period = 1m # facility = daemon # level = info # daemon = journal diff --git a/src/libnetdata/log/log.c b/src/libnetdata/log/log.c index bbb0eb23e62393..f6e40512b156c5 100644 --- a/src/libnetdata/log/log.c +++ b/src/libnetdata/log/log.c @@ -580,7 +580,14 @@ void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting) { *slash = '\0'; slash++; ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value); - ls->limits.throttle_period = str2u(slash); + + int period; + 
if(!duration_parse_seconds(slash, &period)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "Error while parsing period '%s'", slash); + period = ND_LOG_DEFAULT_THROTTLE_PERIOD; + } + + ls->limits.throttle_period = period; } else { ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value); @@ -589,8 +596,9 @@ void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting) { } } else - nd_log(NDLS_DAEMON, NDLP_ERR, "Error while parsing configuration of log source '%s'. " - "In config '%s', '%s' is not understood.", + nd_log(NDLS_DAEMON, NDLP_ERR, + "Error while parsing configuration of log source '%s'. " + "In config '%s', '%s' is not understood.", nd_log_id2source(source), setting, name); } } diff --git a/src/libnetdata/parsers/duration.c b/src/libnetdata/parsers/duration.c new file mode 100644 index 00000000000000..a826318f78b086 --- /dev/null +++ b/src/libnetdata/parsers/duration.c @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "duration.h" + +#ifdef NSEC_PER_USEC +#undef NSEC_PER_USEC +#endif +#define NSEC_PER_USEC (1000ULL) + +#ifdef USEC_PER_MS +#undef USEC_PER_MS +#endif +#define USEC_PER_MS (1000ULL) + +#ifdef NSEC_PER_SEC +#undef NSEC_PER_SEC +#endif +#define NSEC_PER_SEC (1000000000ULL) + +#define NSEC_PER_MS (USEC_PER_MS * NSEC_PER_USEC) +#define NSEC_PER_MIN (NSEC_PER_SEC * 60ULL) +#define NSEC_PER_HOUR (NSEC_PER_MIN * 60ULL) +#define NSEC_PER_DAY (NSEC_PER_HOUR * 24ULL) +#define NSEC_PER_WEEK (NSEC_PER_DAY * 7ULL) +#define NSEC_PER_MONTH (NSEC_PER_DAY * 30ULL) +#define NSEC_PER_QUARTER (NSEC_PER_MONTH * 3ULL) + +// more accurate, but not an integer multiple of days, weeks, months +#define NSEC_PER_YEAR (NSEC_PER_DAY * 365ULL) + +// Define a structure to map time units to their multipliers +static const struct duration_unit { + const char *unit; + const bool formatter; // true when this unit should be used when formatting to string + const snsec_t multiplier; +} units[] = { + + // IMPORTANT: the order of this 
array is crucial! + // The array should be sorted from the smaller unit to the biggest unit. + + { .unit = "ns", .formatter = true, .multiplier = 1 }, // UCUM + { .unit = "us", .formatter = true, .multiplier = NSEC_PER_USEC }, // UCUM + { .unit = "ms", .formatter = true, .multiplier = NSEC_PER_MS }, // UCUM + { .unit = "s", .formatter = true, .multiplier = NSEC_PER_SEC }, // UCUM + { .unit = "m", .formatter = true, .multiplier = NSEC_PER_MIN }, // - + { .unit = "min", .formatter = false, .multiplier = NSEC_PER_MIN }, // UCUM + { .unit = "h", .formatter = true, .multiplier = NSEC_PER_HOUR }, // UCUM + { .unit = "d", .formatter = true, .multiplier = NSEC_PER_DAY }, // UCUM + { .unit = "w", .formatter = false, .multiplier = NSEC_PER_WEEK }, // - + { .unit = "wk", .formatter = false, .multiplier = NSEC_PER_WEEK }, // UCUM + { .unit = "mo", .formatter = true, .multiplier = NSEC_PER_MONTH }, // UCUM + { .unit = "M", .formatter = false, .multiplier = NSEC_PER_MONTH }, // compatibility + { .unit = "q", .formatter = false, .multiplier = NSEC_PER_QUARTER }, // - + { .unit = "y", .formatter = true, .multiplier = NSEC_PER_YEAR }, // - + { .unit = "Y", .formatter = false, .multiplier = NSEC_PER_YEAR }, // compatibility + { .unit = "a", .formatter = false, .multiplier = NSEC_PER_YEAR }, // UCUM +}; + +static inline const struct duration_unit *duration_find_unit(const char *unit) { + if(!unit || !*unit) + unit = "ns"; + + for (size_t i = 0; i < sizeof(units) / sizeof(units[0]); i++) { + const struct duration_unit *du = &units[i]; + if ((uint8_t)unit[0] == (uint8_t)du->unit[0] && strcmp(unit, du->unit) == 0) + return du; + } + + return NULL; +} + +inline int64_t duration_round_to_resolution(int64_t value, int64_t resolution) { + if(value > 0) + return (value + ((resolution - 1) / 2)) / resolution; + + if(value < 0) + return (value - ((resolution - 1) / 2)) / resolution; + + return 0; +} + +// 
------------------------------------------------------------------------------------------------------------------- +// parse a duration string + +bool duration_parse(const char *duration, int64_t *result, const char *default_unit) { + if (!duration || !*duration) { + *result = 0; + return false; + } + + const struct duration_unit *du_def = duration_find_unit(default_unit); + if(!du_def) { + *result = 0; + return false; + } + + int64_t sign = 1; + const char *s = duration; + while (isspace((uint8_t)*s)) s++; + if(*s == '-') { + s++; + sign = -1; + } + + int64_t v = 0; + + while (*s) { + // Skip leading spaces + while (isspace((uint8_t)*s)) s++; + + // compatibility + if(*s == 'n' && strcmp(s, "never") == 0) { + *result = 0; + return true; + } + + if(*s == 'o' && strcmp(s, "off") == 0) { + *result = 0; + return true; + } + + // Parse the number + const char *number_start = s; + NETDATA_DOUBLE value = str2ndd(s, (char **)&s); + + // If no valid number found, return default + if (s == number_start) { + *result = 0; + return false; + } + + // Skip spaces between number and unit + while (isspace((uint8_t)*s)) s++; + + const char *unit_start = s; + while (isalpha((uint8_t)*s)) s++; + + char unit[4]; + size_t unit_len = s - unit_start; + const struct duration_unit *du; + if (unit_len == 0) + du = du_def; + else { + if (unit_len >= sizeof(unit)) unit_len = sizeof(unit) - 1; + strncpyz(unit, unit_start, unit_len); + du = duration_find_unit(unit); + if(!du) { + *result = 0; + return false; + } + } + + v += (int64_t)round(value * (NETDATA_DOUBLE)du->multiplier); + } + + v *= sign; + + if(du_def->multiplier == 1) + *result = v; + else + *result = duration_round_to_resolution(v, du_def->multiplier); + + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- +// generate a string to represent a duration + +ssize_t duration_snprintf(char *dst, size_t dst_size, int64_t value, const char *unit) { + 
if (!dst || dst_size == 0) return -1; + if (dst_size == 1) { + dst[0] = '\0'; + return -2; + } + + if(value == 0) + return snprintfz(dst, dst_size, "off"); + + const char *sign = ""; + if(value < 0) { + sign = "-"; + value = -value; + } + + const struct duration_unit *du_min = duration_find_unit(unit); + size_t offset = 0; + + int64_t nsec = value * du_min->multiplier; + + // Iterate through units from largest to smallest + for (size_t i = sizeof(units) / sizeof(units[0]) - 1; i > 0 && nsec > 0; i--) { + const struct duration_unit *du = &units[i]; + if(!units[i].formatter && du != du_min) + continue; + + // IMPORTANT: + // The week (7 days) is not aligned to the quarter (~91 days) or the year (365.25 days). + // To make sure that the value returned can be parsed back without loss, + // we have to round the value per unit (inside this loop), not globally. + // Otherwise, we have to make sure that all larger units are integer multiples of the smaller ones. + + int64_t multiplier = units[i].multiplier; + int64_t rounded = (du == du_min) ? (duration_round_to_resolution(nsec, multiplier) * multiplier) : nsec; + + int64_t unit_count = rounded / multiplier; + if (unit_count > 0) { + int written = snprintfz(dst + offset, dst_size - offset, + "%s%" PRIi64 "%s", sign, unit_count, units[i].unit); + + if (written < 0) + return -3; + + sign = ""; + offset += written; + + if (offset >= dst_size) { + // buffer overflow + return (ssize_t)offset; + } + + if(unit_count * multiplier >= nsec) + break; + else + nsec -= unit_count * multiplier; + } + + if(du == du_min) + // we should not go to smaller units + break; + } + + if (offset == 0) + // nothing has been written + offset = snprintfz(dst, dst_size, "off"); + + return (ssize_t)offset; +} + +// -------------------------------------------------------------------------------------------------------------------- +// compatibility for parsing seconds in int. 
+ +bool duration_parse_seconds(const char *str, int *result) { + int64_t v; + + if(duration_parse_time_t(str, &v)) { + *result = (int)v; + return true; + } + + return false; +} diff --git a/src/libnetdata/parsers/duration.h b/src/libnetdata/parsers/duration.h new file mode 100644 index 00000000000000..60e1167d886e11 --- /dev/null +++ b/src/libnetdata/parsers/duration.h @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef LIBNETDATA_PARSERS_DURATION_H +#define LIBNETDATA_PARSERS_DURATION_H + +#include "parsers.h" + +int64_t duration_round_to_resolution(int64_t value, int64_t resolution); + +// duration (string to number) +bool duration_parse(const char *duration, int64_t *result, const char *default_unit); +#define duration_parse_nsec_t(duration, ns_ptr) duration_parse(duration, ns_ptr, "ns") +#define duration_parse_usec_t(duration, us_ptr) duration_parse(duration, us_ptr, "us") +#define duration_parse_msec_t(duration, ms_ptr) duration_parse(duration, ms_ptr, "ms") +#define duration_parse_time_t(duration, secs_ptr) duration_parse(duration, secs_ptr, "s") +#define duration_parse_mins(duration, mins_ptr) duration_parse(duration, mins_ptr, "m") +#define duration_parse_hours(duration, hours_ptr) duration_parse(duration, hours_ptr, "h") +#define duration_parse_days(duration, days_ptr) duration_parse(duration, days_ptr, "d") + +// duration (number to string) +ssize_t duration_snprintf(char *dst, size_t dst_size, int64_t value, const char *unit); +#define duration_snprintf_nsec_t(dst, dst_size, ns) duration_snprintf(dst, dst_size, ns, "ns") +#define duration_snprintf_usec_t(dst, dst_size, us) duration_snprintf(dst, dst_size, us, "us") +#define duration_snprintf_msec_t(dst, dst_size, ms) duration_snprintf(dst, dst_size, ms, "ms") +#define duration_snprintf_time_t(dst, dst_size, secs) duration_snprintf(dst, dst_size, secs, "s") +#define duration_snprintf_mins(dst, dst_size, mins) duration_snprintf(dst, dst_size, mins, "m") +#define 
duration_snprintf_hours(dst, dst_size, hours) duration_snprintf(dst, dst_size, hours, "h") +#define duration_snprintf_days(dst, dst_size, days) duration_snprintf(dst, dst_size, days, "d") + +bool duration_parse_seconds(const char *str, int *result); + +#endif //LIBNETDATA_PARSERS_DURATION_H diff --git a/src/libnetdata/parsers/duration.html b/src/libnetdata/parsers/duration.html new file mode 100644 index 00000000000000..8f6f8a41650f0f --- /dev/null +++ b/src/libnetdata/parsers/duration.html @@ -0,0 +1,205 @@ + + + + + + + Duration Converter + + + +

Duration Converter

+ +
+ + + + + + + + + + + + +
UnitValueFormattedCheck
+ + + + diff --git a/src/libnetdata/parsers/durations.md b/src/libnetdata/parsers/durations.md new file mode 100644 index 00000000000000..e952faa1a2aa4f --- /dev/null +++ b/src/libnetdata/parsers/durations.md @@ -0,0 +1,94 @@ +## Durations in Netdata + +Netdata provides a flexible and powerful way to specify durations for various configurations and operations, such as alerts, database retention, and other configuration options. Durations can be expressed in a variety of units, ranging from nanoseconds to years, allowing users to define time intervals in a human-readable format. + +### Supported Duration Units + +Netdata supports a wide range of duration units. The system follows the Unified Code for Units of Measure (UCUM) standard where applicable. Below is a table of all the supported units, their corresponding representations, and their compatibility: + +| Symbol | Description | Value | Compatibility | Formatter | +|:------:|:------------:|:--------:|:-------------:|:---------:| +| `ns` | Nanoseconds | `1ns` | UCUM | **Yes** | +| `us` | Microseconds | `1000ns` | UCUM | **Yes** | +| `ms` | Milliseconds | `1000us` | UCUM | **Yes** | +| `s` | Seconds | `1000ms` | UCUM | **Yes** | +| `m` | Minutes | `60s` | Natural | **Yes** | +| `min` | Minutes | `60s` | UCUM | No | +| `h` | Hours | `60m` | UCUM | **Yes** | +| `d` | Days | `24h` | UCUM | **Yes** | +| `w` | Weeks | `7d` | Natural | No | +| `wk` | Weeks | `7d` | UCUM | No | +| `mo` | Months | `30d` | UCUM | **Yes** | +| `M` | Months | `30d` | Backwards | No | +| `q` | Quarters | `3mo` | Natural | No | +| `y` | Years | `365d` | Natural | **Yes** | +| `Y` | Years | `365d` | Backwards | No | +| `a` | Years | `365d` | UCUM | No | + +- **UCUM**: The unit is specified in the Unified Code for Units of Measure (UCUM) standard. +- **Natural**: We feel that this is more natural for expressing durations with single letter units. 
+- **Backwards**: This unit has been used in the past in Netdata, and we support it for backwards compatibility. + +### Duration Expression Format + +Netdata allows users to express durations in both simple and complex formats. + +- **Simple Formats**: A duration can be specified using a number followed by a unit, such as `5m` (5 minutes), `2h` (2 hours), or `1d` (1 day). Fractional numbers are also supported, such as `1.5d`, `3.5mo` or `1.2y`. + +- **Complex Formats**: A duration can also be composed of multiple units added together. For example: + - `1y2mo3w4d` represents 1 year, 2 months, 3 weeks, and 4 days. + - `15d-12h` represents 15 days minus 12 hours (which equals 14 days and 12 hours). + +Each number given in durations can be either positive or negative. For example `1h15m` is 1 hour and 15 minutes, but `1h-15m` results in `45m`. + +The same unit can be given multiple times, so that `1d0.5d` is `1d12h` and `1d-0.5d` is `12h`. + +The order of units in the expressions is irrelevant, so that `1m2h3d` is the same as `3d2h1m`. + +The system will parse durations with spaces in them, but we suggest writing them down in compact form, without spaces. This is required, especially in alerts configuration, since spaces in durations will affect how parent expressions are tokenized. + +### Duration Rounding + +Netdata provides various functions to parse and round durations according to specific needs: + +- **Default Rounding to Seconds**: Most duration uses in Netdata are rounded to the nearest second. For example, a duration of `1.4s` would round to `1s`, while `1.5s` would round to `2s`. + +- **Rounding to Larger Units**: In some cases, such as database retention, durations are rounded to larger units like days. Even when rounding to a larger unit, durations can still be expressed in smaller units (e.g., `24h86400s` for `2d`). 
+ +### Maximum and Minimum Duration Limits + +Netdata's duration expressions can handle durations ranging from the minimum possible value of `-INT64_MAX` to the maximum of `INT64_MAX` in nanoseconds. This range translates approximately to durations between -292 years and +292 years. + +### Inconsistencies in Duration Units + +While Netdata provides a flexible system for specifying durations, some inconsistencies arise due to the way different units are defined: + +- **1 Year (`y`) = 365 Days (`d`)**: In Netdata, a year is defined as 365 days. This is an approximation, since the average year is about 365.25 days. + +- **1 Month (`mo`) = 30 Days (`d`)**: Similarly, a month in Netdata is defined as 30 days, which is also an approximation. In reality, months vary in length (28 to 31 days). + +- **1 Quarter (`q`) = 3 Months (`mo`) = 90 Days (`d`)**: A quarter is defined as 3 months, or 90 days, which aligns with the approximation of each month being 30 days. + +These definitions can lead to some unexpected results when performing arithmetic with durations: + +**Example of Inconsistency**: + +`1y-1d` in Netdata calculates to `364d` but also to `12mo4d` because `1y = 365d` and `1mo = 30d`. This is inconsistent because `1y` is defined as `12mo5d` or `4q5d` (given the approximations above). + +### Negative Durations + +When the first letter of a duration expression is the minus character, Netdata parses the entire expression as positive and then it negates the result. For example: `-1m15s` is `-75s`, not `-45s`. To get `-45s` the expression should be `-1m-15s`. So the initial `-` is treated like `-(expression)`. + +The same rule is applied when generating duration expressions. + +### Example Duration Expressions + +Here are some examples of valid duration expressions: + +1. **`30s`**: 30 seconds. +2. **`5m`**: 5 minutes. +3. **`2h30m`**: 2 hours and 30 minutes. +4. **`1.5d`**: 1 day and 12 hours. +5. **`1w3d4h`**: 1 week, 3 days, and 4 hours. +6. 
**`1y2mo3d`**: 1 year, 2 months, and 3 days. +7. **`15d-12h`**: 14 days and 12 hours. diff --git a/src/libnetdata/parsers/parsers.h b/src/libnetdata/parsers/parsers.h new file mode 100644 index 00000000000000..fc7477e619e4b4 --- /dev/null +++ b/src/libnetdata/parsers/parsers.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_PARSERS_H +#define NETDATA_PARSERS_H + +#include "../libnetdata.h" +#include "size.h" +#include "duration.h" +#include "timeframe.h" + +#endif //NETDATA_PARSERS_H diff --git a/src/libnetdata/parsers/size.c b/src/libnetdata/parsers/size.c new file mode 100644 index 00000000000000..b6bce196e02abc --- /dev/null +++ b/src/libnetdata/parsers/size.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "size.h" + +// Define multipliers for base 2 (binary) units +#define SIZE_MULTIPLIER_BASE2 1024ULL +#define SIZE_MULTIPLIER_KiB (SIZE_MULTIPLIER_BASE2) +#define SIZE_MULTIPLIER_MiB (SIZE_MULTIPLIER_KiB * SIZE_MULTIPLIER_BASE2) +#define SIZE_MULTIPLIER_GiB (SIZE_MULTIPLIER_MiB * SIZE_MULTIPLIER_BASE2) +#define SIZE_MULTIPLIER_TiB (SIZE_MULTIPLIER_GiB * SIZE_MULTIPLIER_BASE2) +#define SIZE_MULTIPLIER_PiB (SIZE_MULTIPLIER_TiB * SIZE_MULTIPLIER_BASE2) +//#define SIZE_MULTIPLIER_EiB (SIZE_MULTIPLIER_PiB * SIZE_MULTIPLIER_BASE2) +//#define SIZE_MULTIPLIER_ZiB (SIZE_MULTIPLIER_EiB * SIZE_MULTIPLIER_BASE2) +//#define SIZE_MULTIPLIER_YiB (SIZE_MULTIPLIER_ZiB * SIZE_MULTIPLIER_BASE2) + +// Define multipliers for base 10 (decimal) units +#define SIZE_MULTIPLIER_BASE10 1000ULL +#define SIZE_MULTIPLIER_K (SIZE_MULTIPLIER_BASE10) +#define SIZE_MULTIPLIER_M (SIZE_MULTIPLIER_K * SIZE_MULTIPLIER_BASE10) +#define SIZE_MULTIPLIER_G (SIZE_MULTIPLIER_M * SIZE_MULTIPLIER_BASE10) +#define SIZE_MULTIPLIER_T (SIZE_MULTIPLIER_G * SIZE_MULTIPLIER_BASE10) +#define SIZE_MULTIPLIER_P (SIZE_MULTIPLIER_T * SIZE_MULTIPLIER_BASE10) +//#define SIZE_MULTIPLIER_E (SIZE_MULTIPLIER_P * SIZE_MULTIPLIER_BASE10) +//#define 
SIZE_MULTIPLIER_Z (SIZE_MULTIPLIER_E * SIZE_MULTIPLIER_BASE10) +//#define SIZE_MULTIPLIER_Y (SIZE_MULTIPLIER_Z * SIZE_MULTIPLIER_BASE10) + +// Define a structure to map size units to their multipliers +static const struct size_unit { + const char *unit; + const uint8_t base; + const bool formatter; // true when this unit should be used when formatting to string + const uint64_t multiplier; +} size_units[] = { + // the order of this table is important: smaller to bigger units! + + { .unit = "B", .base = 2, .formatter = true, .multiplier = 1ULL }, + { .unit = "k", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_K }, + { .unit = "K", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_K }, + { .unit = "KB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_K }, + { .unit = "KiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_KiB }, + { .unit = "M", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_M }, + { .unit = "MB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_M }, + { .unit = "MiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_MiB }, + { .unit = "G", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_G }, + { .unit = "GB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_G }, + { .unit = "GiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_GiB }, + { .unit = "T", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_T }, + { .unit = "TB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_T }, + { .unit = "TiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_TiB }, + { .unit = "P", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_P }, + { .unit = "PB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_P }, + { .unit = "PiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_PiB }, +// { .unit = "E", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_E }, +// { 
.unit = "EB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_E }, +// { .unit = "EiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_EiB }, +// { .unit = "Z", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_Z }, +// { .unit = "ZB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_Z }, +// { .unit = "ZiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_ZiB }, +// { .unit = "Y", .base = 10, .formatter = true, .multiplier = SIZE_MULTIPLIER_Y }, +// { .unit = "YB", .base = 10, .formatter = false, .multiplier = SIZE_MULTIPLIER_Y }, +// { .unit = "YiB", .base = 2, .formatter = true, .multiplier = SIZE_MULTIPLIER_YiB }, +}; + +static inline const struct size_unit *size_find_unit(const char *unit) { + if (!unit || !*unit) unit = "B"; + + for (size_t i = 0; i < sizeof(size_units) / sizeof(size_units[0]); i++) { + const struct size_unit *su = &size_units[i]; + if ((uint8_t)unit[0] == (uint8_t)su->unit[0] && strcmp(unit, su->unit) == 0) + return su; + } + + return NULL; +} + +static inline double size_round_to_resolution_dbl2(uint64_t value, uint64_t resolution) { + double converted = (double)value / (double)resolution; + return round(converted * 100.0) / 100.0; +} + +static inline uint64_t size_round_to_resolution_int(uint64_t value, uint64_t resolution) { + return (value + (resolution / 2)) / resolution; +} + +// ------------------------------------------------------------------------------------------------------------------- +// parse a size string + +bool size_parse(const char *size_str, uint64_t *result, const char *default_unit) { + if (!size_str || !*size_str) { + *result = 0; + return false; + } + + const struct size_unit *su_def = size_find_unit(default_unit); + if(!su_def) { + *result = 0; + return false; + } + + const char *s = size_str; + + // Skip leading spaces + while (isspace((uint8_t)*s)) s++; + + if(strcmp(s, "off") == 0) { + *result = 0; + return true; + } + + // Parse the number + const 
char *number_start = s; + NETDATA_DOUBLE value = strtondd(s, (char **)&s); + + // If no valid number found, return false + if (s == number_start || value < 0) { + *result = 0; + return false; + } + + // Skip spaces between number and unit + while (isspace((uint8_t)*s)) s++; + + const char *unit_start = s; + while (isalpha((uint8_t)*s)) s++; + + char unit[4]; + size_t unit_len = s - unit_start; + const struct size_unit *su; + if (unit_len == 0) + su = su_def; + else { + if (unit_len >= sizeof(unit)) unit_len = sizeof(unit) - 1; + strncpy(unit, unit_start, unit_len); + unit[unit_len] = '\0'; + su = size_find_unit(unit); + if (!su) { + *result = 0; + return false; + } + } + + uint64_t bytes = (uint64_t)round(value * (NETDATA_DOUBLE)su->multiplier); + *result = size_round_to_resolution_int(bytes, su_def->multiplier); + + return true; +} + +// -------------------------------------------------------------------------------------------------------------------- +// generate a string to represent a size + +ssize_t size_snprintf(char *dst, size_t dst_size, uint64_t value, const char *unit) { + if (!dst || dst_size == 0) return -1; + if (dst_size == 1) { + dst[0] = '\0'; + return -2; + } + + if (value == 0) + return snprintfz(dst, dst_size, "off"); + + const struct size_unit *su_def = size_find_unit(unit); + if(!su_def) return -3; + + // use the units multiplier to find the units + uint64_t bytes = value * su_def->multiplier; + + // Find the best unit to represent the size with up to 2 fractional digits + const struct size_unit *su_best = su_def; + for (size_t i = 0; i < sizeof(size_units) / sizeof(size_units[0]); i++) { + const struct size_unit *su = &size_units[i]; + if (su->base != su_def->base || // not the right base + su->multiplier < su_def->multiplier || // the multiplier is too small + (!su->formatter && su != su_def) || // it is not to be used in formatting (except our unit) + (bytes < su->multiplier && su != su_def) ) // the converted value will be <1.0 + continue; 
+ + double converted = size_round_to_resolution_dbl2(bytes, su->multiplier); + + uint64_t reversed_bytes = (uint64_t)(converted * (double)su->multiplier); + + if (reversed_bytes == bytes) + // no precision loss, this is good to use + su_best = su; + } + + double converted = size_round_to_resolution_dbl2(bytes, su_best->multiplier); + + // print it either with 0, 1 or 2 fractional digits + int written; + if(converted == (double)((uint64_t)converted)) + written = snprintfz(dst, dst_size, "%.0f%s", converted, su_best->unit); + else if(converted * 10.0 == (double)((uint64_t)(converted * 10.0))) + written = snprintfz(dst, dst_size, "%.1f%s", converted, su_best->unit); + else + written = snprintfz(dst, dst_size, "%.2f%s", converted, su_best->unit); + + if (written < 0) + return -4; + + if ((size_t)written >= dst_size) + return (ssize_t)(dst_size - 1); + + return written; +} + diff --git a/src/libnetdata/parsers/size.h b/src/libnetdata/parsers/size.h new file mode 100644 index 00000000000000..8d20f283d8f4d4 --- /dev/null +++ b/src/libnetdata/parsers/size.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef LIBNETDATA_PARSERS_SIZE_H +#define LIBNETDATA_PARSERS_SIZE_H + +#include "parsers.h" + +bool size_parse(const char *size_str, uint64_t *result, const char *default_unit); +#define size_parse_bytes(size_str, bytes) size_parse(size_str, bytes, "B") +#define size_parse_kb(size_str, kb) size_parse(size_str, kb, "KiB") +#define size_parse_mb(size_str, mb) size_parse(size_str, mb, "MiB") +#define size_parse_gb(size_str, gb) size_parse(size_str, gb, "GiB") + +ssize_t size_snprintf(char *dst, size_t dst_size, uint64_t value, const char *unit); +#define size_snprintf_bytes(dst, dst_size, value) size_snprintf(dst, dst_size, value, "B") +#define size_snprintf_kb(dst, dst_size, value) size_snprintf(dst, dst_size, value, "KiB") +#define size_snprintf_mb(dst, dst_size, value) size_snprintf(dst, dst_size, value, "MiB") +#define size_snprintf_gb(dst, dst_size, 
value) size_snprintf(dst, dst_size, value, "GiB") + +#endif //LIBNETDATA_PARSERS_SIZE_H diff --git a/src/libnetdata/parsers/sizes.md b/src/libnetdata/parsers/sizes.md new file mode 100644 index 00000000000000..ac9e0905324b62 --- /dev/null +++ b/src/libnetdata/parsers/sizes.md @@ -0,0 +1,52 @@ +## Data Sizes in Netdata + +Netdata provides a flexible system for specifying and formatting data sizes, used in various configurations and operations such as disk space management, and memory usage. This system allows users to specify data sizes in a human-readable format using multiple units from bytes to terabytes, supporting both binary (base-2) and decimal (base-10) standards. All units are UCUM-based for consistency and clarity. + +### Supported Size Units + +The following table lists all supported units and their corresponding values: + +| Symbol | Description | Value | Base | Formatter | +|:------:|:-----------:|:---------:|:-------:|:---------:| +| `B` | Bytes | `1B` | - | **Yes** | +| `k` | Kilobytes | `1000B` | Base-10 | No | +| `K` | Kilobytes | `1000B` | Base-10 | No | +| `KB` | Kilobytes | `1000B` | Base-10 | No | +| `KiB` | Kibibytes | `1024B` | Base-2 | **Yes** | +| `M` | Megabytes | `1000K` | Base-10 | No | +| `MB` | Megabytes | `1000K` | Base-10 | No | +| `MiB` | Mebibytes | `1024KiB` | Base-2 | **Yes** | +| `G` | Gigabytes | `1000M` | Base-10 | No | +| `GB` | Gigabytes | `1000M` | Base-10 | No | +| `GiB` | Gibibytes | `1024MiB` | Base-2 | **Yes** | +| `T` | Terabytes | `1000G` | Base-10 | No | +| `TB` | Terabytes | `1000G` | Base-10 | No | +| `TiB` | Tebibytes | `1024GiB` | Base-2 | **Yes** | +| `P` | Petabytes | `1000T` | Base-10 | No | +| `PB` | Petabytes | `1000T` | Base-10 | No | +| `PiB` | Pebibytes | `1024TiB` | Base-2 | **Yes** | + +### Size Expression Format + +Netdata allows users to express sizes using a number followed by a unit, such as `500MiB` (500 Mebibytes), `1GB` (1 Gigabyte), or `256K` (256 Kilobytes). 
+ +- **Case Sensitivity**: Note that the parsing of units is case-sensitive. + +### Size Formatting + +Netdata formats a numeric size value (in bytes) into a human-readable string with an appropriate unit. The formatter's goal is to select the largest unit that can represent the size exactly, using up to two fractional digits. If two fractional digits are not enough to precisely represent the byte count, the formatter will use a smaller unit until it can accurately express the size, eventually falling back to bytes (`B`) if necessary. + +When formatting, Netdata prefers Base-2 units (`KiB`, `MiB`, `GiB`, etc.). + +- **Examples of Size Formatting**: + - **10,485,760 bytes** is formatted as `10MiB` (10 Mebibytes). + - **1,024 bytes** is formatted as `1KiB` (1 Kibibyte). + - **1,500 bytes** remains formatted as `1500B` because it cannot be precisely represented in `KiB` or any larger unit using up to two fractional digits. + +### Example Size Expressions + +Here are some examples of valid size expressions: + +1. `1024B`: 1024 bytes. +2. `1KiB`: 1024 bytes. +3. `5MiB`: 5 mebibytes (5 * 1024 * 1024 bytes). 
diff --git a/src/libnetdata/parsers/timeframe.c b/src/libnetdata/parsers/timeframe.c new file mode 100644 index 00000000000000..33ea6975030492 --- /dev/null +++ b/src/libnetdata/parsers/timeframe.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "timeframe.h" + +// -------------------------------------------------------------------------------------------------------------------- +// timeframe +/* +TIMEFRAME timeframe_parse(const char *txt) { + if(!txt || !*txt) + return TIMEFRAME_INVALID; + +char buf[strlen(txt) + 1]; +memcpy(buf, txt, strlen(txt) + 1); +char *s = trim_all(buf); +if(!s) + return TIMEFRAME_INVALID; + +while(isspace(*s)) s++; + +if(strcasecmp(s, "this minute") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_THIS_MINUTE, + .before = 0, + }; +} +if(strcasecmp(s, "this hour") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_THIS_HOUR, + .before = 0, + }; +} +if(strcasecmp(s, "today") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_TODAY, + .before = 0, + }; +} +if(strcasecmp(s, "this week") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_THIS_WEEK, + .before = 0, + }; +} +if(strcasecmp(s, "this month") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_THIS_MONTH, + .before = 0, + }; +} +if(strcasecmp(s, "this year") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_THIS_YEAR, + .before = 0, + }; +} + +if(strcasecmp(s, "last minute") == 0) { + return (TIMEFRAME) { + .after = -60, + .before = API_RELATIVE_TIME_THIS_MINUTE, + }; +} +if(strcasecmp(s, "last hour") == 0) { + return (TIMEFRAME) { + .after = -3600, + .before = API_RELATIVE_TIME_THIS_HOUR, + }; +} +if(strcasecmp(s, "yesterday") == 0) { + return (TIMEFRAME) { + .after = -86400, + .before = API_RELATIVE_TIME_TODAY, + }; +} +if(strcasecmp(s, "this week") == 0) { + return (TIMEFRAME) { + .after = -86400 * 7, + .before = API_RELATIVE_TIME_THIS_WEEK, + }; +} +if(strcasecmp(s, "this month") == 0) { + return 
(TIMEFRAME) { + .after = API_RELATIVE_TIME_LAST_MONTH, + .before = API_RELATIVE_TIME_THIS_MONTH, + }; +} +if(strcasecmp(s, "last year") == 0) { + return (TIMEFRAME) { + .after = API_RELATIVE_TIME_LAST_YEAR, + .before = API_RELATIVE_TIME_THIS_YEAR, + }; +} + +const char *end; +double after = strtondd(s, (char **)&end); + +if(end == s) + return TIMEFRAME_INVALID; + +s = end; +while(isspace(*s)) s++; + +time_t multiplier = 1; +if(!isdigit(*s) && *s != '-') { + // after has units + bool found = false; + + for (size_t i = 0; i < sizeof(units) / sizeof(units[0]); i++) { + size_t len = strlen(units[i].unit); + + if (units[i].multiplier >= 1 * NSEC_PER_USEC && + strncmp(s, units[i].unit, len) == 0 && + (isspace(s[len]) || s[len] == '-')) { + multiplier = units[i].multiplier / NSEC_PER_SEC; + found = true; + s += len; + } + } + + if(!found) + return TIMEFRAME_INVALID; +} + +const char *dash = strchr(s, '-'); +if(!dash) return TIMEFRAME_INVALID; + +} +*/ diff --git a/src/libnetdata/parsers/timeframe.h b/src/libnetdata/parsers/timeframe.h new file mode 100644 index 00000000000000..a176dd30a19d0e --- /dev/null +++ b/src/libnetdata/parsers/timeframe.h @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_TIMEFRAME_H +#define NETDATA_TIMEFRAME_H + +#include "parsers.h" + +typedef struct { + time_t after; + time_t before; +} TIMEFRAME; + +#define API_RELATIVE_TIME_MAX (3 * 365 * 86400) + +#define API_RELATIVE_TIME_INVALID (-1000000000) + +#define API_RELATIVE_TIME_THIS_MINUTE (API_RELATIVE_TIME_INVALID - 1) // this minute at 00 seconds +#define API_RELATIVE_TIME_THIS_HOUR (API_RELATIVE_TIME_INVALID - 2) // this hour at 00 minutes, 00 seconds +#define API_RELATIVE_TIME_TODAY (API_RELATIVE_TIME_INVALID - 3) // today at 00:00:00 +#define API_RELATIVE_TIME_THIS_WEEK (API_RELATIVE_TIME_INVALID - 4) // this Monday, 00:00:00 +#define API_RELATIVE_TIME_THIS_MONTH (API_RELATIVE_TIME_INVALID - 5) // this month's 1st at 00:00:00 +#define
API_RELATIVE_TIME_THIS_YEAR (API_RELATIVE_TIME_INVALID - 6) // this year's Jan 1st, at 00:00:00 +#define API_RELATIVE_TIME_LAST_MONTH (API_RELATIVE_TIME_INVALID - 7) // last month's 1st, at 00:00:00 +#define API_RELATIVE_TIME_LAST_YEAR (API_RELATIVE_TIME_INVALID - 8) // last year's Jan 1st, at 00:00:00 + +#define TIMEFRAME_INVALID (TIMEFRAME){ .after = API_RELATIVE_TIME_INVALID, .before = API_RELATIVE_TIME_INVALID } + +#endif //NETDATA_TIMEFRAME_H diff --git a/src/libnetdata/required_dummies.h b/src/libnetdata/required_dummies.h index 5bc2ded79e6895..cff4c563a5a375 100644 --- a/src/libnetdata/required_dummies.h +++ b/src/libnetdata/required_dummies.h @@ -20,6 +20,6 @@ void service_exits(void){} void rrd_collector_finished(void){} // required by get_system_cpus() -char *netdata_configured_host_prefix = ""; +const char *netdata_configured_host_prefix = ""; #endif // NETDATA_LIB_DUMMIES_H diff --git a/src/libnetdata/socket/security.c b/src/libnetdata/socket/security.c index 4c603610aa2dd7..33bf22d75491ae 100644 --- a/src/libnetdata/socket/security.c +++ b/src/libnetdata/socket/security.c @@ -730,7 +730,7 @@ int security_test_certificate(SSL *ssl) { * * @return It returns 0 on success and -1 otherwise. 
*/ -int ssl_security_location_for_context(SSL_CTX *ctx, char *file, char *path) { +int ssl_security_location_for_context(SSL_CTX *ctx, const char *file, const char *path) { int load_custom = 1, load_default = 1; if (file || path) { if(!SSL_CTX_load_verify_locations(ctx, file, path)) { diff --git a/src/libnetdata/socket/security.h b/src/libnetdata/socket/security.h index 6a981a955108fe..c5c4d79c56beef 100644 --- a/src/libnetdata/socket/security.h +++ b/src/libnetdata/socket/security.h @@ -50,7 +50,7 @@ extern const char *tls_version; extern const char *tls_ciphers; extern bool netdata_ssl_validate_certificate; extern bool netdata_ssl_validate_certificate_sender; -int ssl_security_location_for_context(SSL_CTX *ctx,char *file,char *path); +int ssl_security_location_for_context(SSL_CTX *ctx, const char *file, const char *path); void netdata_ssl_initialize_openssl(); void netdata_ssl_cleanup(); diff --git a/src/libnetdata/socket/socket.c b/src/libnetdata/socket/socket.c index f0925038e4095b..b936b5de7faa4f 100644 --- a/src/libnetdata/socket/socket.c +++ b/src/libnetdata/socket/socket.c @@ -748,9 +748,9 @@ int listen_sockets_setup(LISTEN_SOCKETS *sockets) { } else sockets->default_port = (uint16_t)new_port; - char *s = appconfig_get(sockets->config, sockets->config_section, "bind to", sockets->default_bind_to); + const char *s = appconfig_get(sockets->config, sockets->config_section, "bind to", sockets->default_bind_to); while(*s) { - char *e = s; + const char *e = s; // skip separators, moving both s(tart) and e(nd) while(isspace((uint8_t)*e) || *e == ',') s = ++e; diff --git a/src/libnetdata/string/string.h b/src/libnetdata/string/string.h index 3408f52591c15d..1d5e5164a6b4c0 100644 --- a/src/libnetdata/string/string.h +++ b/src/libnetdata/string/string.h @@ -30,6 +30,10 @@ static inline int string_strcmp(STRING *string, const char *s) { return strcmp(string2str(string), s); } +static inline int string_strncmp(STRING *string, const char *s, size_t n) { + return 
strncmp(string2str(string), s, n); +} + void string_statistics(size_t *inserts, size_t *deletes, size_t *searches, size_t *entries, size_t *references, size_t *memory, size_t *duplications, size_t *releases); int string_unittest(size_t entries); diff --git a/src/ml/Config.cc b/src/ml/Config.cc index c6a750995de215..84846cd01ca7ba 100644 --- a/src/ml/Config.cc +++ b/src/ml/Config.cc @@ -27,29 +27,32 @@ void ml_config_load(ml_config_t *cfg) { unsigned max_train_samples = config_get_number(config_section_ml, "maximum num samples to train", 6 * 3600); unsigned min_train_samples = config_get_number(config_section_ml, "minimum num samples to train", 1 * 900); - unsigned train_every = config_get_number(config_section_ml, "train every", 3 * 3600); + unsigned train_every = config_get_duration_seconds(config_section_ml, "train every", 3 * 3600); unsigned num_models_to_use = config_get_number(config_section_ml, "number of models per dimension", 18); - unsigned delete_models_older_than = config_get_number(config_section_ml, "delete models older than", 60 * 60 * 24 * 7); + unsigned delete_models_older_than = config_get_duration_seconds(config_section_ml, "delete models older than", 60 * 60 * 24 * 7); unsigned diff_n = config_get_number(config_section_ml, "num samples to diff", 1); unsigned smooth_n = config_get_number(config_section_ml, "num samples to smooth", 3); unsigned lag_n = config_get_number(config_section_ml, "num samples to lag", 5); - double random_sampling_ratio = config_get_float(config_section_ml, "random sampling ratio", 1.0 / 5.0 /* default lag_n */); + double random_sampling_ratio = config_get_double(config_section_ml, "random sampling ratio", 1.0 / 5.0 /* default lag_n */); unsigned max_kmeans_iters = config_get_number(config_section_ml, "maximum number of k-means iterations", 1000); - double dimension_anomaly_rate_threshold = config_get_float(config_section_ml, "dimension anomaly score threshold", 0.99); + double dimension_anomaly_rate_threshold = 
config_get_double(config_section_ml, "dimension anomaly score threshold", 0.99); - double host_anomaly_rate_threshold = config_get_float(config_section_ml, "host anomaly rate threshold", 1.0); + double host_anomaly_rate_threshold = config_get_double(config_section_ml, "host anomaly rate threshold", 1.0); std::string anomaly_detection_grouping_method = config_get(config_section_ml, "anomaly detection grouping method", "average"); - time_t anomaly_detection_query_duration = config_get_number(config_section_ml, "anomaly detection grouping duration", 5 * 60); + time_t anomaly_detection_query_duration = config_get_duration_seconds(config_section_ml, "anomaly detection grouping duration", 5 * 60); size_t num_training_threads = config_get_number(config_section_ml, "num training threads", 4); size_t flush_models_batch_size = config_get_number(config_section_ml, "flush models batch size", 128); - size_t suppression_window = config_get_number(config_section_ml, "dimension anomaly rate suppression window", 900); - size_t suppression_threshold = config_get_number(config_section_ml, "dimension anomaly rate suppression threshold", suppression_window / 2); + size_t suppression_window = + config_get_duration_seconds(config_section_ml, "dimension anomaly rate suppression window", 900); + + size_t suppression_threshold = + config_get_number(config_section_ml, "dimension anomaly rate suppression threshold", suppression_window / 2); bool enable_statistics_charts = config_get_boolean(config_section_ml, "enable statistics charts", false); diff --git a/src/ml/ml-configuration.md b/src/ml/ml-configuration.md index 12cc20a47faed5..86a33c5d8f8106 100644 --- a/src/ml/ml-configuration.md +++ b/src/ml/ml-configuration.md @@ -15,7 +15,7 @@ Below is a list of all the available configuration params and their default valu # enabled = yes # maximum num samples to train = 21600 # minimum num samples to train = 900 - # train every = 10800 + # train every = 3h # number of models per dimension = 18 # 
dbengine anomaly rate every = 30 # num samples to diff = 1 @@ -26,12 +26,12 @@ Below is a list of all the available configuration params and their default valu # dimension anomaly score threshold = 0.99 # host anomaly rate threshold = 1.0 # anomaly detection grouping method = average - # anomaly detection grouping duration = 300 + # anomaly detection grouping duration = 5m # hosts to skip from training = !* # charts to skip from training = netdata.* - # dimension anomaly rate suppression window = 900 + # dimension anomaly rate suppression window = 15m # dimension anomaly rate suppression threshold = 450 - # delete models older than = 604800 + # delete models older than = 7d ``` ## Configuration Examples @@ -88,8 +88,8 @@ flowchart BT - `enabled`: `yes` to enable, `no` to disable. - `maximum num samples to train`: (`3600`/`86400`) This is the maximum amount of time you would like to train each model on. For example, the default of `21600` trains on the preceding 6 hours of data, assuming an `update every` of 1 second. - `minimum num samples to train`: (`900`/`21600`) This is the minimum amount of data required to be able to train a model. For example, the default of `900` implies that once at least 15 minutes of data is available for training, a model is trained, otherwise it is skipped and checked again at the next training run. -- `train every`: (`1800`/`21600`) This is how often each model will be retrained. For example, the default of `10800` means that each model is retrained every 3 hours. Note: The training of all models is spread out across the `train every` period for efficiency, so in reality, it means that each model will be trained in a staggered manner within each `train every` period. -- `number of models per dimension`: (`1`/`168`) This is the number of trained models that will be used for scoring. 
For example the default `number of models per dimension = 18` means that the most recently trained 18 models for the dimension will be used to determine the corresponding anomaly bit. This means that under default settings of `maximum num samples to train = 21600`, `train every = 10800` and `number of models per dimension = 18`, netdata will store and use the last 18 trained models for each dimension when determining the anomaly bit. This means that for the latest feature vector in this configuration to be considered anomalous it would need to look anomalous across _all_ the models trained for that dimension in the last 18*(10800/3600) ~= 54 hours. As such, increasing `number of models per dimension` may reduce some false positives since it will result in more models (covering a wider time frame of training) being used during scoring. +- `train every`: (`3h`/`6h`) This is how often each model will be retrained. For example, the default of `3h` means that each model is retrained every 3 hours. Note: The training of all models is spread out across the `train every` period for efficiency, so in reality, it means that each model will be trained in a staggered manner within each `train every` period. +- `number of models per dimension`: (`1`/`168`) This is the number of trained models that will be used for scoring. For example the default `number of models per dimension = 18` means that the most recently trained 18 models for the dimension will be used to determine the corresponding anomaly bit. This means that under default settings of `maximum num samples to train = 21600`, `train every = 3h` and `number of models per dimension = 18`, netdata will store and use the last 18 trained models for each dimension when determining the anomaly bit. This means that for the latest feature vector in this configuration to be considered anomalous it would need to look anomalous across _all_ the models trained for that dimension in the last 18*(10800/3600) ~= 54 hours. 
As such, increasing `number of models per dimension` may reduce some false positives since it will result in more models (covering a wider time frame of training) being used during scoring. - `dbengine anomaly rate every`: (`30`/`900`) This is how often netdata will aggregate all the anomaly bits into a single chart (`anomaly_detection.anomaly_rates`). The aggregation into a single chart allows enabling anomaly rate ranking over _all_ metrics with one API call as opposed to a call per chart. - `num samples to diff`: (`0`/`1`) This is a `0` or `1` to determine if you want the model to operate on differences of the raw data or just the raw data. For example, the default of `1` means that we take differences of the raw values. Using differences is more general and works on dimensions that might naturally tend to have some trends or cycles in them that is normal behavior to which we don't want to be too sensitive. - `num samples to smooth`: (`0`/`5`) This is a small integer that controls the amount of smoothing applied as part of the feature processing used by the model. For example, the default of `3` means that the rolling average of the last 3 values is used. Smoothing like this helps the model be a little more robust to spiky types of dimensions that naturally "jump" up or down as part of their normal behavior. @@ -99,7 +99,7 @@ flowchart BT - `dimension anomaly score threshold`: (`0.01`/`5.00`) This is the threshold at which an individual dimension at a specific timestep is considered anomalous or not. For example, the default of `0.99` means that a dimension with an anomaly score of 99% or higher is flagged as anomalous. This is a normalized probability based on the training data, so the default of 99% means that anything that is as strange (based on distance measure) or more strange as the most strange 1% of data observed during training will be flagged as anomalous. 
If you wanted to make the anomaly detection on individual dimensions more sensitive you could try a value like `0.90` (90%) or to make it less sensitive you could try `1.5` (150%). - `host anomaly rate threshold`: (`0.1`/`10.0`) This is the percentage of dimensions (based on all those enabled for anomaly detection) that need to be considered anomalous at specific timestep for the host itself to be considered anomalous. For example, the default value of `1.0` means that if more than 1% of dimensions are anomalous at the same time then the host itself is considered in an anomalous state. - `anomaly detection grouping method`: The grouping method used when calculating node level anomaly rate. -- `anomaly detection grouping duration`: (`60`/`900`) The duration across which to calculate the node level anomaly rate, the default of `900` means that the node level anomaly rate is calculated across a rolling 5 minute window. +- `anomaly detection grouping duration`: (`1m`/`15m`) The duration across which to calculate the node level anomaly rate, the default of `5m` means that the node level anomaly rate is calculated across a rolling 5 minute window. - `hosts to skip from training`: This parameter allows you to turn off anomaly detection for any child hosts on a parent host by defining those you would like to skip from training here. For example, a value like `dev-*` skips all hosts on a parent that begin with the "dev-" prefix. The default value of `!*` means "don't skip any". - `charts to skip from training`: This parameter allows you to exclude certain charts from anomaly detection. By default, only netdata related charts are excluded. This is to avoid the scenario where accessing the netdata dashboard could itself trigger some anomalies if you don't access them regularly. If you want to include charts that are excluded by default, add them in small groups and then measure any impact on performance before adding additional ones.
Example: If you want to include system, apps, and user charts:`!system.* !apps.* !user.* *`. -- `delete models older than`: (`86400`/`604800`) Delete old models from the database that are unused, by default models will be deleted after 7 days. +- `delete models older than`: (`1d`/`7d`) Delete old models from the database that are unused, by default models will be deleted after 7 days. diff --git a/src/registry/registry.h b/src/registry/registry.h index 848eb0ac00bc64..b2eb7c00dd66ec 100644 --- a/src/registry/registry.h +++ b/src/registry/registry.h @@ -76,7 +76,7 @@ void registry_statistics(void); const char *registry_get_this_machine_guid(void); char *registry_get_mgmt_api_key(void); -char *registry_get_this_machine_hostname(void); +const char *registry_get_this_machine_hostname(void); int regenerate_guid(const char *guid, char *result); diff --git a/src/registry/registry_init.c b/src/registry/registry_init.c index 9093610eab537d..b98c04beaa7c87 100644 --- a/src/registry/registry_init.c +++ b/src/registry/registry_init.c @@ -93,7 +93,7 @@ int registry_init(void) { // configuration options registry.save_registry_every_entries = (unsigned long long)config_get_number(CONFIG_SECTION_REGISTRY, "registry save db every new entries", 1000000); - registry.persons_expiration = config_get_number(CONFIG_SECTION_REGISTRY, "registry expire idle persons days", 365) * 86400; + registry.persons_expiration = config_get_duration_days(CONFIG_SECTION_REGISTRY, "registry expire idle persons", 365) * 86400; registry.registry_domain = config_get(CONFIG_SECTION_REGISTRY, "registry domain", ""); registry.registry_to_announce = config_get(CONFIG_SECTION_REGISTRY, "registry to announce", "https://registry.my-netdata.io"); registry.hostname = config_get(CONFIG_SECTION_REGISTRY, "registry hostname", netdata_configured_hostname); diff --git a/src/registry/registry_internals.c b/src/registry/registry_internals.c index 5e83bdb79f2a07..51a861866b61fd 100644 --- a/src/registry/registry_internals.c 
+++ b/src/registry/registry_internals.c @@ -266,7 +266,7 @@ static inline int is_machine_guid_blacklisted(const char *guid) { return 0; } -char *registry_get_this_machine_hostname(void) { +const char *registry_get_this_machine_hostname(void) { return registry.hostname; } diff --git a/src/registry/registry_internals.h b/src/registry/registry_internals.h index c2270eb83c3929..39d37e4f03eef8 100644 --- a/src/registry/registry_internals.h +++ b/src/registry/registry_internals.h @@ -30,9 +30,9 @@ struct registry { // configuration unsigned long long save_registry_every_entries; - char *registry_domain; - char *hostname; - char *registry_to_announce; + const char *registry_domain; + const char *hostname; + const char *registry_to_announce; const char *cloud_base_url; time_t persons_expiration; // seconds to expire idle persons int verify_cookies_redirects; @@ -42,10 +42,10 @@ struct registry { size_t max_name_length; // file/path names - char *pathname; - char *db_filename; - char *log_filename; - char *machine_guid_filename; + const char *pathname; + const char *db_filename; + const char *log_filename; + const char *machine_guid_filename; // open files FILE *log_fp; diff --git a/src/streaming/README.md b/src/streaming/README.md index fe4e01baed92d5..e34b2c94c496ca 100644 --- a/src/streaming/README.md +++ b/src/streaming/README.md @@ -30,6 +30,8 @@ node**. This file is automatically generated by Netdata the first time it is sta #### `[stream]` section +This section is used by the sending Netdata. + | Setting | Default | Description | |-------------------------------------------------|---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `enabled` | `no` | Whether this node streams metrics to any parent. Change to `yes` to enable streaming. 
| @@ -38,34 +40,62 @@ node**. This file is automatically generated by Netdata the first time it is sta | `CApath` | `/etc/ssl/certs/` | The directory where known certificates are found. Defaults to OpenSSL's default path. | | `CAfile` | `/etc/ssl/certs/cert.pem` | Add a parent node certificate to the list of known certificates in `CAPath`. | | `api key` | | The `API_KEY` to use as the child node. | -| `timeout seconds` | `60` | The timeout to connect and send metrics to a parent. | +| `timeout` | `1m` | The timeout to connect and send metrics to a parent. | | `default port` | `19999` | The port to use if `destination` does not specify one. | | [`send charts matching`](#send-charts-matching) | `*` | A space-separated list of [Netdata simple patterns](/src/libnetdata/simple_pattern/README.md) to filter which charts are streamed. [Read more →](#send-charts-matching) | | `buffer size bytes` | `10485760` | The size of the buffer to use when sending metrics. The default `10485760` equals a buffer of 10MB, which is good for 60 seconds of data. Increase this if you expect latencies higher than that. The buffer is flushed on reconnect. | -| `reconnect delay seconds` | `5` | How long to wait until retrying to connect to the parent node. | +| `reconnect delay` | `5s` | How long to wait until retrying to connect to the parent node. | | `initial clock resync iterations` | `60` | Sync the clock of charts for how many seconds when starting. | | `parent using h2o` | `no` | Set to yes if you are connecting to parent through its h2o webserver/port. Currently there is no reason to set this to `yes` unless you are testing the new h2o based netdata webserver. When production ready this will be set to `yes` as default.
| -### `[API_KEY]` and `[MACHINE_GUID]` sections - -| Setting | Default | Description | -|-----------------------------------------------|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `enabled` | `no` | Whether this API KEY enabled or disabled. | -| [`allow from`](#allow-from) | `*` | A space-separated list of [Netdata simple patterns](/src/libnetdata/simple_pattern/README.md) matching the IPs of nodes that will stream metrics using this API key. [Read more →](#allow-from) | -| `default history` | `3600` | The default amount of child metrics history to retain when using the `ram` memory mode. | -| [`default memory mode`](#default-memory-mode) | `ram` | The [database](/src/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `ram`, or `none`. [Read more →](#default-memory-mode) | -| `health enabled by default` | `auto` | Whether alerts and notifications should be enabled for nodes using this `API_KEY`. `auto` enables alerts when the child is connected. `yes` enables alerts always, and `no` disables alerts. | -| `default postpone alarms on connect seconds` | `60` | Postpone alerts and notifications for a period of time after the child connects. | -| `default health log history` | `432000` | History of health log events (in seconds) kept in the database. | -| `default proxy enabled` | | Route metrics through a proxy. | -| `default proxy destination` | | Space-separated list of `IP:PORT` for proxies. | -| `default proxy api key` | | The `API_KEY` of the proxy. | -| `default send charts matching` | `*` | See [`send charts matching`](#send-charts-matching). | -| `enable compression` | `yes` | Enable/disable stream compression. | -| `enable replication` | `yes` | Enable/disable replication. 
| -| `seconds to replicate` | `86400` | How many seconds of data to replicate from each child at a time | -| `seconds per replication step` | `600` | The duration we want to replicate per each replication step. | -| `is ephemeral node` | `no` | Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable after the specified duration of "cleanup ephemeral hosts after secs" from the time of the node's last connection. | +### `[API_KEY]` sections + +This section defines an API key for other agents to connect to this Netdata. + +| Setting | Default | Description | +|------------------------------|------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `enabled` | `no` | Whether this API KEY enabled or disabled. | +| `type` | `api` | This section defines an API key. | +| [`allow from`](#allow-from) | `*` | A space-separated list of [Netdata simple patterns](/src/libnetdata/simple_pattern/README.md) matching the IPs of nodes that will stream metrics using this API key. [Read more →](#allow-from) | +| `retention` | `1h` | The default amount of child metrics history to retain when using the `ram` db. | +| [`db`](#default-memory-mode) | `dbengine` | The [database](/src/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `ram`, or `none`. [Read more →](#default-memory-mode) | +| `health enabled by default` | `auto` | Whether alerts and notifications should be enabled for nodes using this `API_KEY`. `auto` enables alerts when the child is connected. `yes` enables alerts always, and `no` disables alerts. | +| `postpone alerts on connect` | `1m` | Postpone alerts and notifications for a period of time after the child connects. | +| `health log retention` | `5d` | History of health log events (in seconds) kept in the database. 
| +| `proxy enabled` | | Route metrics through a proxy. | +| `proxy destination` | | Space-separated list of `IP:PORT` for proxies. | +| `proxy api key` | | The `API_KEY` of the proxy. | +| `send charts matching` | `*` | See [`send charts matching`](#send-charts-matching). | +| `enable compression` | `yes` | Enable/disable stream compression. | +| `enable replication` | `yes` | Enable/disable replication. | +| `replication period` | `1d` | Limits the maximum window that will be replicated from each child. | +| `replication step` | `10m` | The duration we want to replicate per each replication step. | +| `is ephemeral node` | `no` | Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable after the specified duration of "cleanup ephemeral hosts after" from the time of the node's last connection. | + + +### `[MACHINE_GUID]` sections + +This section is about customizing configuration for specific agents. It allows many agents to share the same API key, while providing customizability per remote agent. + +| Setting | Default | Description | +|------------------------------|------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `enabled` | `no` | Whether this MACHINE_GUID enabled or disabled. | +| `type` | `machine` | This section defines the configuration for a specific agent. | +| [`allow from`](#allow-from) | `*` | A space-separated list of [Netdata simple patterns](/src/libnetdata/simple_pattern/README.md) matching the IPs of nodes that will stream metrics using this API key. [Read more →](#allow-from) | +| `retention` | `3600` | The default amount of child metrics history to retain when using the `ram` db. | +| [`db`](#default-memory-mode) | `dbengine` | The [database](/src/database/README.md) to use for all nodes using this `API_KEY`. 
Valid settings are `dbengine`, `ram`, or `none`. [Read more →](#default-memory-mode) | +| `health enabled` | `auto` | Whether alerts and notifications should be enabled for nodes using this `API_KEY`. `auto` enables alerts when the child is connected. `yes` enables alerts always, and `no` disables alerts. | +| `postpone alerts on connect` | `1m` | Postpone alerts and notifications for a period of time after the child connects. | +| `health log retention` | `5d` | History of health log events (in seconds) kept in the database. | +| `proxy enabled` | | Route metrics through a proxy. | +| `proxy destination` | | Space-separated list of `IP:PORT` for proxies. | +| `proxy api key` | | The `API_KEY` of the proxy. | +| `send charts matching` | `*` | See [`send charts matching`](#send-charts-matching). | +| `enable compression` | `yes` | Enable/disable stream compression. | +| `enable replication` | `yes` | Enable/disable replication. | +| `replication period` | `1d` | Limits the maximum window that will be replicated from each child. | +| `replication step` | `10m` | The duration we want to replicate per each replication step. | +| `is ephemeral node` | `no` | Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable after the specified duration of "cleanup ephemeral hosts after" from the time of the node's last connection. | #### `destination` @@ -131,7 +161,7 @@ To allow all IPs starting with `10.*`, except `10.1.2.3`: > If you set specific IP addresses here, and also use the `allow connections` setting in the `[web]` section of > `netdata.conf`, be sure to add the IP address there so that it can access the API port. -#### `default memory mode` +#### `db` The [database](/src/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `ram`, , or `none`. @@ -142,19 +172,15 @@ Valid settings are `dbengine`, `ram`, , or `none`. streaming configurations that use ephemeral nodes. - `none`: No database. 
-When using `default memory mode = dbengine`, the parent node creates a separate instance of the TSDB to store metrics -from child nodes. The [size of _each_ instance is configurable](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md) with the `page -cache size` and `dbengine multihost disk space` settings in the `[global]` section in `netdata.conf`. - ### `netdata.conf` -| Setting | Default | Description | -|--------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `[global]` section | | | -| `memory mode` | `dbengine` | Determines the [database type](/src/database/README.md) to be used on that node. Other options settings include `none`, and `ram`. `none` disables the database at this host. This also disables alerts and notifications, as those can't run without a database. | -| `[web]` section | | | -| `mode` | `static-threaded` | Determines the [web server](/src/web/server/README.md) type. The other option is `none`, which disables the dashboard, API, and registry. | -| `accept a streaming request every seconds` | `0` | Set a limit on how often a parent node accepts streaming requests from child nodes. `0` equals no limit. If this is set, you may see `... too busy to accept new streaming request. Will be allowed in X secs` in Netdata's `error.log`. 
| +| Setting | Default | Description | +|------------------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `[db]` section | | | +| `mode` | `dbengine` | Determines the [database type](/src/database/README.md) to be used on that node. Other options settings include `none`, and `ram`. `none` disables the database at this host. This also disables alerts and notifications, as those can't run without a database. | +| `[web]` section | | | +| `mode` | `static-threaded` | Determines the [web server](/src/web/server/README.md) type. The other option is `none`, which disables the dashboard, API, and registry. | +| `accept a streaming request every` | `off` | Set a limit on how often a parent node accepts streaming requests from child nodes. `0` equals no limit. If this is set, you may see `... too busy to accept new streaming request. Will be allowed in X secs` in Netdata's `error.log`. | ### Basic use cases @@ -178,13 +204,13 @@ default `dbengine` as specified by the `API_KEY`, and alerts are disabled. 
```conf [API_KEY] enabled = yes - default memory mode = dbengine - health enabled by default = auto + db = dbengine + health enabled = auto allow from = * [MACHINE_GUID] enabled = yes - memory mode = ram + db = ram health enabled = no ``` @@ -413,7 +439,7 @@ In the following example, the proxy receives metrics from a child node using the [66666666-7777-8888-9999-000000000000] enabled = yes - default memory mode = dbengine + db = dbengine ``` ### Ephemeral nodes @@ -429,7 +455,7 @@ On the parent, set the following in `stream.conf`: enabled = yes # one hour of data for each of the child nodes - default history = 3600 + history = 1h # do not save child metrics on disk default memory = ram @@ -455,9 +481,9 @@ On the child nodes, set the following in `stream.conf`: In addition, edit `netdata.conf` on each child node to disable the database and alerts. ```bash -[global] +[db] # disable the local database - memory mode = none + db = none [health] # disable health checks @@ -476,11 +502,11 @@ Replication is enabled by default in Netdata, but you can customize the replicat # Enable replication for all hosts using this api key. Default: yes. enable replication = yes - # How many seconds of data to replicate from each child at a time. Default: a day (86400 seconds). - seconds to replicate = 86400 + # How many seconds of data to replicate from each child at a time. Default: a day. + replication period = 1d - # The duration we want to replicate per each replication step. Default: 600 seconds (10 minutes). - seconds per replication step = 600 + # The duration we want to replicate per each replication step. Default: 10 minutes. + replication step = 10m ``` You can monitor the replication process in two ways: @@ -597,9 +623,9 @@ ERROR : STREAM_SENDER[CHILD HOSTNAME] : STREAM child HOSTNAME [send to PARENT HO ### Stream charts wrong Chart data needs to be consistent between child and parent nodes. 
If there are differences between chart data on -a parent and a child, such as gaps in metrics collection, it most often means your child's `memory mode` +a parent and a child, such as gaps in metrics collection, it most often means your child's `[db].db` setting does not match the parent's. To learn more about the different ways Netdata can store metrics, and thus keep chart -data consistent, read our [memory mode documentation](/src/database/README.md). +data consistent, read our [db documentation](/src/database/README.md). ### Forbidding access diff --git a/src/streaming/receiver.c b/src/streaming/receiver.c index 6a7a37d9feb0b9..6073dabefb43a6 100644 --- a/src/streaming/receiver.c +++ b/src/streaming/receiver.c @@ -6,19 +6,6 @@ extern struct config stream_config; void receiver_state_free(struct receiver_state *rpt) { - - freez(rpt->key); - freez(rpt->hostname); - freez(rpt->registry_hostname); - freez(rpt->machine_guid); - freez(rpt->os); - freez(rpt->timezone); - freez(rpt->abbrev_timezone); - freez(rpt->client_ip); - freez(rpt->client_port); - freez(rpt->program_name); - freez(rpt->program_version); - netdata_ssl_close(&rpt->ssl); if(rpt->fd != -1) { @@ -29,10 +16,21 @@ void receiver_state_free(struct receiver_state *rpt) { rrdpush_decompressor_destroy(&rpt->decompressor); if(rpt->system_info) - rrdhost_system_info_free(rpt->system_info); + rrdhost_system_info_free(rpt->system_info); __atomic_sub_fetch(&netdata_buffers_statistics.rrdhost_receivers, sizeof(*rpt), __ATOMIC_RELAXED); + freez(rpt->key); + freez(rpt->hostname); + freez(rpt->registry_hostname); + freez(rpt->machine_guid); + freez(rpt->os); + freez(rpt->timezone); + freez(rpt->abbrev_timezone); + freez(rpt->client_ip); + freez(rpt->client_port); + freez(rpt->program_name); + freez(rpt->program_version); freez(rpt); } @@ -433,7 +431,7 @@ static bool rrdhost_set_receiver(RRDHOST *host, struct receiver_state *rpt) { } } - host->health_log.health_log_history = rpt->config.alarms_history; + 
host->health_log.health_log_retention_s = rpt->config.alarms_history; // this is a test // if(rpt->hops <= host->sender->hops) @@ -589,7 +587,7 @@ static void rrdpush_receive(struct receiver_state *rpt) rpt->config.health_enabled = health_plugin_enabled(); rpt->config.alarms_delay = 60; - rpt->config.alarms_history = HEALTH_LOG_DEFAULT_HISTORY; + rpt->config.alarms_history = HEALTH_LOG_RETENTION_DEFAULT; rpt->config.rrdpush_enabled = (int)default_rrdpush_enabled; rpt->config.rrdpush_destination = default_rrdpush_destination; @@ -600,15 +598,15 @@ static void rrdpush_receive(struct receiver_state *rpt) rpt->config.rrdpush_seconds_to_replicate = default_rrdpush_seconds_to_replicate; rpt->config.rrdpush_replication_step = default_rrdpush_replication_step; - rpt->config.update_every = (int)appconfig_get_number(&stream_config, rpt->machine_guid, "update every", rpt->config.update_every); + rpt->config.update_every = (int)appconfig_get_duration_seconds(&stream_config, rpt->machine_guid, "update every", rpt->config.update_every); if(rpt->config.update_every < 0) rpt->config.update_every = 1; - rpt->config.history = (int)appconfig_get_number(&stream_config, rpt->key, "default history", rpt->config.history); - rpt->config.history = (int)appconfig_get_number(&stream_config, rpt->machine_guid, "history", rpt->config.history); + rpt->config.history = (int)appconfig_get_number(&stream_config, rpt->key, "retention", rpt->config.history); + rpt->config.history = (int)appconfig_get_number(&stream_config, rpt->machine_guid, "retention", rpt->config.history); if(rpt->config.history < 5) rpt->config.history = 5; - rpt->config.mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->key, "default memory mode", rrd_memory_mode_name(rpt->config.mode))); - rpt->config.mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->machine_guid, "memory mode", rrd_memory_mode_name(rpt->config.mode))); + rpt->config.mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->key, "db", 
rrd_memory_mode_name(rpt->config.mode))); + rpt->config.mode = rrd_memory_mode_id(appconfig_get(&stream_config, rpt->machine_guid, "db", rrd_memory_mode_name(rpt->config.mode))); if (unlikely(rpt->config.mode == RRD_MEMORY_MODE_DBENGINE && !dbengine_enabled)) { netdata_log_error("STREAM '%s' [receive from %s:%s]: " @@ -623,32 +621,32 @@ static void rrdpush_receive(struct receiver_state *rpt) rpt->config.health_enabled = appconfig_get_boolean_ondemand(&stream_config, rpt->key, "health enabled by default", rpt->config.health_enabled); rpt->config.health_enabled = appconfig_get_boolean_ondemand(&stream_config, rpt->machine_guid, "health enabled", rpt->config.health_enabled); - rpt->config.alarms_delay = appconfig_get_number(&stream_config, rpt->key, "default postpone alarms on connect seconds", rpt->config.alarms_delay); - rpt->config.alarms_delay = appconfig_get_number(&stream_config, rpt->machine_guid, "postpone alarms on connect seconds", rpt->config.alarms_delay); + rpt->config.alarms_delay = appconfig_get_duration_seconds(&stream_config, rpt->key, "postpone alerts on connect", rpt->config.alarms_delay); + rpt->config.alarms_delay = appconfig_get_duration_seconds(&stream_config, rpt->machine_guid, "postpone alerts on connect", rpt->config.alarms_delay); - rpt->config.alarms_history = appconfig_get_number(&stream_config, rpt->key, "default health log history", rpt->config.alarms_history); - rpt->config.alarms_history = appconfig_get_number(&stream_config, rpt->machine_guid, "health log history", rpt->config.alarms_history); + rpt->config.alarms_history = appconfig_get_duration_seconds(&stream_config, rpt->key, "health log retention", rpt->config.alarms_history); + rpt->config.alarms_history = appconfig_get_duration_seconds(&stream_config, rpt->machine_guid, "health log retention", rpt->config.alarms_history); - rpt->config.rrdpush_enabled = appconfig_get_boolean(&stream_config, rpt->key, "default proxy enabled", rpt->config.rrdpush_enabled); + 
rpt->config.rrdpush_enabled = appconfig_get_boolean(&stream_config, rpt->key, "proxy enabled", rpt->config.rrdpush_enabled); rpt->config.rrdpush_enabled = appconfig_get_boolean(&stream_config, rpt->machine_guid, "proxy enabled", rpt->config.rrdpush_enabled); - rpt->config.rrdpush_destination = appconfig_get(&stream_config, rpt->key, "default proxy destination", rpt->config.rrdpush_destination); + rpt->config.rrdpush_destination = appconfig_get(&stream_config, rpt->key, "proxy destination", rpt->config.rrdpush_destination); rpt->config.rrdpush_destination = appconfig_get(&stream_config, rpt->machine_guid, "proxy destination", rpt->config.rrdpush_destination); - rpt->config.rrdpush_api_key = appconfig_get(&stream_config, rpt->key, "default proxy api key", rpt->config.rrdpush_api_key); + rpt->config.rrdpush_api_key = appconfig_get(&stream_config, rpt->key, "proxy api key", rpt->config.rrdpush_api_key); rpt->config.rrdpush_api_key = appconfig_get(&stream_config, rpt->machine_guid, "proxy api key", rpt->config.rrdpush_api_key); - rpt->config.rrdpush_send_charts_matching = appconfig_get(&stream_config, rpt->key, "default proxy send charts matching", rpt->config.rrdpush_send_charts_matching); + rpt->config.rrdpush_send_charts_matching = appconfig_get(&stream_config, rpt->key, "proxy send charts matching", rpt->config.rrdpush_send_charts_matching); rpt->config.rrdpush_send_charts_matching = appconfig_get(&stream_config, rpt->machine_guid, "proxy send charts matching", rpt->config.rrdpush_send_charts_matching); rpt->config.rrdpush_enable_replication = appconfig_get_boolean(&stream_config, rpt->key, "enable replication", rpt->config.rrdpush_enable_replication); rpt->config.rrdpush_enable_replication = appconfig_get_boolean(&stream_config, rpt->machine_guid, "enable replication", rpt->config.rrdpush_enable_replication); - rpt->config.rrdpush_seconds_to_replicate = appconfig_get_number(&stream_config, rpt->key, "seconds to replicate", rpt->config.rrdpush_seconds_to_replicate); 
- rpt->config.rrdpush_seconds_to_replicate = appconfig_get_number(&stream_config, rpt->machine_guid, "seconds to replicate", rpt->config.rrdpush_seconds_to_replicate); + rpt->config.rrdpush_seconds_to_replicate = appconfig_get_duration_seconds(&stream_config, rpt->key, "replication period", rpt->config.rrdpush_seconds_to_replicate); + rpt->config.rrdpush_seconds_to_replicate = appconfig_get_duration_seconds(&stream_config, rpt->machine_guid, "replication period", rpt->config.rrdpush_seconds_to_replicate); - rpt->config.rrdpush_replication_step = appconfig_get_number(&stream_config, rpt->key, "seconds per replication step", rpt->config.rrdpush_replication_step); - rpt->config.rrdpush_replication_step = appconfig_get_number(&stream_config, rpt->machine_guid, "seconds per replication step", rpt->config.rrdpush_replication_step); + rpt->config.rrdpush_replication_step = appconfig_get_number(&stream_config, rpt->key, "replication step", rpt->config.rrdpush_replication_step); + rpt->config.rrdpush_replication_step = appconfig_get_number(&stream_config, rpt->machine_guid, "replication step", rpt->config.rrdpush_replication_step); rpt->config.rrdpush_compression = default_rrdpush_compression_enabled; rpt->config.rrdpush_compression = appconfig_get_boolean(&stream_config, rpt->key, "enable compression", rpt->config.rrdpush_compression); @@ -659,7 +657,7 @@ static void rrdpush_receive(struct receiver_state *rpt) is_ephemeral = appconfig_get_boolean(&stream_config, rpt->machine_guid, "is ephemeral node", is_ephemeral); if(rpt->config.rrdpush_compression) { - char *order = appconfig_get(&stream_config, rpt->key, "compression algorithms order", RRDPUSH_COMPRESSION_ALGORITHMS_ORDER); + const char *order = appconfig_get(&stream_config, rpt->key, "compression algorithms order", RRDPUSH_COMPRESSION_ALGORITHMS_ORDER); order = appconfig_get(&stream_config, rpt->machine_guid, "compression algorithms order", order); rrdpush_parse_compression_order(rpt, order); } @@ -921,7 +919,7 @@ 
void *rrdpush_receiver_thread(void *ptr) { worker_unregister(); rrdhost_clear_receiver(rpt); - receiver_state_free(rpt); rrdhost_set_is_parent_label(); + receiver_state_free(rpt); return NULL; } diff --git a/src/streaming/rrdpush.c b/src/streaming/rrdpush.c index 7a5d6f73084aba..8bb5454034880b 100644 --- a/src/streaming/rrdpush.c +++ b/src/streaming/rrdpush.c @@ -25,30 +25,19 @@ * */ -struct config stream_config = { - .first_section = NULL, - .last_section = NULL, - .mutex = NETDATA_MUTEX_INITIALIZER, - .index = { - .avl_tree = { - .root = NULL, - .compar = appconfig_section_compare - }, - .rwlock = AVL_LOCK_INITIALIZER - } -}; +struct config stream_config = APPCONFIG_INITIALIZER; unsigned int default_rrdpush_enabled = 0; unsigned int default_rrdpush_compression_enabled = 1; -char *default_rrdpush_destination = NULL; -char *default_rrdpush_api_key = NULL; -char *default_rrdpush_send_charts_matching = "*"; +const char *default_rrdpush_destination = NULL; +const char *default_rrdpush_api_key = NULL; +const char *default_rrdpush_send_charts_matching = "*"; bool default_rrdpush_enable_replication = true; time_t default_rrdpush_seconds_to_replicate = 86400; time_t default_rrdpush_replication_step = 600; -char *netdata_ssl_ca_path = NULL; -char *netdata_ssl_ca_file = NULL; +const char *netdata_ssl_ca_path = NULL; +const char *netdata_ssl_ca_file = NULL; static void load_stream_conf() { errno_clear(); @@ -61,32 +50,36 @@ static void load_stream_conf() { if(!appconfig_load(&stream_config, filename, 0, NULL)) nd_log_daemon(NDLP_NOTICE, "CONFIG: cannot load stock config '%s'. 
Running with internal defaults.", filename); } - freez(filename); -} - -bool rrdpush_receiver_needs_dbengine() { - struct section *co; - - for(co = stream_config.first_section; co; co = co->next) { - if(strcmp(co->name, "stream") == 0) - continue; // the first section is not relevant - char *s; - - s = appconfig_get_by_section(co, "enabled", NULL); - if(!s || !appconfig_test_boolean_value(s)) - continue; - - s = appconfig_get_by_section(co, "default memory mode", NULL); - if(s && strcmp(s, "dbengine") == 0) - return true; + freez(filename); - s = appconfig_get_by_section(co, "memory mode", NULL); - if(s && strcmp(s, "dbengine") == 0) - return true; - } + appconfig_move(&stream_config, + CONFIG_SECTION_STREAM, "timeout seconds", + CONFIG_SECTION_STREAM, "timeout"); + + appconfig_move(&stream_config, + CONFIG_SECTION_STREAM, "reconnect delay seconds", + CONFIG_SECTION_STREAM, "reconnect delay"); + + appconfig_move_everywhere(&stream_config, "default memory mode", "db"); + appconfig_move_everywhere(&stream_config, "memory mode", "db"); + appconfig_move_everywhere(&stream_config, "db mode", "db"); + appconfig_move_everywhere(&stream_config, "default history", "retention"); + appconfig_move_everywhere(&stream_config, "history", "retention"); + appconfig_move_everywhere(&stream_config, "default proxy enabled", "proxy enabled"); + appconfig_move_everywhere(&stream_config, "default proxy destination", "proxy destination"); + appconfig_move_everywhere(&stream_config, "default proxy api key", "proxy api key"); + appconfig_move_everywhere(&stream_config, "default proxy send charts matching", "proxy send charts matching"); + appconfig_move_everywhere(&stream_config, "default health log history", "health log retention"); + appconfig_move_everywhere(&stream_config, "health log history", "health log retention"); + appconfig_move_everywhere(&stream_config, "seconds to replicate", "replication period"); + appconfig_move_everywhere(&stream_config, "seconds per replication step", 
"replication step"); + appconfig_move_everywhere(&stream_config, "default postpone alarms on connect seconds", "postpone alerts on connect"); + appconfig_move_everywhere(&stream_config, "postpone alarms on connect seconds", "postpone alerts on connect"); +} - return false; +bool rrdpush_receiver_needs_dbengine(void) { + return stream_conf_needs_dbengine(&stream_config); } int rrdpush_init() { @@ -94,19 +87,33 @@ int rrdpush_init() { // load stream.conf load_stream_conf(); - default_rrdpush_enabled = (unsigned int)appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "enabled", default_rrdpush_enabled); - default_rrdpush_destination = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "destination", ""); - default_rrdpush_api_key = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "api key", ""); - default_rrdpush_send_charts_matching = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "send charts matching", default_rrdpush_send_charts_matching); + default_rrdpush_enabled = + (unsigned int)appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "enabled", default_rrdpush_enabled); - default_rrdpush_enable_replication = config_get_boolean(CONFIG_SECTION_DB, "enable replication", default_rrdpush_enable_replication); - default_rrdpush_seconds_to_replicate = config_get_number(CONFIG_SECTION_DB, "seconds to replicate", default_rrdpush_seconds_to_replicate); - default_rrdpush_replication_step = config_get_number(CONFIG_SECTION_DB, "seconds per replication step", default_rrdpush_replication_step); + default_rrdpush_destination = + appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "destination", ""); - rrdhost_free_orphan_time_s = config_get_number(CONFIG_SECTION_DB, "cleanup orphan hosts after secs", rrdhost_free_orphan_time_s); + default_rrdpush_api_key = + appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "api key", ""); - default_rrdpush_compression_enabled = (unsigned int)appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, - "enable 
compression", default_rrdpush_compression_enabled); + default_rrdpush_send_charts_matching = + appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "send charts matching", default_rrdpush_send_charts_matching); + + default_rrdpush_enable_replication = + config_get_boolean(CONFIG_SECTION_DB, "enable replication", default_rrdpush_enable_replication); + + default_rrdpush_seconds_to_replicate = + config_get_duration_seconds(CONFIG_SECTION_DB, "replication period", default_rrdpush_seconds_to_replicate); + + default_rrdpush_replication_step = + config_get_duration_seconds(CONFIG_SECTION_DB, "replication step", default_rrdpush_replication_step); + + rrdhost_free_orphan_time_s = + config_get_duration_seconds(CONFIG_SECTION_DB, "cleanup orphan hosts after", rrdhost_free_orphan_time_s); + + default_rrdpush_compression_enabled = + (unsigned int)appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, + "enable compression", default_rrdpush_compression_enabled); rrdpush_compression_levels[COMPRESSION_ALGORITHM_BROTLI] = (int)appconfig_get_number( &stream_config, CONFIG_SECTION_STREAM, "brotli compression level", @@ -201,22 +208,7 @@ static inline bool should_send_chart_matching(RRDSET *st, RRDSET_FLAGS flags) { } int configured_as_parent() { - struct section *section = NULL; - int is_parent = 0; - - appconfig_wrlock(&stream_config); - for (section = stream_config.first_section; section; section = section->next) { - nd_uuid_t uuid; - - if (uuid_parse(section->name, uuid) != -1 && - appconfig_get_boolean_by_section(section, "enabled", 0)) { - is_parent = 1; - break; - } - } - appconfig_unlock(&stream_config); - - return is_parent; + return stream_conf_has_uuid_section(&stream_config); } // chart labels diff --git a/src/streaming/rrdpush.h b/src/streaming/rrdpush.h index 17d3aadd9efb2c..f8e1076410d8fe 100644 --- a/src/streaming/rrdpush.h +++ b/src/streaming/rrdpush.h @@ -297,12 +297,12 @@ struct receiver_state { time_t alarms_delay; uint32_t alarms_history; int 
rrdpush_enabled; - char *rrdpush_api_key; // DONT FREE - it is allocated in appconfig - char *rrdpush_send_charts_matching; // DONT FREE - it is allocated in appconfig + const char *rrdpush_api_key; // DONT FREE - it is allocated in appconfig + const char *rrdpush_send_charts_matching; // DONT FREE - it is allocated in appconfig bool rrdpush_enable_replication; time_t rrdpush_seconds_to_replicate; time_t rrdpush_replication_step; - char *rrdpush_destination; // DONT FREE - it is allocated in appconfig + const char *rrdpush_destination; // DONT FREE - it is allocated in appconfig unsigned int rrdpush_compression; STREAM_CAPABILITIES compression_priorities[COMPRESSION_ALGORITHM_MAX]; } config; @@ -349,9 +349,9 @@ struct rrdpush_destinations { extern unsigned int default_rrdpush_enabled; extern unsigned int default_rrdpush_compression_enabled; -extern char *default_rrdpush_destination; -extern char *default_rrdpush_api_key; -extern char *default_rrdpush_send_charts_matching; +extern const char *default_rrdpush_destination; +extern const char *default_rrdpush_api_key; +extern const char *default_rrdpush_send_charts_matching; extern bool default_rrdpush_enable_replication; extern time_t default_rrdpush_seconds_to_replicate; extern time_t default_rrdpush_replication_step; diff --git a/src/streaming/sender.c b/src/streaming/sender.c index 31b2e8b863913a..6b33ccd8788410 100644 --- a/src/streaming/sender.c +++ b/src/streaming/sender.c @@ -465,8 +465,8 @@ void *rrdpush_sender_thread(void *ptr) { netdata_log_info("STREAM %s [send]: thread created (task id %d)", rrdhost_hostname(s->host), gettid_cached()); - s->timeout = (int)appconfig_get_number( - &stream_config, CONFIG_SECTION_STREAM, "timeout seconds", 600); + s->timeout = (int)appconfig_get_duration_seconds( + &stream_config, CONFIG_SECTION_STREAM, "timeout", 600); s->default_port = (int)appconfig_get_number( &stream_config, CONFIG_SECTION_STREAM, "default port", 19999); @@ -474,8 +474,8 @@ void 
*rrdpush_sender_thread(void *ptr) { s->buffer->max_size = (size_t)appconfig_get_number( &stream_config, CONFIG_SECTION_STREAM, "buffer size bytes", 1024 * 1024 * 10); - s->reconnect_delay = (unsigned int)appconfig_get_number( - &stream_config, CONFIG_SECTION_STREAM, "reconnect delay seconds", 5); + s->reconnect_delay = (unsigned int)appconfig_get_duration_seconds( + &stream_config, CONFIG_SECTION_STREAM, "reconnect delay", 5); remote_clock_resync_iterations = (unsigned int)appconfig_get_number( &stream_config, CONFIG_SECTION_STREAM, diff --git a/src/streaming/sender_internals.h b/src/streaming/sender_internals.h index 237113a8c10b0a..0db4ac6985b2ae 100644 --- a/src/streaming/sender_internals.h +++ b/src/streaming/sender_internals.h @@ -38,8 +38,8 @@ #endif extern struct config stream_config; -extern char *netdata_ssl_ca_path; -extern char *netdata_ssl_ca_file; +extern const char *netdata_ssl_ca_path; +extern const char *netdata_ssl_ca_file; bool attempt_to_connect(struct sender_state *state); void rrdpush_sender_on_connect(RRDHOST *host); diff --git a/src/streaming/stream.conf b/src/streaming/stream.conf index 0b9be526e8bc7e..659bd830df56ff 100644 --- a/src/streaming/stream.conf +++ b/src/streaming/stream.conf @@ -62,7 +62,7 @@ #enable compression = yes # The timeout to connect and send metrics - #timeout seconds = 60 + #timeout = 1m # If the destination line above does not specify a port, use this #default port = 19999 @@ -84,7 +84,7 @@ # If the connection fails, or it disconnects, # retry after that many seconds. - #reconnect delay seconds = 5 + #reconnect delay = 5s # Sync the clock of the charts for that many iterations, when starting. # It is ignored when replication is enabled @@ -127,19 +127,19 @@ # should also be matched at netdata.conf [web].allow connections from #allow from = * - # The default history in entries, for all hosts using this API key. + # The history in entries (for db alloc or ram), for all hosts using this API key. 
# You can also set it per host below. - # For the default db mode (dbengine), this is ignored. - #default history = 3600 + # For the default db (dbengine), this is ignored. + #retention = 3600 - # The default memory mode to be used for all hosts using this API key. + # The database to be used for all hosts using this API key. # You can also set it per host below. # If you don't set it here, the memory mode of netdata.conf will be used. # Valid modes: # ram keep it in RAM, don't touch the disk # none no database at all (use this on headless proxies) - # dbengine like a traditional database - #default memory mode = dbengine + # dbengine Netdata's high performance database + #db = dbengine # Shall we enable health monitoring for the hosts using this API key? # 3 possible values: @@ -151,18 +151,18 @@ # The default is taken from [health].enabled of netdata.conf #health enabled by default = auto - # postpone alarms for a short period after the sender is connected - #default postpone alarms on connect seconds = 60 + # postpone alerts for a short period after the sender is connected + #postpone alerts on connect = 1m - # seconds of health log events to keep - #default health log history = 432000 + # the duration to maintain health log events + #health log retention = 5d # need to route metrics differently? set these. # the defaults are the ones at the [stream] section (above) - #default proxy enabled = yes | no - #default proxy destination = IP:PORT IP:PORT ... - #default proxy api key = API_KEY - #default proxy send charts matching = * + #proxy enabled = yes | no + #proxy destination = IP:PORT IP:PORT ... + #proxy api key = API_KEY + #proxy send charts matching = * # Stream Compression # By default it is enabled. @@ -177,13 +177,13 @@ #enable replication = yes # How many seconds to replicate from each child. Default: a day - #seconds to replicate = 86400 + #replication period = 1d # The duration we want to replicate per each step. 
- #seconds per replication step = 600 + #replication step = 10m # Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable - # after the specified duration of "cleanup ephemeral hosts after secs" (as defined in the db section of netdata.conf) + # after the specified duration of "cleanup ephemeral hosts after" (as defined in the db section of netdata.conf) # from the time of the node's last connection. #is ephemeral node = no @@ -221,20 +221,20 @@ #allow from = * # The number of entries in the database. - # This is ignored for db mode dbengine. - #history = 3600 + # This is ignored for db dbengine. + #retention = 3600 # The memory mode of the database: ram | none | dbengine - #memory mode = dbengine + #db = dbengine # Health / alarms control: yes | no | auto #health enabled = auto - # postpone alarms when the sender connects - #postpone alarms on connect seconds = 60 + # postpone alerts when the sender connects + #postpone alerts on connect = 1m - # seconds of health log events to keep - #health log history = 432000 + # the duration to maintain health log events + #health log retention = 5d # need to route metrics differently? # the defaults are the ones at the [API KEY] section @@ -253,12 +253,12 @@ #enable replication = yes # How many seconds to replicate from each child. - #seconds to replicate = 86400 + #replication period = 1d # The duration we want to replicate per each step. - #seconds per replication step = 600 + #replication step = 10m # Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable - # after the specified duration of "cleanup ephemeral hosts after secs" (as defined in the db section of netdata.conf) + # after the specified duration of "cleanup ephemeral hosts after" (as defined in the db section of netdata.conf) # from the time of the node's last connection. 
#is ephemeral node = no diff --git a/src/web/api/formatters/charts2json.c b/src/web/api/formatters/charts2json.c index 0b45d77c49951a..9407f224b61f11 100644 --- a/src/web/api/formatters/charts2json.c +++ b/src/web/api/formatters/charts2json.c @@ -37,7 +37,7 @@ const char* get_release_channel() { } void charts2json(RRDHOST *host, BUFFER *wb) { - static char *custom_dashboard_info_js_filename = NULL; + static const char *custom_dashboard_info_js_filename = NULL; size_t c = 0, dimensions = 0, memory = 0, alarms = 0; RRDSET *st; diff --git a/src/web/api/health/README.md b/src/web/api/health/README.md index 725b4a36f76596..e59465dccb21fa 100644 --- a/src/web/api/health/README.md +++ b/src/web/api/health/README.md @@ -33,7 +33,7 @@ The size of the alert log is configured in `netdata.conf`. There are 2 settings: ``` [health] in memory max health log entries = 1000 - health log history = 432000 + health log retention = 5d ``` The API call retrieves all entries of the alert log: diff --git a/src/web/api/queries/weights.c b/src/web/api/queries/weights.c index 44928fea8a3003..0442f5916b751b 100644 --- a/src/web/api/queries/weights.c +++ b/src/web/api/queries/weights.c @@ -4,9 +4,7 @@ #include "database/KolmogorovSmirnovDist.h" #define MAX_POINTS 10000 -int enable_metric_correlations = CONFIG_BOOLEAN_YES; int metric_correlations_version = 1; -WEIGHTS_METHOD default_metric_correlations_method = WEIGHTS_METHOD_MC_KS2; typedef struct weights_stats { NETDATA_DOUBLE max_base_high_ratio; @@ -36,7 +34,7 @@ WEIGHTS_METHOD weights_string_to_method(const char *method) { if(strcmp(method, weights_methods[i].name) == 0) return weights_methods[i].value; - return default_metric_correlations_method; + return WEIGHTS_METHOD_MC_KS2; } const char *weights_method_to_string(WEIGHTS_METHOD method) { @@ -44,7 +42,7 @@ const char *weights_method_to_string(WEIGHTS_METHOD method) { if(weights_methods[i].value == method) return weights_methods[i].name; - return "unknown"; + return "ks2"; } // 
---------------------------------------------------------------------------- diff --git a/src/web/api/queries/weights.h b/src/web/api/queries/weights.h index be7e5a8b3f6f3a..6d2bf8e090f4ad 100644 --- a/src/web/api/queries/weights.h +++ b/src/web/api/queries/weights.h @@ -18,9 +18,7 @@ typedef enum { WEIGHTS_FORMAT_MULTINODE = 3, } WEIGHTS_FORMAT; -extern int enable_metric_correlations; extern int metric_correlations_version; -extern WEIGHTS_METHOD default_metric_correlations_method; typedef bool (*weights_interrupt_callback_t)(void *data); diff --git a/src/web/api/v1/api_v1_manage.c b/src/web/api/v1/api_v1_manage.c index 46a12d8bdaf863..46611fbf50a88c 100644 --- a/src/web/api/v1/api_v1_manage.c +++ b/src/web/api/v1/api_v1_manage.c @@ -7,7 +7,7 @@ char *api_secret; static char *get_mgmt_api_key(void) { char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s/netdata.api.key", netdata_configured_varlib_dir); - char *api_key_filename=config_get(CONFIG_SECTION_REGISTRY, "netdata management api key file", filename); + const char *api_key_filename = config_get(CONFIG_SECTION_REGISTRY, "netdata management api key file", filename); static char guid[GUID_LEN + 1] = ""; if(likely(guid[0])) diff --git a/src/web/api/v1/api_v1_weights.c b/src/web/api/v1/api_v1_weights.c index 0d8e5f37157237..e39fceae12d7db 100644 --- a/src/web/api/v1/api_v1_weights.c +++ b/src/web/api/v1/api_v1_weights.c @@ -3,7 +3,7 @@ #include "api_v1_calls.h" int api_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url) { - return web_client_api_request_weights(host, w, url, default_metric_correlations_method, WEIGHTS_FORMAT_CHARTS, 1); + return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_MC_KS2, WEIGHTS_FORMAT_CHARTS, 1); } int api_v1_weights(RRDHOST *host, struct web_client *w, char *url) { diff --git a/src/web/rtc/webrtc.c b/src/web/rtc/webrtc.c index eb73f0b5e45f5d..07cbfc5e66ffd6 100644 --- a/src/web/rtc/webrtc.c +++ b/src/web/rtc/webrtc.c @@ -84,8 +84,8 @@ 
static struct { bool enabled; char *iceServers[WEBRTC_MAX_ICE_SERVERS]; int iceServersCount; - char *proxyServer; - char *bindAddress; + const char *proxyServer; + const char *bindAddress; struct { SPINLOCK spinlock; @@ -142,10 +142,12 @@ static void webrtc_config_ice_servers(void) { webrtc_base.iceServersCount = i; internal_error(true, "WEBRTC: there are %d default ice servers: '%s'", webrtc_base.iceServersCount, buffer_tostring(wb)); - char *servers = config_get(CONFIG_SECTION_WEBRTC, "ice servers", buffer_tostring(wb)); + const char *servers = config_get(CONFIG_SECTION_WEBRTC, "ice servers", buffer_tostring(wb)); webrtc_base.iceServersCount = 0; - char *s = servers, *e; + char tmp[strlen(servers) + 1]; + strcpy(tmp, servers); + char *s = tmp, *e; while(*s) { if(isspace(*s)) s++; diff --git a/src/web/server/README.md b/src/web/server/README.md index e406408756e392..e155be6214702b 100644 --- a/src/web/server/README.md +++ b/src/web/server/README.md @@ -20,42 +20,42 @@ Scroll down to the `[web]` section to find the following settings. ## Settings -| Setting | Default | Description | -|:-------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `ssl key` | `/etc/netdata/ssl/key.pem` | Declare the location of an SSL key to [enable HTTPS](#enable-httpstls-support). 
| -| `ssl certificate` | `/etc/netdata/ssl/cert.pem` | Declare the location of an SSL certificate to [enable HTTPS](#enable-httpstls-support). | -| `tls version` | `1.3` | Choose which TLS version to use. While all versions are allowed (`1` or `1.0`, `1.1`, `1.2` and `1.3`), we recommend `1.3` for the most secure encryption. If left blank, Netdata uses the highest available protocol version on your system. | -| `tls ciphers` | `none` | Choose which TLS cipher to use. Options include `TLS_AES_256_GCM_SHA384`, `TLS_CHACHA20_POLY1305_SHA256`, and `TLS_AES_128_GCM_SHA256`. If left blank, Netdata uses the default cipher list for that protocol provided by your TLS implementation. | -| `ses max window` | `15` | See [single exponential smoothing](/src/web/api/queries/ses/README.md). | -| `des max window` | `15` | See [double exponential smoothing](/src/web/api/queries/des/README.md). | -| `mode` | `static-threaded` | Turns on (`static-threaded` or off (`none`) the static-threaded web server. See the [example](#disable-the-web-server) to turn off the web server and disable the dashboard. | -| `listen backlog` | `4096` | The port backlog. Check `man 2 listen`. | -| `default port` | `19999` | The listen port for the static web server. | -| `web files owner` | `netdata` | The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the user given is not found, Netdata will only serve files owned by user given in `run as user`. | -| `web files group` | `netdata` | If this is set, Netdata will check if the file is owned by this group and refuse to serve the file if it's not. | -| `disconnect idle clients after seconds` | `60` | The time in seconds to disconnect web clients after being totally idle. | -| `timeout for first request` | `60` | How long to wait for a client to send a request before closing the socket. Prevents slow request attacks. 
| -| `accept a streaming request every seconds` | `0` | Can be used to set a limit on how often a parent node will accept streaming requests from child nodes in a [streaming and replication setup](/src/streaming/README.md). | -| `respect do not track policy` | `no` | If set to `yes`, Netdata will respect the user's browser preferences for [Do Not Track](https://www.eff.org/issues/do-not-track) (DNT) and storing cookies. If DNT is _enabled_ in the browser, and this option is set to `yes`, nodes will not connect to any [registry](/src/registry/README.md). For certain browsers, users must disable DNT and change this option to `yes` for full functionality. | -| `x-frame-options response header` | ` ` | Avoid [clickjacking attacks](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options), by ensuring that the content is not embedded into other sites. | -| `allow connections from` | `localhost *` | Declare which IP addresses or full-qualified domain names (FQDNs) are allowed to connect to the web server, including the [dashboard](/docs/dashboards-and-charts/README.md) or [HTTP API](/src/web/api/README.md). This is a global setting with higher priority to any of the ones below. | -| `allow connections by dns` | `heuristic` | See the [access list examples](#access-lists) for details on using `allow` settings. 
| -| `allow dashboard from` | `localhost *` | | -| `allow dashboard by dns` | `heuristic` | | -| `allow badges from` | `*` | | -| `allow badges by dns` | `heuristic` | | -| `allow streaming from` | `*` | | -| `allow streaming by dns` | `heuristic` | | -| `allow netdata.conf` | `localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.* UNKNOWN` | | -| `allow netdata.conf by dns` | `no` | | -| `allow management from` | `localhost` | | -| `allow management by dns` | `heuristic` | | -| `enable gzip compression` | `yes` | When set to `yes`, Netdata web responses will be GZIP compressed, if the web client accepts such responses. | -| `gzip compression strategy` | `default` | Valid settings are `default`, `filtered`, `huffman only`, `rle` and `fixed`. | -| `gzip compression level` | `3` | Valid settings are 1 (fastest) to 9 (best ratio). | -| `web server threads` | ` ` | How many processor threads the web server is allowed. The default is system-specific, the minimum of `6` or the number of CPU cores. | -| `web server max sockets` | ` ` | Available sockets. The default is system-specific, automatically adjusted to 50% of the max number of open files Netdata is allowed to use (via `/etc/security/limits.conf` or systemd), to allow enough file descriptors to be available for data collection. | -| `custom dashboard_info.js` | ` ` | Specifies the location of a custom `dashboard.js` file. See [customizing the standard dashboard](/docs/developer-and-contributor-corner/customize.md#customize-the-standard-dashboard) for details. 
| +| Setting | Default | Description | +|:-----------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `ssl key` | `/etc/netdata/ssl/key.pem` | Declare the location of an SSL key to [enable HTTPS](#enable-httpstls-support). | +| `ssl certificate` | `/etc/netdata/ssl/cert.pem` | Declare the location of an SSL certificate to [enable HTTPS](#enable-httpstls-support). | +| `tls version` | `1.3` | Choose which TLS version to use. While all versions are allowed (`1` or `1.0`, `1.1`, `1.2` and `1.3`), we recommend `1.3` for the most secure encryption. If left blank, Netdata uses the highest available protocol version on your system. | +| `tls ciphers` | `none` | Choose which TLS cipher to use. Options include `TLS_AES_256_GCM_SHA384`, `TLS_CHACHA20_POLY1305_SHA256`, and `TLS_AES_128_GCM_SHA256`. If left blank, Netdata uses the default cipher list for that protocol provided by your TLS implementation. | +| `ses max window` | `15` | See [single exponential smoothing](/src/web/api/queries/ses/README.md). | +| `des max window` | `15` | See [double exponential smoothing](/src/web/api/queries/des/README.md). | +| `mode` | `static-threaded` | Turns on (`static-threaded` or off (`none`) the static-threaded web server. See the [example](#disable-the-web-server) to turn off the web server and disable the dashboard. | +| `listen backlog` | `4096` | The port backlog. Check `man 2 listen`. 
| +| `default port` | `19999` | The listen port for the static web server. | +| `web files owner` | `netdata` | The user that owns the web static files. Netdata will refuse to serve a file that is not owned by this user, even if it has read access to that file. If the user given is not found, Netdata will only serve files owned by user given in `run as user`. | +| `web files group` | `netdata` | If this is set, Netdata will check if the file is owned by this group and refuse to serve the file if it's not. | +| `disconnect idle clients after` | `1m` | The time in seconds to disconnect web clients after being totally idle. | +| `timeout for first request` | `1m` | How long to wait for a client to send a request before closing the socket. Prevents slow request attacks. | +| `accept a streaming request every` | `off` | Can be used to set a limit on how often a parent node will accept streaming requests from child nodes in a [streaming and replication setup](/src/streaming/README.md). | +| `respect do not track policy` | `no` | If set to `yes`, Netdata will respect the user's browser preferences for [Do Not Track](https://www.eff.org/issues/do-not-track) (DNT) and storing cookies. If DNT is _enabled_ in the browser, and this option is set to `yes`, nodes will not connect to any [registry](/src/registry/README.md). For certain browsers, users must disable DNT and change this option to `yes` for full functionality. | +| `x-frame-options response header` | ` ` | Avoid [clickjacking attacks](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options), by ensuring that the content is not embedded into other sites. | +| `allow connections from` | `localhost *` | Declare which IP addresses or full-qualified domain names (FQDNs) are allowed to connect to the web server, including the [dashboard](/docs/dashboards-and-charts/README.md) or [HTTP API](/src/web/api/README.md). This is a global setting with higher priority to any of the ones below. 
| +| `allow connections by dns` | `heuristic` | See the [access list examples](#access-lists) for details on using `allow` settings. | +| `allow dashboard from` | `localhost *` | | +| `allow dashboard by dns` | `heuristic` | | +| `allow badges from` | `*` | | +| `allow badges by dns` | `heuristic` | | +| `allow streaming from` | `*` | | +| `allow streaming by dns` | `heuristic` | | +| `allow netdata.conf` | `localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.* UNKNOWN` | | +| `allow netdata.conf by dns` | `no` | | +| `allow management from` | `localhost` | | +| `allow management by dns` | `heuristic` | | +| `enable gzip compression` | `yes` | When set to `yes`, Netdata web responses will be GZIP compressed, if the web client accepts such responses. | +| `gzip compression strategy` | `default` | Valid settings are `default`, `filtered`, `huffman only`, `rle` and `fixed`. | +| `gzip compression level` | `3` | Valid settings are 1 (fastest) to 9 (best ratio). | +| `web server threads` | ` ` | How many processor threads the web server is allowed. The default is system-specific, the minimum of `6` or the number of CPU cores. | +| `web server max sockets` | ` ` | Available sockets. The default is system-specific, automatically adjusted to 50% of the max number of open files Netdata is allowed to use (via `/etc/security/limits.conf` or systemd), to allow enough file descriptors to be available for data collection. | +| `custom dashboard_info.js` | ` ` | Specifies the location of a custom `dashboard.js` file. See [customizing the standard dashboard](/docs/developer-and-contributor-corner/customize.md#customize-the-standard-dashboard) for details. 
| ## Examples diff --git a/src/web/server/web_client.c b/src/web/server/web_client.c index 803bfe2d5f11c3..2d807835f6b080 100644 --- a/src/web/server/web_client.c +++ b/src/web/server/web_client.c @@ -6,7 +6,7 @@ // it is used by all netdata web servers int respect_web_browser_do_not_track_policy = 0; -char *web_x_frame_options = NULL; +const char *web_x_frame_options = NULL; int web_enable_gzip = 1, web_gzip_level = 3, web_gzip_strategy = Z_DEFAULT_STRATEGY; @@ -903,8 +903,8 @@ void web_client_build_http_header(struct web_client *w) { if(w->mode == HTTP_REQUEST_MODE_OPTIONS) { buffer_strcat(w->response.header_output, - "Access-Control-Allow-Methods: GET, OPTIONS\r\n" - "Access-Control-Allow-Headers: accept, x-requested-with, origin, content-type, cookie, pragma, cache-control, x-auth-token\r\n" + "Access-Control-Allow-Methods: GET, POST, OPTIONS\r\n" + "Access-Control-Allow-Headers: accept, x-requested-with, origin, content-type, cookie, pragma, cache-control, x-auth-token, x-netdata-auth, x-transaction-id\r\n" "Access-Control-Max-Age: 1209600\r\n" // 86400 * 14 ); } diff --git a/src/web/server/web_client.h b/src/web/server/web_client.h index 45ab888668e34c..cd1e5ccdfbf9c7 100644 --- a/src/web/server/web_client.h +++ b/src/web/server/web_client.h @@ -12,7 +12,7 @@ extern int web_enable_gzip, web_gzip_level, web_gzip_strategy; #define HTTP_REQ_MAX_HEADER_FETCH_TRIES 100 extern int respect_web_browser_do_not_track_policy; -extern char *web_x_frame_options; +extern const char *web_x_frame_options; typedef enum __attribute__((packed)) { HTTP_VALIDATION_OK,